/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "atom-types.h"
#include "atombios.h"
#include "processpptables.h"
#include "cgs_common.h"
#include "smu/smu_8_0_d.h"
#include "smu8_fusion.h"
#include "smu/smu_8_0_sh_mask.h"
#include "smumgr.h"
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "cz_ppsmc.h"
#include "smu8_hwmgr.h"
#include "power_state.h"
#include "pp_thermal.h"

#define ixSMUSVI_NB_CURRENTVID 0xD8230044
#define CURRENT_NB_VID_MASK 0xff000000
#define CURRENT_NB_VID__SHIFT 24
#define ixSMUSVI_GFX_CURRENTVID  0xD8230048
#define CURRENT_GFX_VID_MASK 0xff000000
#define CURRENT_GFX_VID__SHIFT 24

static const unsigned long smu8_magic = (unsigned long) PHM_Cz_Magic;

static struct smu8_power_state *cast_smu8_power_state(struct pp_hw_power_state *hw_ps)
{
	if (smu8_magic != hw_ps->magic)
		return NULL;

	return (struct smu8_power_state *)hw_ps;
}

static const struct smu8_power_state *cast_const_smu8_power_state(
				const struct pp_hw_power_state *hw_ps)
{
	if (smu8_magic != hw_ps->magic)
		return NULL;

	return (struct smu8_power_state *)hw_ps;
}

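/*
 * Map a clock request onto a DPM level index in a dependency table: the
 * *SoftMin/*HardMin messages pick the lowest level whose clock is >= the
 * request, the *SoftMax/*HardMax messages pick the highest level whose
 * clock is <= it. The same pattern is used by the ECLK, SCLK and UVD
 * lookups below.
 */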
static uint32_t smu8_get_eclk_level(struct pp_hwmgr *hwmgr,
					uint32_t clock, uint32_t msg)
{
	int i = 0;
	struct phm_vce_clock_voltage_dependency_table *ptable =
		hwmgr->dyn_state.vce_clock_voltage_dependency_table;

	switch (msg) {
	case PPSMC_MSG_SetEclkSoftMin:
	case PPSMC_MSG_SetEclkHardMin:
		for (i = 0; i < (int)ptable->count; i++) {
			if (clock <= ptable->entries[i].ecclk)
				break;
		}
		break;

	case PPSMC_MSG_SetEclkSoftMax:
	case PPSMC_MSG_SetEclkHardMax:
		for (i = ptable->count - 1; i >= 0; i--) {
			if (clock >= ptable->entries[i].ecclk)
				break;
		}
		break;

	default:
		break;
	}

	return i;
}

static uint32_t smu8_get_sclk_level(struct pp_hwmgr *hwmgr,
				uint32_t clock, uint32_t msg)
{
	int i = 0;
	struct phm_clock_voltage_dependency_table *table =
				hwmgr->dyn_state.vddc_dependency_on_sclk;

	switch (msg) {
	case PPSMC_MSG_SetSclkSoftMin:
	case PPSMC_MSG_SetSclkHardMin:
		for (i = 0; i < (int)table->count; i++) {
			if (clock <= table->entries[i].clk)
				break;
		}
		break;

	case PPSMC_MSG_SetSclkSoftMax:
	case PPSMC_MSG_SetSclkHardMax:
		for (i = table->count - 1; i >= 0; i--) {
			if (clock >= table->entries[i].clk)
				break;
		}
		break;

	default:
		break;
	}
	return i;
}

static uint32_t smu8_get_uvd_level(struct pp_hwmgr *hwmgr,
					uint32_t clock, uint32_t msg)
{
	int i = 0;
	struct phm_uvd_clock_voltage_dependency_table *ptable =
		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;

	switch (msg) {
	case PPSMC_MSG_SetUvdSoftMin:
	case PPSMC_MSG_SetUvdHardMin:
		for (i = 0; i < (int)ptable->count; i++) {
			if (clock <= ptable->entries[i].vclk)
				break;
		}
		break;

	case PPSMC_MSG_SetUvdSoftMax:
	case PPSMC_MSG_SetUvdHardMax:
		for (i = ptable->count - 1; i >= 0; i--) {
			if (clock >= ptable->entries[i].vclk)
				break;
		}
		break;

	default:
		break;
	}

	return i;
}

static uint32_t smu8_get_max_sclk_level(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	if (data->max_sclk_level == 0) {
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel);
		data->max_sclk_level = smum_get_argument(hwmgr) + 1;
	}

	return data->max_sclk_level;
}

static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct amdgpu_device *adev = hwmgr->adev;

	data->gfx_ramp_step = 256*25/100;
	data->gfx_ramp_delay = 1; /* by default, we delay 1us */

	data->mgcg_cgtt_local0 = 0x00000000;
	data->mgcg_cgtt_local1 = 0x00000000;
	data->clock_slow_down_freq = 25000;
	data->skip_clock_slow_down = 1;
	data->enable_nb_ps_policy = 1; /* 1 = enabled; was disabled until UNB was ready */
	data->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
	data->voting_rights_clients = 0x00C00033;
	data->static_screen_threshold = 8;
	data->ddi_power_gating_disabled = 0;
	data->bapm_enabled = 1;
	data->voltage_drop_threshold = 0;
	data->gfx_power_gating_threshold = 500;
	data->vce_slow_sclk_threshold = 20000;
	data->dce_slow_sclk_threshold = 30000;
	data->disable_driver_thermal_policy = 1;
	data->disable_nb_ps3_in_battery = 0;

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
							PHM_PlatformCaps_ABM);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				    PHM_PlatformCaps_NonABMSupportInPPLib);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DynamicM3Arbiter);

	data->override_dynamic_mgpg = 1;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				  PHM_PlatformCaps_DynamicPatchPowerState);

	data->thermal_auto_throttling_treshold = 0;
	data->tdr_clock = 0;
	data->disable_gfx_power_gating_in_uvd = 0;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DynamicUVDState);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UVDDPM);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_VCEDPM);

	data->cc6_settings.cpu_cc6_disable = false;
	data->cc6_settings.cpu_pstate_disable = false;
	data->cc6_settings.nb_pstate_switch_disable = false;
	data->cc6_settings.cpu_pstate_separation_time = 0;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				   PHM_PlatformCaps_DisableVoltageIsland);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_UVDPowerGating);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_VCEPowerGating);

	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			      PHM_PlatformCaps_UVDPowerGating);
	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			      PHM_PlatformCaps_VCEPowerGating);

	return 0;
}

/* convert from 8-bit VID to real voltage in mV*4; e.g. VID 0 -> 6200 (1550 mV) */
static uint32_t smu8_convert_8Bit_index_to_voltage(
			struct pp_hwmgr *hwmgr, uint16_t voltage)
{
	return 6200 - (voltage * 25);
}

static int smu8_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
			struct phm_clock_and_voltage_limits *table)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct smu8_sys_info *sys_info = &data->sys_info;
	struct phm_clock_voltage_dependency_table *dep_table =
				hwmgr->dyn_state.vddc_dependency_on_sclk;

	if (dep_table->count > 0) {
		table->sclk = dep_table->entries[dep_table->count-1].clk;
		table->vddc = smu8_convert_8Bit_index_to_voltage(hwmgr,
		   (uint16_t)dep_table->entries[dep_table->count-1].v);
	}
	table->mclk = sys_info->nbp_memory_clock[0];
	return 0;
}

static int smu8_init_dynamic_state_adjustment_rule_settings(
			struct pp_hwmgr *hwmgr,
			ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table)
{
	uint32_t table_size =
		sizeof(struct phm_clock_voltage_dependency_table) +
		(7 * sizeof(struct phm_clock_voltage_dependency_record));

	struct phm_clock_voltage_dependency_table *table_clk_vlt =
					kzalloc(table_size, GFP_KERNEL);

	if (NULL == table_clk_vlt) {
		pr_err("Can not allocate memory!\n");
		return -ENOMEM;
	}

	table_clk_vlt->count = 8;
	table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
	table_clk_vlt->entries[0].v = 0;
	table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
	table_clk_vlt->entries[1].v = 1;
	table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
	table_clk_vlt->entries[2].v = 2;
	table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
	table_clk_vlt->entries[3].v = 3;
	table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
	table_clk_vlt->entries[4].v = 4;
	table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
	table_clk_vlt->entries[5].v = 5;
	table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
	table_clk_vlt->entries[6].v = 6;
	table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
	table_clk_vlt->entries[7].v = 7;
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;

	return 0;
}

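/*
 * Read the ATOM IntegratedSystemInfo table (only table revision 9 is
 * accepted) and cache the bootup clocks, HTC thermal limits, NB P-state
 * clocks/voltages and display clock levels in the backend.
 */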
static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL;
	uint32_t i;
	int result = 0;
	uint8_t frev, crev;
	uint16_t size;

	info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *)smu_atom_get_data_table(hwmgr->adev,
			GetIndexIntoMasterTable(DATA, IntegratedSystemInfo),
			&size, &frev, &crev);

	if (info == NULL) {
		pr_err("Could not retrieve the Integrated System Info Table!\n");
		return -EINVAL;
	}

	if (crev != 9) {
		pr_err("Unsupported IGP table: %d %d\n", frev, crev);
		return -EINVAL;
	}

	data->sys_info.bootup_uma_clock =
				   le32_to_cpu(info->ulBootUpUMAClock);

	data->sys_info.bootup_engine_clock =
				le32_to_cpu(info->ulBootUpEngineClock);

	data->sys_info.dentist_vco_freq =
				   le32_to_cpu(info->ulDentistVCOFreq);

	data->sys_info.system_config =
				     le32_to_cpu(info->ulSystemConfig);

	data->sys_info.bootup_nb_voltage_index =
				  le16_to_cpu(info->usBootUpNBVoltage);

	data->sys_info.htc_hyst_lmt =
			(info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt;

	data->sys_info.htc_tmp_lmt =
			(info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt;

	if (data->sys_info.htc_tmp_lmt <=
			data->sys_info.htc_hyst_lmt) {
		pr_err("The htcTmpLmt should be larger than htcHystLmt.\n");
		return -EINVAL;
	}

	data->sys_info.nb_dpm_enable =
				data->enable_nb_ps_policy &&
				(le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1);

	for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
		if (i < SMU8_NUM_NBPMEMORYCLOCK) {
			data->sys_info.nbp_memory_clock[i] =
			  le32_to_cpu(info->ulNbpStateMemclkFreq[i]);
		}
		data->sys_info.nbp_n_clock[i] =
			    le32_to_cpu(info->ulNbpStateNClkFreq[i]);
	}

	for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) {
		data->sys_info.display_clock[i] =
					le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
	}

	/* Only 4 NB P-state levels are used here; make sure we do not exceed them */
	for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
		data->sys_info.nbp_voltage_index[i] =
			     le16_to_cpu(info->usNBPStateVoltage[i]);
	}

	if (!data->sys_info.nb_dpm_enable) {
		for (i = 1; i < SMU8_NUM_NBPSTATES; i++) {
			if (i < SMU8_NUM_NBPMEMORYCLOCK) {
				data->sys_info.nbp_memory_clock[i] =
				    data->sys_info.nbp_memory_clock[0];
			}
			data->sys_info.nbp_n_clock[i] =
				    data->sys_info.nbp_n_clock[0];
			data->sys_info.nbp_voltage_index[i] =
				    data->sys_info.nbp_voltage_index[0];
		}
	}

	if (le32_to_cpu(info->ulGPUCapInfo) &
		SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				    PHM_PlatformCaps_EnableDFSBypass);
	}

	data->sys_info.uma_channel_number = info->ucUMAChannelNumber;

	smu8_construct_max_power_limits_table(hwmgr,
				    &hwmgr->dyn_state.max_clock_voltage_on_ac);

	smu8_init_dynamic_state_adjustment_rule_settings(hwmgr,
				    &info->sDISPCLK_Voltage[0]);

	return result;
}

static int smu8_construct_boot_state(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	data->boot_power_level.engineClock =
				data->sys_info.bootup_engine_clock;

	data->boot_power_level.vddcIndex =
			(uint8_t)data->sys_info.bootup_nb_voltage_index;

	data->boot_power_level.dsDividerIndex = 0;
	data->boot_power_level.ssDividerIndex = 0;
	data->boot_power_level.allowGnbSlow = 1;
	data->boot_power_level.forceNBPstate = 0;
	data->boot_power_level.hysteresis_up = 0;
	data->boot_power_level.numSIMDToPowerDown = 0;
	data->boot_power_level.display_wm = 0;
	data->boot_power_level.vce_wm = 0;

	return 0;
}

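/*
 * Download the clock table from the SMU, patch the per-level breakdown
 * tables (SCLK/ACLK/VCLK/DCLK/ECLK) with the voltages, frequencies and
 * PLL post dividers derived from the platform dependency tables, then
 * upload the patched table back to the SMU.
 */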
static int smu8_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
{
	struct SMU8_Fusion_ClkTable *clock_table;
	int ret;
	uint32_t i;
	void *table = NULL;
	pp_atomctrl_clock_dividers_kong dividers;

	struct phm_clock_voltage_dependency_table *vddc_table =
		hwmgr->dyn_state.vddc_dependency_on_sclk;
	struct phm_clock_voltage_dependency_table *vdd_gfx_table =
		hwmgr->dyn_state.vdd_gfx_dependency_on_sclk;
	struct phm_acp_clock_voltage_dependency_table *acp_table =
		hwmgr->dyn_state.acp_clock_voltage_dependency_table;
	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
	struct phm_vce_clock_voltage_dependency_table *vce_table =
		hwmgr->dyn_state.vce_clock_voltage_dependency_table;

	if (!hwmgr->need_pp_table_upload)
		return 0;

	ret = smum_download_powerplay_table(hwmgr, &table);

	PP_ASSERT_WITH_CODE((0 == ret && NULL != table),
			    "Failed to get clock table from SMU!", return -EINVAL;);

	clock_table = (struct SMU8_Fusion_ClkTable *)table;

	/* patch clock table */
	PP_ASSERT_WITH_CODE((vddc_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
			    "Dependency table entry exceeds max limit!", return -EINVAL;);
	PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
			    "Dependency table entry exceeds max limit!", return -EINVAL;);
	PP_ASSERT_WITH_CODE((acp_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
			    "Dependency table entry exceeds max limit!", return -EINVAL;);
	PP_ASSERT_WITH_CODE((uvd_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
			    "Dependency table entry exceeds max limit!", return -EINVAL;);
	PP_ASSERT_WITH_CODE((vce_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
			    "Dependency table entry exceeds max limit!", return -EINVAL;);

	for (i = 0; i < SMU8_MAX_HARDWARE_POWERLEVELS; i++) {

		/* vddc_sclk */
		clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
			(i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
		clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
			(i < vddc_table->count) ? vddc_table->entries[i].clk : 0;

		atomctrl_get_engine_pll_dividers_kong(hwmgr,
						      clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
						      &dividers);

		clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.pll_post_divider;

		/* vddgfx_sclk */
		clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < vdd_gfx_table->count) ? (uint8_t)vdd_gfx_table->entries[i].v : 0;

		/* acp breakdown */
		clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
		clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
			(i < acp_table->count) ? acp_table->entries[i].acpclk : 0;

		atomctrl_get_engine_pll_dividers_kong(hwmgr,
						      clock_table->AclkBreakdownTable.ClkLevel[i].Frequency,
						      &dividers);

		clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.pll_post_divider;


		/* uvd breakdown */
		clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
		clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
			(i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;

		atomctrl_get_engine_pll_dividers_kong(hwmgr,
						      clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
						      &dividers);

		clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.pll_post_divider;

		clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
		clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
			(i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;

		atomctrl_get_engine_pll_dividers_kong(hwmgr,
						      clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
						      &dividers);

		clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.pll_post_divider;

		/* vce breakdown */
		clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
		clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
			(i < vce_table->count) ? vce_table->entries[i].ecclk : 0;


		atomctrl_get_engine_pll_dividers_kong(hwmgr,
						      clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
						      &dividers);

		clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.pll_post_divider;

	}
	ret = smum_upload_powerplay_table(hwmgr);

	return ret;
}

static int smu8_init_sclk_limit(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_clock_voltage_dependency_table *table =
					hwmgr->dyn_state.vddc_dependency_on_sclk;
	unsigned long clock = 0, level;

	if (NULL == table || table->count <= 0)
		return -EINVAL;

	data->sclk_dpm.soft_min_clk = table->entries[0].clk;
	data->sclk_dpm.hard_min_clk = table->entries[0].clk;

	level = smu8_get_max_sclk_level(hwmgr) - 1;

	if (level < table->count)
		clock = table->entries[level].clk;
	else
		clock = table->entries[table->count - 1].clk;

	data->sclk_dpm.soft_max_clk = clock;
	data->sclk_dpm.hard_max_clk = clock;

	return 0;
}

static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_uvd_clock_voltage_dependency_table *table =
				hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
	unsigned long clock = 0, level;

	if (NULL == table || table->count <= 0)
		return -EINVAL;

	data->uvd_dpm.soft_min_clk = 0;
	data->uvd_dpm.hard_min_clk = 0;

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel);
	level = smum_get_argument(hwmgr);

	if (level < table->count)
		clock = table->entries[level].vclk;
	else
		clock = table->entries[table->count - 1].vclk;

	data->uvd_dpm.soft_max_clk = clock;
	data->uvd_dpm.hard_max_clk = clock;

	return 0;
}

static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_vce_clock_voltage_dependency_table *table =
				hwmgr->dyn_state.vce_clock_voltage_dependency_table;
	unsigned long clock = 0, level;

	if (NULL == table || table->count <= 0)
		return -EINVAL;

617 618
	data->vce_dpm.soft_min_clk = 0;
	data->vce_dpm.hard_min_clk = 0;

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel);
	level = smum_get_argument(hwmgr);

	if (level < table->count)
		clock = table->entries[level].ecclk;
	else
		clock = table->entries[table->count - 1].ecclk;

	data->vce_dpm.soft_max_clk = clock;
	data->vce_dpm.hard_max_clk = clock;

	return 0;
}

static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_acp_clock_voltage_dependency_table *table =
				hwmgr->dyn_state.acp_clock_voltage_dependency_table;
	unsigned long clock = 0, level;

	if (NULL == table || table->count <= 0)
		return -EINVAL;

	data->acp_dpm.soft_min_clk = 0;
	data->acp_dpm.hard_min_clk = 0;

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel);
	level = smum_get_argument(hwmgr);

	if (level < table->count)
		clock = table->entries[level].acpclk;
	else
		clock = table->entries[table->count - 1].acpclk;

	data->acp_dpm.soft_max_clk = clock;
	data->acp_dpm.hard_max_clk = clock;
	return 0;
}

static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	data->uvd_power_gated = false;
	data->vce_power_gated = false;
	data->samu_power_gated = false;
	data->acp_power_gated = false;
	data->pgacpinit = true;
}

static void smu8_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	data->low_sclk_interrupt_threshold = 0;
}

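/*
 * Refresh the SCLK limits: the hard minimum follows the display config's
 * min_core_set_clock, and with the stable P-state cap enabled the soft
 * minimum is floored at 75% of the maximum AC memory clock.
 */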
static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_clock_voltage_dependency_table *table =
					hwmgr->dyn_state.vddc_dependency_on_sclk;

	unsigned long clock = 0;
	unsigned long level;
	unsigned long stable_pstate_sclk;
	unsigned long percentage;

	data->sclk_dpm.soft_min_clk = table->entries[0].clk;
	level = smu8_get_max_sclk_level(hwmgr) - 1;

	if (level < table->count)
		data->sclk_dpm.soft_max_clk  = table->entries[level].clk;
	else
		data->sclk_dpm.soft_max_clk  = table->entries[table->count - 1].clk;

	clock = hwmgr->display_config->min_core_set_clock;
	if (clock == 0)
		pr_debug("min_core_set_clock not set\n");

	if (data->sclk_dpm.hard_min_clk != clock) {
		data->sclk_dpm.hard_min_clk = clock;

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSclkHardMin,
						 smu8_get_sclk_level(hwmgr,
					data->sclk_dpm.hard_min_clk,
					     PPSMC_MSG_SetSclkHardMin));
	}

	clock = data->sclk_dpm.soft_min_clk;

	/* update minimum clocks for Stable P-State feature */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				     PHM_PlatformCaps_StablePState)) {
		percentage = 75;
		/*Sclk - calculate sclk value based on percentage and find FLOOR sclk from VddcDependencyOnSCLK table  */
		stable_pstate_sclk = (hwmgr->dyn_state.max_clock_voltage_on_ac.mclk *
					percentage) / 100;

		if (clock < stable_pstate_sclk)
			clock = stable_pstate_sclk;
	}

	if (data->sclk_dpm.soft_min_clk != clock) {
		data->sclk_dpm.soft_min_clk = clock;
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSclkSoftMin,
						smu8_get_sclk_level(hwmgr,
					data->sclk_dpm.soft_min_clk,
					     PPSMC_MSG_SetSclkSoftMin));
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				    PHM_PlatformCaps_StablePState) &&
			 data->sclk_dpm.soft_max_clk != clock) {
		data->sclk_dpm.soft_max_clk = clock;
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSclkSoftMax,
						smu8_get_sclk_level(hwmgr,
					data->sclk_dpm.soft_max_clk,
					PPSMC_MSG_SetSclkSoftMax));
	}

	return 0;
}

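/*
 * Program the deep-sleep SCLK threshold from the display config's
 * min_core_set_clock_in_sr, falling back to SMU8_MIN_DEEP_SLEEP_SCLK
 * when no value is set.
 */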
static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SclkDeepSleep)) {
		uint32_t clks = hwmgr->display_config->min_core_set_clock_in_sr;
		if (clks == 0)
			clks = SMU8_MIN_DEEP_SLEEP_SCLK;

		PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks);

		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetMinDeepSleepSclk,
				clks);
	}

	return 0;
}

static int smu8_set_watermark_threshold(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetWatermarkFrequency,
					data->sclk_dpm.soft_max_clk);

	return 0;
}

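/*
 * Allow (enable) or disallow the low-memory NB P-state when NB DPM is
 * active; the "lock" flag is passed through as the message argument and
 * apparently tells the SMU whether to latch the setting.
 */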
static int smu8_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
{
	struct smu8_hwmgr *hw_data = hwmgr->backend;

	if (hw_data->is_nb_dpm_enabled) {
		if (enable) {
			PP_DBG_LOG("enable Low Memory PState.\n");

			return smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_EnableLowMemoryPstate,
						(lock ? 1 : 0));
		} else {
			PP_DBG_LOG("disable Low Memory PState.\n");

			return smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_DisableLowMemoryPstate,
						(lock ? 1 : 0));
		}
	}

	return 0;
}

static int smu8_disable_nb_dpm(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	struct smu8_hwmgr *data = hwmgr->backend;
	unsigned long dpm_features = 0;

	if (data->is_nb_dpm_enabled) {
		smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
		dpm_features |= NB_DPM_MASK;
		ret = smum_send_msg_to_smc_with_parameter(
							  hwmgr,
							  PPSMC_MSG_DisableAllSmuFeatures,
							  dpm_features);
		if (ret == 0)
			data->is_nb_dpm_enabled = false;
	}

	return ret;
}

static int smu8_enable_nb_dpm(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	struct smu8_hwmgr *data = hwmgr->backend;
	unsigned long dpm_features = 0;

	if (!data->is_nb_dpm_enabled) {
		PP_DBG_LOG("enabling ALL SMU features.\n");
		dpm_features |= NB_DPM_MASK;
		ret = smum_send_msg_to_smc_with_parameter(
							  hwmgr,
							  PPSMC_MSG_EnableAllSmuFeatures,
							  dpm_features);
		if (ret == 0)
			data->is_nb_dpm_enabled = true;
	}

	return ret;
}

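/*
 * Re-evaluate the low-memory P-state after a state change: FORCE_HIGH
 * disallows it, CANCEL_FORCE_HIGH re-allows it, and any other action
 * follows the cc6 nb_pstate_switch_disable setting.
 */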
static int smu8_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input)
{
	bool disable_switch;
	bool enable_low_mem_state;
	struct smu8_hwmgr *hw_data = hwmgr->backend;
	const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input;
	const struct smu8_power_state *pnew_state = cast_const_smu8_power_state(states->pnew_state);

	if (hw_data->sys_info.nb_dpm_enable) {
		disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false;
		enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true;

		if (pnew_state->action == FORCE_HIGH)
			smu8_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
		else if (pnew_state->action == CANCEL_FORCE_HIGH)
			smu8_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch);
		else
			smu8_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch);
	}
	return 0;
}

static int smu8_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	int ret = 0;

	smu8_update_sclk_limit(hwmgr);
	smu8_set_deep_sleep_sclk_threshold(hwmgr);
	smu8_set_watermark_threshold(hwmgr);
	ret = smu8_enable_nb_dpm(hwmgr);
	if (ret)
		return ret;
	smu8_update_low_mem_pstate(hwmgr, input);

	return 0;
};


static int smu8_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	int ret;

	ret = smu8_upload_pptable_to_smu(hwmgr);
	if (ret)
		return ret;
	ret = smu8_init_sclk_limit(hwmgr);
	if (ret)
		return ret;
	ret = smu8_init_uvd_limit(hwmgr);
	if (ret)
		return ret;
	ret = smu8_init_vce_limit(hwmgr);
	if (ret)
		return ret;
	ret = smu8_init_acp_limit(hwmgr);
	if (ret)
		return ret;

	smu8_init_power_gate_state(hwmgr);
	smu8_init_sclk_threshold(hwmgr);

	return 0;
}

static void smu8_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *hw_data = hwmgr->backend;

	hw_data->disp_clk_bypass_pending = false;
	hw_data->disp_clk_bypass = false;
}

static void smu8_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *hw_data = hwmgr->backend;

	hw_data->is_nb_dpm_enabled = false;
}

static void smu8_reset_cc6_data(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *hw_data = hwmgr->backend;

	hw_data->cc6_settings.cc6_setting_changed = false;
	hw_data->cc6_settings.cpu_pstate_separation_time = 0;
	hw_data->cc6_settings.cpu_cc6_disable = false;
	hw_data->cc6_settings.cpu_pstate_disable = false;
}

static int smu8_power_off_asic(struct pp_hwmgr *hwmgr)
{
	smu8_power_up_display_clock_sys_pll(hwmgr);
	smu8_clear_nb_dpm_flag(hwmgr);
	smu8_reset_cc6_data(hwmgr);
	return 0;
};

static void smu8_program_voting_clients(struct pp_hwmgr *hwmgr)
{
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixCG_FREQ_TRAN_VOTING_0,
				SMU8_VOTINGRIGHTSCLIENTS_DFLT0);
}

static void smu8_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixCG_FREQ_TRAN_VOTING_0, 0);
}

static int smu8_start_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	data->dpm_flags |= DPMFlags_SCLK_Enabled;

	return smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_EnableAllSmuFeatures,
				SCLK_DPM_MASK);
}

static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
{
	int ret = 0;
	struct smu8_hwmgr *data = hwmgr->backend;
	unsigned long dpm_features = 0;

	if (data->dpm_flags & DPMFlags_SCLK_Enabled) {
		dpm_features |= SCLK_DPM_MASK;
		data->dpm_flags &= ~DPMFlags_SCLK_Enabled;
		ret = smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DisableAllSmuFeatures,
					dpm_features);
	}
	return ret;
}

static int smu8_program_bootup_state(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	data->sclk_dpm.soft_min_clk = data->sys_info.bootup_engine_clock;
	data->sclk_dpm.soft_max_clk = data->sys_info.bootup_engine_clock;

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMin,
				smu8_get_sclk_level(hwmgr,
				data->sclk_dpm.soft_min_clk,
				PPSMC_MSG_SetSclkSoftMin));

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMax,
				smu8_get_sclk_level(hwmgr,
				data->sclk_dpm.soft_max_clk,
				PPSMC_MSG_SetSclkSoftMax));

	return 0;
}

static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	data->acp_boot_level = 0xff;
}

static int smu8_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	smu8_disable_nb_dpm(hwmgr);

	smu8_clear_voting_clients(hwmgr);
	if (smu8_stop_dpm(hwmgr))
		return -EINVAL;

	return 0;
};

static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	smu8_program_voting_clients(hwmgr);
	if (smu8_start_dpm(hwmgr))
		return -EINVAL;
	smu8_program_bootup_state(hwmgr);
	smu8_reset_acp_boot_level(hwmgr);

	return 0;
};

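/*
 * Patch the requested power state: force the high NB P-state when the
 * target memory clock exceeds the lowest NB P-state memory clock or when
 * three or more displays are active, and cancel a stale FORCE_HIGH
 * otherwise.
 */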
static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state  *prequest_ps,
			const struct pp_power_state *pcurrent_ps)
{
	struct smu8_power_state *smu8_ps =
				cast_smu8_power_state(&prequest_ps->hardware);

	const struct smu8_power_state *smu8_current_ps =
				cast_const_smu8_power_state(&pcurrent_ps->hardware);

	struct smu8_hwmgr *data = hwmgr->backend;
	struct PP_Clocks clocks = {0, 0, 0, 0};
	bool force_high;

	smu8_ps->need_dfs_bypass = true;

	data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);

	clocks.memoryClock = hwmgr->display_config->min_mem_set_clock != 0 ?
				hwmgr->display_config->min_mem_set_clock :
				data->sys_info.nbp_memory_clock[1];


	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
		clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;

	force_high = (clocks.memoryClock > data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1])
			|| (hwmgr->display_config->num_display >= 3);

	smu8_ps->action = smu8_current_ps->action;

	if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
		smu8_nbdpm_pstate_enable_disable(hwmgr, false, false);
	else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD)
		smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
	else if (!force_high && (smu8_ps->action == FORCE_HIGH))
		smu8_ps->action = CANCEL_FORCE_HIGH;
	else if (force_high && (smu8_ps->action != FORCE_HIGH))
		smu8_ps->action = FORCE_HIGH;
	else
		smu8_ps->action = DO_NOTHING;

	return 0;
}

static int smu8_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	struct smu8_hwmgr *data;

	data = kzalloc(sizeof(struct smu8_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;

	result = smu8_initialize_dpm_defaults(hwmgr);
	if (result != 0) {
		pr_err("smu8_initialize_dpm_defaults failed\n");
		return result;
	}

	result = smu8_get_system_info_data(hwmgr);
	if (result != 0) {
		pr_err("smu8_get_system_info_data failed\n");
		return result;
	}

	smu8_construct_boot_state(hwmgr);

	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =  SMU8_MAX_HARDWARE_POWERLEVELS;

	return result;
}

static int smu8_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	if (hwmgr != NULL) {
		kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
		hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;

		kfree(hwmgr->backend);
		hwmgr->backend = NULL;
	}
	return 0;
}

static int smu8_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSclkSoftMin,
					smu8_get_sclk_level(hwmgr,
					data->sclk_dpm.soft_max_clk,
					PPSMC_MSG_SetSclkSoftMin));

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMax,
				smu8_get_sclk_level(hwmgr,
				data->sclk_dpm.soft_max_clk,
				PPSMC_MSG_SetSclkSoftMax));

	return 0;
}

static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_clock_voltage_dependency_table *table =
				hwmgr->dyn_state.vddc_dependency_on_sclk;
	unsigned long clock = 0, level;

	if (NULL == table || table->count <= 0)
		return -EINVAL;

	data->sclk_dpm.soft_min_clk = table->entries[0].clk;
	data->sclk_dpm.hard_min_clk = table->entries[0].clk;
	hwmgr->pstate_sclk = table->entries[0].clk;
	hwmgr->pstate_mclk = 0;

	level = smu8_get_max_sclk_level(hwmgr) - 1;

	if (level < table->count)
		clock = table->entries[level].clk;
	else
		clock = table->entries[table->count - 1].clk;

	data->sclk_dpm.soft_max_clk = clock;
	data->sclk_dpm.hard_max_clk = clock;

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMin,
				smu8_get_sclk_level(hwmgr,
				data->sclk_dpm.soft_min_clk,
				PPSMC_MSG_SetSclkSoftMin));

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMax,
				smu8_get_sclk_level(hwmgr,
				data->sclk_dpm.soft_max_clk,
				PPSMC_MSG_SetSclkSoftMax));

	return 0;
}

static int smu8_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetSclkSoftMax,
			smu8_get_sclk_level(hwmgr,
			data->sclk_dpm.soft_min_clk,
			PPSMC_MSG_SetSclkSoftMax));

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMin,
				smu8_get_sclk_level(hwmgr,
				data->sclk_dpm.soft_min_clk,
				PPSMC_MSG_SetSclkSoftMin));

	return 0;
}

static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
				enum amd_dpm_forced_level level)
{
	int ret = 0;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		ret = smu8_phm_force_dpm_highest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		ret = smu8_phm_force_dpm_lowest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		ret = smu8_phm_unforce_dpm_levels(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}

	return ret;
}

static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
		smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
	}
	return 0;
}

static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
{
	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
		smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
		return smum_send_msg_to_smc_with_parameter(
			hwmgr,
			PPSMC_MSG_UVDPowerON,
			PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0);
	}

	return 0;
}

static int  smu8_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_vce_clock_voltage_dependency_table *ptable =
		hwmgr->dyn_state.vce_clock_voltage_dependency_table;

	/* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
	if (PP_CAP(PHM_PlatformCaps_StablePState) ||
	    hwmgr->en_umd_pstate) {
		data->vce_dpm.hard_min_clk =
				  ptable->entries[ptable->count - 1].ecclk;

		smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetEclkHardMin,
			smu8_get_eclk_level(hwmgr,
				data->vce_dpm.hard_min_clk,
				PPSMC_MSG_SetEclkHardMin));
	} else {

		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetEclkHardMin, 0);
		/* disable ECLK DPM 0. Otherwise VCE could hang if
		 * switching SCLK from DPM 0 to 6/7 */
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetEclkSoftMin, 1);
	}
	return 0;
}

static int smu8_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
{
	if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
		return smum_send_msg_to_smc(hwmgr,
						     PPSMC_MSG_VCEPowerOFF);
	return 0;
}

static int smu8_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
{
	if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
		return smum_send_msg_to_smc(hwmgr,
						     PPSMC_MSG_VCEPowerON);
	return 0;
}

static uint32_t smu8_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	return data->sys_info.bootup_uma_clock;
}

static uint32_t smu8_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct pp_power_state  *ps;
	struct smu8_power_state  *smu8_ps;

	if (hwmgr == NULL)
		return -EINVAL;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	smu8_ps = cast_smu8_power_state(&ps->hardware);

	if (low)
		return smu8_ps->levels[0].engineClock;
	else
		return smu8_ps->levels[smu8_ps->level-1].engineClock;
}

static int smu8_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *hw_ps)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);

	smu8_ps->level = 1;
	smu8_ps->nbps_flags = 0;
	smu8_ps->bapm_flags = 0;
	smu8_ps->levels[0] = data->boot_power_level;

	return 0;
}

static int smu8_dpm_get_pp_table_entry_callback(
						     struct pp_hwmgr *hwmgr,
					   struct pp_hw_power_state *hw_ps,
							  unsigned int index,
						     const void *clock_info)
{
	struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);

	const ATOM_PPLIB_CZ_CLOCK_INFO *smu8_clock_info = clock_info;

	struct phm_clock_voltage_dependency_table *table =
				    hwmgr->dyn_state.vddc_dependency_on_sclk;
	uint8_t clock_info_index = smu8_clock_info->index;

	if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1))
		clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1);

	smu8_ps->levels[index].engineClock = table->entries[clock_info_index].clk;
	smu8_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v;

	smu8_ps->level = index + 1;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
		smu8_ps->levels[index].dsDividerIndex = 5;
		smu8_ps->levels[index].ssDividerIndex = 5;
	}

	return 0;
}

static int smu8_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
{
	int result;
	unsigned long ret = 0;

	result = pp_tables_get_num_of_entries(hwmgr, &ret);

	return result ? 0 : ret;
}

static int smu8_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
		    unsigned long entry, struct pp_power_state *ps)
{
	int result;
	struct smu8_power_state *smu8_ps;

	ps->hardware.magic = smu8_magic;

	smu8_ps = cast_smu8_power_state(&(ps->hardware));

	result = pp_tables_get_entry(hwmgr, entry, ps,
			smu8_dpm_get_pp_table_entry_callback);

	smu8_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
	smu8_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;

	return result;
}

static int smu8_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct smu8_power_state);
}

static void smu8_hw_print_display_cfg(
	const struct cc6_settings *cc6_settings)
{
	PP_DBG_LOG("New Display Configuration:\n");

	PP_DBG_LOG("   cpu_cc6_disable: %d\n",
			cc6_settings->cpu_cc6_disable);
	PP_DBG_LOG("   cpu_pstate_disable: %d\n",
			cc6_settings->cpu_pstate_disable);
	PP_DBG_LOG("   nb_pstate_switch_disable: %d\n",
			cc6_settings->nb_pstate_switch_disable);
	PP_DBG_LOG("   cpu_pstate_separation_time: %d\n\n",
			cc6_settings->cpu_pstate_separation_time);
}

static int smu8_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *hw_data = hwmgr->backend;
	uint32_t data = 0;

	if (hw_data->cc6_settings.cc6_setting_changed) {

		hw_data->cc6_settings.cc6_setting_changed = false;

		smu8_hw_print_display_cfg(&hw_data->cc6_settings);

		data |= (hw_data->cc6_settings.cpu_pstate_separation_time
			& PWRMGT_SEPARATION_TIME_MASK)
			<< PWRMGT_SEPARATION_TIME_SHIFT;

		data |= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0)
			<< PWRMGT_DISABLE_CPU_CSTATES_SHIFT;

		data |= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0)
			<< PWRMGT_DISABLE_CPU_PSTATES_SHIFT;

		PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n",
			data);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetDisplaySizePowerParams,
						data);
	}

	return 0;
}


static int smu8_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
			bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
{
	struct smu8_hwmgr *hw_data = hwmgr->backend;

	if (separation_time !=
	    hw_data->cc6_settings.cpu_pstate_separation_time ||
	    cc6_disable != hw_data->cc6_settings.cpu_cc6_disable ||
	    pstate_disable != hw_data->cc6_settings.cpu_pstate_disable ||
	    pstate_switch_disable != hw_data->cc6_settings.nb_pstate_switch_disable) {

		hw_data->cc6_settings.cc6_setting_changed = true;

		hw_data->cc6_settings.cpu_pstate_separation_time =
			separation_time;
		hw_data->cc6_settings.cpu_cc6_disable =
			cc6_disable;
		hw_data->cc6_settings.cpu_pstate_disable =
			pstate_disable;
		hw_data->cc6_settings.nb_pstate_switch_disable =
			pstate_switch_disable;

	}

	return 0;
}

static int smu8_get_dal_power_level(struct pp_hwmgr *hwmgr,
		struct amd_pp_simple_clock_info *info)
{
	uint32_t i;
	const struct phm_clock_voltage_dependency_table *table =
			hwmgr->dyn_state.vddc_dep_on_dal_pwrl;
	const struct phm_clock_and_voltage_limits *limits =
			&hwmgr->dyn_state.max_clock_voltage_on_ac;

	info->engine_max_clock = limits->sclk;
	info->memory_max_clock = limits->mclk;

	for (i = table->count - 1; i > 0; i--) {
		if (limits->vddc >= table->entries[i].v) {
			info->level = table->entries[i].clk;
			return 0;
		}
	}
	return -EINVAL;
}

static int smu8_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	switch (type) {
	case PP_SCLK:
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMin,
				mask);
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMax,
				mask);
		break;
	default:
		break;
	}

	return 0;
}

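/*
 * Print the SCLK and MCLK level tables for sysfs; the NB P-state memory
 * clocks are stored highest first, so they are walked backwards to print
 * the levels in ascending order.
 */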
static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_clock_voltage_dependency_table *sclk_table =
			hwmgr->dyn_state.vddc_dependency_on_sclk;
	int i, now, size = 0;

	switch (type) {
	case PP_SCLK:
		now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC,
				ixTARGET_AND_CURRENT_PROFILE_INDEX),
				TARGET_AND_CURRENT_PROFILE_INDEX,
				CURR_SCLK_INDEX);

		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->entries[i].clk / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC,
				ixTARGET_AND_CURRENT_PROFILE_INDEX),
				TARGET_AND_CURRENT_PROFILE_INDEX,
				CURR_MCLK_INDEX);

		for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
					(SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
		break;
	default:
		break;
	}
	return size;
}

static int smu8_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	const struct smu8_power_state *ps;
	struct smu8_hwmgr *data;
	uint32_t level_index;
	uint32_t i;

	if (level == NULL || hwmgr == NULL || state == NULL)
		return -EINVAL;

	data = hwmgr->backend;
	ps = cast_const_smu8_power_state(state);

	level_index = index > ps->level - 1 ? ps->level - 1 : index;
	level->coreClock = ps->levels[level_index].engineClock;

	if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
		for (i = 1; i < ps->level; i++) {
			if (ps->levels[i].engineClock > data->dce_slow_sclk_threshold) {
				level->coreClock = ps->levels[i].engineClock;
				break;
			}
		}
	}

	if (level_index == 0)
		level->memory_clock = data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1];
	else
		level->memory_clock = data->sys_info.nbp_memory_clock[0];

	level->vddc = (smu8_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4;
	level->nonLocalMemoryFreq = 0;
	level->nonLocalMemoryWidth = 0;

	return 0;
}

static int smu8_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
	const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
{
	const struct smu8_power_state *ps = cast_const_smu8_power_state(state);

	clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex));
	clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex));

	return 0;
}

static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
						struct amd_pp_clocks *clocks)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	int i;
	struct phm_clock_voltage_dependency_table *table;

	clocks->count = smu8_get_max_sclk_level(hwmgr);
	switch (type) {
	case amd_pp_disp_clock:
		for (i = 0; i < clocks->count; i++)
			clocks->clock[i] = data->sys_info.display_clock[i] * 10;
		break;
	case amd_pp_sys_clock:
		table = hwmgr->dyn_state.vddc_dependency_on_sclk;
		for (i = 0; i < clocks->count; i++)
			clocks->clock[i] = table->entries[i].clk * 10;
		break;
	case amd_pp_mem_clock:
		clocks->count = SMU8_NUM_NBPMEMORYCLOCK;
		for (i = 0; i < clocks->count; i++)
			clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i] * 10;
		break;
	default:
		return -1;
	}

	return 0;
}

static int smu8_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
{
	struct phm_clock_voltage_dependency_table *table =
					hwmgr->dyn_state.vddc_dependency_on_sclk;
	unsigned long level;
	const struct phm_clock_and_voltage_limits *limits =
			&hwmgr->dyn_state.max_clock_voltage_on_ac;

	if ((NULL == table) || (table->count <= 0) || (clocks == NULL))
		return -EINVAL;

	level = smu8_get_max_sclk_level(hwmgr) - 1;

	if (level < table->count)
		clocks->engine_max_clock = table->entries[level].clk;
	else
		clocks->engine_max_clock = table->entries[table->count - 1].clk;

	clocks->memory_max_clock = limits->mclk;

	return 0;
}

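/*
 * Read the current temperature from THM_TCON_CUR_TMP: the raw field is
 * in 1/8 degree steps, offset by -49 degrees when the range-select bit
 * is set, then scaled by PP_TEMPERATURE_UNITS_PER_CENTIGRADES.
 */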
static int smu8_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
	int actual_temp = 0;
	uint32_t val = cgs_read_ind_register(hwmgr->device,
					     CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP);
	uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);

	if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
		actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	else
		actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return actual_temp;
}

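/*
 * Generic sensor read: the current SCLK/UVD/VCE level indices come from
 * the TARGET_AND_CURRENT_PROFILE_INDEX registers, voltages from the SVI
 * current-VID registers, and activity from the SMU; every sensor returns
 * a 4-byte value.
 */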
static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
			  void *value, int *size)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	struct phm_clock_voltage_dependency_table *table =
				hwmgr->dyn_state.vddc_dependency_on_sclk;

	struct phm_vce_clock_voltage_dependency_table *vce_table =
		hwmgr->dyn_state.vce_clock_voltage_dependency_table;

	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;

	uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
					TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
	uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
					TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
	uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
					TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);

	uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
	uint16_t vddnb, vddgfx;
	int result;

	/* size must be at least 4 bytes for all sensors */
	if (*size < 4)
		return -EINVAL;
	*size = 4;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		if (sclk_index < NUM_SCLK_LEVELS) {
			sclk = table->entries[sclk_index].clk;
			*((uint32_t *)value) = sclk;
			return 0;
		}
		return -EINVAL;
	case AMDGPU_PP_SENSOR_VDDNB:
		tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
			CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
		vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4;
		*((uint32_t *)value) = vddnb;
		return 0;
	case AMDGPU_PP_SENSOR_VDDGFX:
		tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
			CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
		vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4;
		*((uint32_t *)value) = vddgfx;
		return 0;
	case AMDGPU_PP_SENSOR_UVD_VCLK:
		if (!data->uvd_power_gated) {
			if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
				return -EINVAL;
			} else {
				vclk = uvd_table->entries[uvd_index].vclk;
				*((uint32_t *)value) = vclk;
				return 0;
			}
		}
		*((uint32_t *)value) = 0;
		return 0;
	case AMDGPU_PP_SENSOR_UVD_DCLK:
		if (!data->uvd_power_gated) {
			if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
				return -EINVAL;
			} else {
				dclk = uvd_table->entries[uvd_index].dclk;
				*((uint32_t *)value) = dclk;
				return 0;
			}
		}
		*((uint32_t *)value) = 0;
		return 0;
	case AMDGPU_PP_SENSOR_VCE_ECCLK:
		if (!data->vce_power_gated) {
			if (vce_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
1745 1746 1747
				return -EINVAL;
			} else {
				ecclk = vce_table->entries[vce_index].ecclk;
1748
				*((uint32_t *)value) = ecclk;
1749 1750 1751
				return 0;
			}
		}
1752
		*((uint32_t *)value) = 0;
1753 1754
		return 0;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGraphicsActivity);
		if (!result) {
			activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
			activity_percent = activity_percent > 100 ? 100 : activity_percent;
		} else {
			/* fall back to a nominal 50% if the SMU query fails */
			activity_percent = 50;
		}
		*((uint32_t *)value) = activity_percent;
		return 0;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
		return 0;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = smu8_thermal_get_temperature(hwmgr);
		return 0;
	default:
		return -EINVAL;
	}
}

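/*
 * Tell the SMU where the CAC buffer lives: the 64-bit addresses are
 * passed 32 bits at a time, one SMU message per half, followed by the
 * buffer size.
 */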
static int smu8_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramAddrHiVirtual,
					mc_addr_hi);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramAddrLoVirtual,
					mc_addr_low);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramAddrHiPhysical,
					virtual_addr_hi);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramAddrLoPhysical,
					virtual_addr_low);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramBufferSize,
					size);
	return 0;
}

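/*
 * Report the supported temperature range to the thermal core.  The
 * bounds start from the generic SMU7 policy; only the maximum is
 * overridden, using the HTC auto-throttling threshold plus its
 * hysteresis limit.
 */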
static int smu8_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
		struct PP_TemperatureRange *thermal_data)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));

	thermal_data->max = (data->thermal_auto_throttling_treshold +
			data->sys_info.htc_hyst_lmt) *
			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return 0;
}

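/*
 * Toggle UVD DPM in the SMU.  Enabling is honoured only when the
 * platform advertises PHM_PlatformCaps_UVDDPM; disabling is
 * unconditional.
 */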
static int smu8_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	uint32_t dpm_features = 0;

	if (enable &&
		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				  PHM_PlatformCaps_UVDDPM)) {
		data->dpm_flags |= DPMFlags_UVD_Enabled;
		dpm_features |= UVD_DPM_MASK;
		smum_send_msg_to_smc_with_parameter(hwmgr,
			    PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
	} else {
		dpm_features |= UVD_DPM_MASK;
		data->dpm_flags &= ~DPMFlags_UVD_Enabled;
		smum_send_msg_to_smc_with_parameter(hwmgr,
			   PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
	}
	return 0;
}

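/*
 * Reprogram UVD DPM around a gating transition.  On ungate, a stable
 * or UMD pstate pins the hard minimum to the highest vclk in the
 * dependency table before UVD DPM is re-enabled.
 */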
int smu8_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_uvd_clock_voltage_dependency_table *ptable =
		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;

	if (!bgate) {
		/* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
		if (PP_CAP(PHM_PlatformCaps_StablePState) ||
		    hwmgr->en_umd_pstate) {
			data->uvd_dpm.hard_min_clk =
				   ptable->entries[ptable->count - 1].vclk;

			smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetUvdHardMin,
				smu8_get_uvd_level(hwmgr,
					data->uvd_dpm.hard_min_clk,
					PPSMC_MSG_SetUvdHardMin));
		}
		smu8_enable_disable_uvd_dpm(hwmgr, true);
	} else {
		smu8_enable_disable_uvd_dpm(hwmgr, false);
	}

	return 0;
}

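/* VCE counterpart of smu8_enable_disable_uvd_dpm(). */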
static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	uint32_t dpm_features = 0;

	if (enable && phm_cap_enabled(
				hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_VCEDPM)) {
		data->dpm_flags |= DPMFlags_VCE_Enabled;
		dpm_features |= VCE_DPM_MASK;
		smum_send_msg_to_smc_with_parameter(hwmgr,
			    PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
	} else {
		dpm_features |= VCE_DPM_MASK;
		data->dpm_flags &= ~DPMFlags_VCE_Enabled;
		smum_send_msg_to_smc_with_parameter(hwmgr,
			   PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
	}

	return 0;
}

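/*
 * Power-gate or ungate UVD.  Gating applies PG then CG, parks UVD DPM
 * and powers the block down; ungating reverses the order so the block
 * is powered and clocked before UVD DPM is reprogrammed.
 */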
static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	data->uvd_power_gated = bgate;

	if (bgate) {
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_UVD,
						AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_UVD,
						AMD_CG_STATE_GATE);
		smu8_dpm_update_uvd_dpm(hwmgr, true);
		smu8_dpm_powerdown_uvd(hwmgr);
	} else {
		smu8_dpm_powerup_uvd(hwmgr);
		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_UVD,
						AMD_CG_STATE_UNGATE);
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_UVD,
						AMD_PG_STATE_UNGATE);
		smu8_dpm_update_uvd_dpm(hwmgr, false);
	}
}

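/* VCE counterpart of smu8_dpm_powergate_uvd(). */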
static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	if (bgate) {
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_VCE,
					AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_VCE,
					AMD_CG_STATE_GATE);
		smu8_enable_disable_vce_dpm(hwmgr, false);
		smu8_dpm_powerdown_vce(hwmgr);
		data->vce_power_gated = true;
	} else {
		smu8_dpm_powerup_vce(hwmgr);
		data->vce_power_gated = false;
		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_VCE,
					AMD_CG_STATE_UNGATE);
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_VCE,
					AMD_PG_STATE_UNGATE);
		smu8_dpm_update_vce_dpm(hwmgr);
		smu8_enable_disable_vce_dpm(hwmgr, true);
	}
}

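/* hwmgr callbacks implemented by the SMU8 backend */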
static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
	.backend_init = smu8_hwmgr_backend_init,
	.backend_fini = smu8_hwmgr_backend_fini,
	.apply_state_adjust_rules = smu8_apply_state_adjust_rules,
	.force_dpm_level = smu8_dpm_force_dpm_level,
	.get_power_state_size = smu8_get_power_state_size,
	.powerdown_uvd = smu8_dpm_powerdown_uvd,
	.powergate_uvd = smu8_dpm_powergate_uvd,
	.powergate_vce = smu8_dpm_powergate_vce,
	.get_mclk = smu8_dpm_get_mclk,
	.get_sclk = smu8_dpm_get_sclk,
	.patch_boot_state = smu8_dpm_patch_boot_state,
	.get_pp_table_entry = smu8_dpm_get_pp_table_entry,
	.get_num_of_pp_table_entries = smu8_dpm_get_num_of_pp_table_entries,
	.set_cpu_power_state = smu8_set_cpu_power_state,
	.store_cc6_data = smu8_store_cc6_data,
	.force_clock_level = smu8_force_clock_level,
	.print_clock_levels = smu8_print_clock_levels,
	.get_dal_power_level = smu8_get_dal_power_level,
	.get_performance_level = smu8_get_performance_level,
	.get_current_shallow_sleep_clocks = smu8_get_current_shallow_sleep_clocks,
	.get_clock_by_type = smu8_get_clock_by_type,
	.get_max_high_clocks = smu8_get_max_high_clocks,
	.read_sensor = smu8_read_sensor,
	.power_off_asic = smu8_power_off_asic,
	.asic_setup = smu8_setup_asic_task,
	.dynamic_state_management_enable = smu8_enable_dpm_tasks,
	.power_state_set = smu8_set_power_state_tasks,
	.dynamic_state_management_disable = smu8_disable_dpm_tasks,
	.notify_cac_buffer_info = smu8_notify_cac_buffer_info,
	.get_thermal_temperature_range = smu8_get_thermal_temperature_range,
};

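/*
 * Bind the SMU8 hwmgr and pptable callback tables; called by the
 * powerplay core during hwmgr setup.
 */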
int smu8_init_function_pointers(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &smu8_hwmgr_funcs;
	hwmgr->pptable_func = &pptable_funcs;
	return 0;
}