/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#define SMU_11_0_PARTIAL_PPTABLE

#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
#include "smu_v11_0_pptable.h"
#include "soc15_common.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_ras.h"

#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
#include "asic_reg/mp/mp_11_0_offset.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
#include "asic_reg/nbio/nbio_7_4_offset.h"
#include "asic_reg/nbio/nbio_7_4_sh_mask.h"
#include "asic_reg/smuio/smuio_11_0_0_offset.h"
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"

MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
MODULE_FIRMWARE("amdgpu/arcturus_smc.bin");
MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
MODULE_FIRMWARE("amdgpu/navi14_smc.bin");
MODULE_FIRMWARE("amdgpu/navi12_smc.bin");

#define SMU11_VOLTAGE_SCALE 4

static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
					      uint16_t msg)
{
	struct amdgpu_device *adev = smu->adev;
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
	return 0;
}

int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
	return 0;
}
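
/*
 * Driver-to-SMU messaging uses three MP1 scratch registers: C2PMSG_66
 * carries the ASIC-specific message index, C2PMSG_82 carries the optional
 * 32-bit argument (and any 32-bit result), and C2PMSG_90 holds the response
 * status (0 while the SMU is busy, 0x1 on success). A typical round trip
 * with the helpers in this file looks like the sketch below (illustration
 * only; clk_id is a placeholder):
 *
 *	uint32_t freq;
 *	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
 *					  clk_id << 16);
 *	if (!ret)
 *		ret = smu_read_smc_arg(smu, &freq);
 */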

static int smu_v11_0_wait_for_response(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t cur_value, i, timeout = adev->usec_timeout * 10;

	for (i = 0; i < timeout; i++) {
		cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;
		udelay(1);
	}

	/* a timeout means the SMU never raised a response: broken messaging logic */
	if (i == timeout)
		return -ETIME;

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}

int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	index = smu_msg_get_index(smu, msg);
	if (index < 0)
		return index;

	smu_v11_0_wait_for_response(smu);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);

	ret = smu_v11_0_wait_for_response(smu);

	if (ret)
		pr_err("failed send message: %10s (%d) response %#x\n",
		       smu_get_message_name(smu, msg), index, ret);

	return ret;
}

int
smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
			      uint32_t param)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	index = smu_msg_get_index(smu, msg);
	if (index < 0)
		return index;

	ret = smu_v11_0_wait_for_response(smu);
	if (ret)
		pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
		       smu_get_message_name(smu, msg), index, param, ret);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);

	smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);

	ret = smu_v11_0_wait_for_response(smu);
	if (ret)
		pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
		       smu_get_message_name(smu, msg), index, param, ret);

	return ret;
}

int smu_v11_0_init_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const char *chip_name;
	char fw_name[30];
	int err = 0;
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		chip_name = "vega20";
		break;
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);

	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->pm.fw);
	if (err)
		goto out;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err) {
		DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
			  fw_name);
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
	}
	return err;
}

int smu_v11_0_load_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const uint32_t *src;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t i;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	src = (const uint32_t *)(adev->pm.fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	for (i = 1; i < MP1_SMC_SIZE/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}

	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		1 & MP1_SMN_PUB_CTRL__RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
			(smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
			MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -ETIME;

	return 0;
}

int smu_v11_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
				   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

int smu_v11_0_check_fw_version(struct smu_context *smu)
{
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint16_t smu_major;
	uint8_t smu_minor, smu_debug;
	int ret = 0;

	ret = smu_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_major = (smu_version >> 16) & 0xffff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;

	switch (smu->adev->asic_type) {
	case CHIP_VEGA20:
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_VG20;
		break;
	case CHIP_ARCTURUS:
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
		break;
	case CHIP_NAVI10:
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
		break;
	case CHIP_NAVI14:
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
		break;
	default:
		pr_err("smu unsupported asic type:%d.\n", smu->adev->asic_type);
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_INV;
		break;
	}

	/*
	 * 1. if_version mismatch is not critical as our fw is designed
	 * to be backward compatible.
	 * 2. New fw usually brings some optimizations. But that's visible
	 * only on the paired driver.
	 * Considering above, we just leave user a warning message instead
	 * of halt driver loading.
	 */
	if (if_version != smu->smc_if_version) {
		pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			"smu fw version = 0x%08x (%d.%d.%d)\n",
			smu->smc_if_version, if_version,
			smu_version, smu_major, smu_minor, smu_debug);
		pr_warn("SMU driver if version not matched\n");
	}

	return ret;
}

static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t ppt_offset_bytes;
	const struct smc_firmware_header_v2_0 *v2;

	v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;

	ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
	*size = le32_to_cpu(v2->ppt_size_bytes);
	*table = (uint8_t *)v2 + ppt_offset_bytes;

	return 0;
}

static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
				      uint32_t *size, uint32_t pptable_id)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v2_1 *v2_1;
	struct smc_soft_pptable_entry *entries;
	uint32_t pptable_count = 0;
	int i = 0;

	v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
	entries = (struct smc_soft_pptable_entry *)
		((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
	pptable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < pptable_count; i++) {
		if (le32_to_cpu(entries[i].id) == pptable_id) {
			*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
			*size = le32_to_cpu(entries[i].ppt_size_bytes);
			break;
		}
	}

	if (i == pptable_count)
		return -EINVAL;

	return 0;
}

int smu_v11_0_setup_pptable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v1_0 *hdr;
	int ret, index;
	uint32_t size = 0;
	uint16_t atom_table_size;
	uint8_t frev, crev;
	void *table;
	uint16_t version_major, version_minor;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	version_major = le16_to_cpu(hdr->header.header_version_major);
	version_minor = le16_to_cpu(hdr->header.header_version_minor);
	if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
		pr_info("use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
		switch (version_minor) {
		case 0:
			ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
			break;
		case 1:
			ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
							 smu->smu_table.boot_values.pp_table_id);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (ret)
			return ret;

	} else {
		pr_info("use vbios provided pptable\n");
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    powerplayinfo);

		ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
					      (uint8_t **)&table);
		if (ret)
			return ret;
		size = atom_table_size;
	}

	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}

static int smu_v11_0_init_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
		return -EINVAL;

	return smu_alloc_dpm_context(smu);
}

static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
		return -EINVAL;

	kfree(smu_dpm->dpm_context);
	kfree(smu_dpm->golden_dpm_context);
	kfree(smu_dpm->dpm_current_power_state);
	kfree(smu_dpm->dpm_request_power_state);
	smu_dpm->dpm_context = NULL;
	smu_dpm->golden_dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;
	smu_dpm->dpm_current_power_state = NULL;
	smu_dpm->dpm_request_power_state = NULL;

	return 0;
}

int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = NULL;
	int ret = 0;

	if (smu_table->tables)
		return -EINVAL;

	tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
			 GFP_KERNEL);
	if (!tables)
		return -ENOMEM;

	smu_table->tables = tables;

	ret = smu_tables_init(smu, tables);
	if (ret)
		return ret;

	ret = smu_v11_0_init_dpm_context(smu);
	if (ret)
		return ret;

	return 0;
}

int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	int ret = 0;

	if (!smu_table->tables)
		return -EINVAL;

	kfree(smu_table->tables);
	kfree(smu_table->metrics_table);
	smu_table->tables = NULL;
	smu_table->metrics_table = NULL;
	smu_table->metrics_time = 0;

	ret = smu_v11_0_fini_dpm_context(smu);
	if (ret)
		return ret;
	return 0;
}

int smu_v11_0_init_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu->pm_enabled)
		return 0;
	if (smu_power->power_context || smu_power->power_context_size != 0)
		return -EINVAL;

	smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
					   GFP_KERNEL);
	if (!smu_power->power_context)
		return -ENOMEM;
	smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);

	return 0;
}

int smu_v11_0_fini_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu->pm_enabled)
		return 0;
	if (!smu_power->power_context || smu_power->power_context_size == 0)
		return -EINVAL;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;

	return 0;
}

int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
				      (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		pr_err("unknown atom_firmware_info version! for smu11\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		break;
	case 3:
	default:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
	}

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	return 0;
}

int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
{
	int ret, index;
	struct amdgpu_device *adev = smu->adev;
	struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
	struct atom_get_smu_clock_info_output_parameters_v3_1 *output;

	input.clk_id = SMU11_SYSPLL0_SOCCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_DCEFCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_ECLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.eclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_VCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.vclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_DCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	if ((smu->smu_table.boot_values.format_revision == 3) &&
	    (smu->smu_table.boot_values.content_revision >= 2)) {
		memset(&input, 0, sizeof(input));
		input.clk_id = SMU11_SYSPLL1_0_FCLK_ID;
		input.syspll_id = SMU11_SYSPLL1_2_ID;
		input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
		index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
						    getsmuclockinfo);

		ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
						(uint32_t *)&input);
		if (ret)
			return -EINVAL;

		output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
		smu->smu_table.boot_values.fclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
	}

	return 0;
}

int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_low, address_high;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

	address = (uintptr_t)memory_pool->cpu_addr;
	address_high = (uint32_t)upper_32_bits(address);
	address_low  = (uint32_t)lower_32_bits(address);

	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetSystemVirtualDramAddrHigh,
					  address_high);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetSystemVirtualDramAddrLow,
					  address_low);
	if (ret)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low  = (uint32_t)lower_32_bits(address);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					  address_high);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					  address_low);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					  (uint32_t)memory_pool->size);
	if (ret)
		return ret;

	return ret;
}

int smu_v11_0_check_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_check_powerplay_table(smu);
	return ret;
}

int smu_v11_0_parse_pptable(struct smu_context *smu)
{
	int ret;

	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];

	if (table_context->driver_pptable)
		return -EINVAL;

	table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);

	if (!table_context->driver_pptable)
		return -ENOMEM;

	ret = smu_store_powerplay_table(smu);
	if (ret)
		return -EINVAL;

	ret = smu_append_powerplay_table(smu);

	return ret;
}

int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_set_default_dpm_table(smu);

	return ret;
}

int smu_v11_0_write_pptable(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	ret = smu_update_table(smu, SMU_TABLE_PPTABLE, 0,
			       table_context->driver_pptable, true);

	return ret;
}

int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
	int ret;

	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetMinDeepSleepDcefclk, clk);
	if (ret)
		pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");

	return ret;
}

int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;

	if (!smu->pm_enabled)
		return 0;
	if (!table_context)
		return -EINVAL;

	return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
}

int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_send_smc_msg_with_param(smu,
				SMU_MSG_SetToolsDramAddrHigh,
				upper_32_bits(tool_table->mc_address));
		if (!ret)
			ret = smu_send_smc_msg_with_param(smu,
				SMU_MSG_SetToolsDramAddrLow,
				lower_32_bits(tool_table->mc_address));
	}

	return ret;
}

int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count);
	return ret;
}


int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	mutex_lock(&feature->mutex);
	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
		goto failed;

	bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					  feature_mask[1]);
	if (ret)
		goto failed;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
					  feature_mask[0]);
	if (ret)
		goto failed;

failed:
	mutex_unlock(&feature->mutex);
	return ret;
}

int smu_v11_0_get_enabled_mask(struct smu_context *smu,
				      uint32_t *feature_mask, uint32_t num)
{
	uint32_t feature_mask_high = 0, feature_mask_low = 0;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
	if (ret)
		return ret;
	ret = smu_read_smc_arg(smu, &feature_mask_high);
	if (ret)
		return ret;

	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
	if (ret)
		return ret;
	ret = smu_read_smc_arg(smu, &feature_mask_low);
	if (ret)
		return ret;

	feature_mask[0] = feature_mask_low;
	feature_mask[1] = feature_mask_high;

	return ret;
}
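
/*
 * Note that the 64 SMU feature bits travel as two 32-bit halves:
 * feature_mask[0] holds bits 0-31 (the "Low" message) and feature_mask[1]
 * holds bits 32-63 (the "High" message). As a purely illustrative example,
 * a firmware mask of 0x0000000300000001 comes back as
 * feature_mask[0] = 0x00000001 and feature_mask[1] = 0x00000003.
 */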

int smu_v11_0_system_features_control(struct smu_context *smu,
					     bool en)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_mask[2];
	int ret = 0;

	if (smu->pm_enabled) {
		ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
					     SMU_MSG_DisableAllSmuFeatures));
		if (ret)
			return ret;
	}

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		return ret;

	bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
		    feature->feature_num);
	bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
		    feature->feature_num);

	return ret;
}

int smu_v11_0_notify_display_change(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;
	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
	    smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);

	return ret;
}

static int
smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
{
	int ret = 0;
	int clk_id;

	if (!smu->pm_enabled)
		return ret;

	if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
	    (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
		return 0;

	clk_id = smu_clk_get_index(smu, clock_select);
	if (clk_id < 0)
		return -EINVAL;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					  clk_id << 16);
	if (ret) {
		pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	ret = smu_read_smc_arg(smu, clock);
	if (ret)
		return ret;

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					  clk_id << 16);
	if (ret) {
		pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	ret = smu_read_smc_arg(smu, clock);

	return ret;
}

int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
	int ret = 0;

	max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
					 GFP_KERNEL);
	if (!max_sustainable_clocks)
		return -ENOMEM;
	smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;

	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
							  SMU_UCLK);
		if (ret) {
			pr_err("[%s] failed to get max UCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
							  SMU_SOCCLK);
		if (ret) {
			pr_err("[%s] failed to get max SOCCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
							  SMU_DCEFCLK);
		if (ret) {
			pr_err("[%s] failed to get max DCEFCLK from SMC!",
			       __func__);
			return ret;
		}

		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
							  SMU_DISPCLK);
		if (ret) {
			pr_err("[%s] failed to get max DISPCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
							  SMU_PHYCLK);
		if (ret) {
			pr_err("[%s] failed to get max PHYCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
							  SMU_PIXCLK);
		if (ret) {
			pr_err("[%s] failed to get max PIXCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}

uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu)
{
	uint32_t od_limit, max_power_limit;
	struct smu_11_0_powerplay_table *powerplay_table = NULL;
	struct smu_table_context *table_context = &smu->smu_table;

	powerplay_table = table_context->power_play_table;

	max_power_limit = smu_get_pptable_power_limit(smu);

	if (!max_power_limit) {
		/* if we couldn't get the table limit, fall back on the first-read value */
		if (!smu->default_power_limit)
			smu->default_power_limit = smu->power_limit;
		max_power_limit = smu->default_power_limit;
	}

	if (smu->od_enabled) {
		od_limit = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);

		pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_limit, smu->default_power_limit);

		max_power_limit *= (100 + od_limit);
		max_power_limit /= 100;
	}

	return max_power_limit;
}
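
/*
 * Worked example of the overdrive scaling above (numbers illustrative, not
 * from any real pptable): with a pptable limit of 250 W and
 * ODSETTING_POWERPERCENTAGE = 20, the maximum settable limit becomes
 * 250 * (100 + 20) / 100 = 300 W.
 */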

int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
	int ret = 0;
	uint32_t max_power_limit;

	max_power_limit = smu_v11_0_get_max_power_limit(smu);

	if (n > max_power_limit) {
		pr_err("New power limit (%d) is over the max allowed %d\n",
				n,
				max_power_limit);
		return -EINVAL;
	}

	if (n == 0)
		n = smu->default_power_limit;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		pr_err("Setting new power limit is not supported!\n");
		return -EOPNOTSUPP;
	}

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
	if (ret) {
		pr_err("[%s] Set power limit Failed!\n", __func__);
		return ret;
	}
	smu->power_limit = n;

	return 0;
}

int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
					  enum smu_clk_type clk_id,
					  uint32_t *value)
{
	int ret = 0;
	uint32_t freq = 0;
	int asic_clk_id;

	if (clk_id >= SMU_CLK_COUNT || !value)
		return -EINVAL;

	asic_clk_id = smu_clk_get_index(smu, clk_id);
	if (asic_clk_id < 0)
		return -EINVAL;

	/*
	 * If the GetDpmClockFreq message is not supported, read the
	 * current clock from the SmuMetrics_t table instead.
	 */
	if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) < 0) {
		ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
	} else {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
						  (asic_clk_id << 16));
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, &freq);
		if (ret)
			return ret;
	}

	freq *= 100;
	*value = freq;

	return ret;
}

static int smu_v11_0_set_thermal_range(struct smu_context *smu,
				       struct smu_temperature_range range)
{
	struct amdgpu_device *adev = smu->adev;
	int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
	int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
	uint32_t val;

	low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
			range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
	high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
			range.max / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);

	if (low > high)
		return -EINVAL;

	val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
	val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

	return 0;
}
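
/*
 * The range values above are divided by SMU_TEMPERATURE_UNITS_PER_CENTIGRADES
 * to get whole degrees celsius; assuming that constant is 1000 (i.e. the
 * ranges are in millicelsius), range.max = 99000 would program DIG_THERM_INTH
 * with 99.
 */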

static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t val = 0;

	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
	val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);

	return 0;
}

int smu_v11_0_start_thermal_control(struct smu_context *smu)
{
	int ret = 0;
	struct smu_temperature_range range;
	struct amdgpu_device *adev = smu->adev;

	if (!smu->pm_enabled)
		return ret;

	memcpy(&range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));

	ret = smu_get_thermal_temperature_range(smu, &range);
	if (ret)
		return ret;

	if (smu->smu_table.thermal_controller_type) {
		ret = smu_v11_0_set_thermal_range(smu, range);
		if (ret)
			return ret;

		ret = smu_v11_0_enable_thermal_alert(smu);
		if (ret)
			return ret;

		ret = smu_set_thermal_fan_table(smu);
		if (ret)
			return ret;
	}

	adev->pm.dpm.thermal.min_temp = range.min;
	adev->pm.dpm.thermal.max_temp = range.max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;

	return ret;
}

int smu_v11_0_stop_thermal_control(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);

	return 0;
}

static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
}
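
/*
 * The SVI2 telemetry VID encodes voltage as 1.55 V minus 6.25 mV per step;
 * with SMU11_VOLTAGE_SCALE = 4 the expression above works out to millivolts.
 * Worked example (illustrative): vid = 64 gives (6200 - 64 * 25) / 4 = 1150 mV.
 */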

static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t vdd = 0, val_vid = 0;

	if (!value)
		return -EINVAL;
	val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

	vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

	*value = vdd;

	return 0;
}

int smu_v11_0_read_sensor(struct smu_context *smu,
				 enum amd_pp_sensors sensor,
				 void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		ret = smu_common_read_sensor(smu, sensor, data, size);
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}

int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
					struct pp_display_clock_request
					*clock_req)
{
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	int ret = 0;
	enum smu_clk_type clk_select = 0;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;

	if (!smu->pm_enabled)
		return -EINVAL;

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
		smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = SMU_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = SMU_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = SMU_PHYCLK;
			break;
		case amd_pp_mem_clock:
			clk_select = SMU_UCLK;
			break;
		default:
			pr_info("[%s] Invalid Clock Type!", __func__);
			ret = -EINVAL;
			break;
		}

		if (ret)
			goto failed;

		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
			return 0;

		ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);

		if (clk_select == SMU_UCLK)
			smu->hard_min_uclk_req_from_dal = clk_freq;
	}

failed:
	return ret;
}

int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			return 0;
		if (enable)
			ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
		else
			ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
		break;
	default:
		break;
	}

	return ret;
}

uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
	if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return AMD_FAN_CTRL_MANUAL;
	else
		return AMD_FAN_CTRL_AUTO;
}

static int
smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
	int ret = 0;

	if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return 0;

	ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
	if (ret)
		pr_err("[%s]%s smc FAN CONTROL feature failed!",
		       __func__, (auto_fan_control ? "Start" : "Stop"));

	return ret;
}

static int
smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, TMIN, 0));
	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, FDO_PWM_MODE, mode));

	return 0;
}

int
smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	if (speed > 100)
		speed = 100;

	if (smu_v11_0_auto_fan_control(smu, 0))
		return -EINVAL;

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	if (!duty100)
		return -EINVAL;

	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 100);
	duty = (uint32_t)tmp64;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
				   CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));

	return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}

int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
			       uint32_t mode)
{
	int ret = 0;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		ret = smu_v11_0_set_fan_speed_percent(smu, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		ret = smu_v11_0_auto_fan_control(smu, 0);
		break;
	case AMD_FAN_CTRL_AUTO:
		ret = smu_v11_0_auto_fan_control(smu, 1);
		break;
	default:
		break;
	}

	if (ret) {
		pr_err("[%s]Set fan control mode failed!", __func__);
		return -EINVAL;
	}

	return ret;
}

int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
				       uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;
	uint32_t tach_period, crystal_clock_freq;

	if (!speed)
		return -EINVAL;

	ret = smu_v11_0_auto_fan_control(smu, 0);
	if (ret)
		return ret;

	crystal_clock_freq = amdgpu_asic_get_xclk(adev);
	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
				   CG_TACH_CTRL, TARGET_PERIOD,
				   tach_period));

	ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);

	return ret;
}
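
/*
 * Worked example for the tach period above (numbers illustrative): with
 * crystal_clock_freq = 100 and a requested speed of 1500 RPM,
 * tach_period = 60 * 100 * 10000 / (8 * 1500) = 5000, which is then
 * programmed into CG_TACH_CTRL.TARGET_PERIOD.
 */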

int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
				     uint32_t pstate)
{
	int ret = 0;
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetXgmiMode,
					  pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
	return ret;
}

#define THM_11_0__SRCID__THM_DIG_THERM_L2H		0		/* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH  */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L		1		/* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL  */

static int smu_v11_0_irq_process(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;

	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
			break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
			break;
		default:
			pr_warn("GPU thermal interrupt with unknown src id (%d) detected on PCIe %d:%d.%d!\n",
				src_id,
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
			break;
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
{
	.process = smu_v11_0_irq_process,
};

int smu_v11_0_register_irq_handler(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = smu->irq_source;
	int ret = 0;

	/* already register */
	if (irq_src)
		return 0;

	irq_src = kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
	if (!irq_src)
		return -ENOMEM;
	smu->irq_source = irq_src;

	irq_src->funcs = &smu_v11_0_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_L2H,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_H2L,
				irq_src);
	if (ret)
		return ret;

	return ret;
}

int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
		struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_max_sustainable_clocks *sustainable_clocks = NULL;

	if (!max_clocks || !table_context->max_sustainable_clocks)
		return -EINVAL;

	sustainable_clocks = table_context->max_sustainable_clocks;

	max_clocks->dcfClockInKhz =
			(unsigned int) sustainable_clocks->dcef_clock * 1000;
	max_clocks->displayClockInKhz =
			(unsigned int) sustainable_clocks->display_clock * 1000;
	max_clocks->phyClockInKhz =
			(unsigned int) sustainable_clocks->phy_clock * 1000;
	max_clocks->pixelClockInKhz =
			(unsigned int) sustainable_clocks->pixel_clock * 1000;
	max_clocks->uClockInKhz =
			(unsigned int) sustainable_clocks->uclock * 1000;
	max_clocks->socClockInKhz =
			(unsigned int) sustainable_clocks->soc_clock * 1000;
	max_clocks->dscClockInKhz = 0;
	max_clocks->dppClockInKhz = 0;
	max_clocks->fabricClockInKhz = 0;

	return 0;
}

int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);

	return ret;
}

static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
{
	return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq);
}

bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	uint32_t val;
	bool baco_support;

	mutex_lock(&smu_baco->mutex);
	baco_support = smu_baco->platform_support;
	mutex_unlock(&smu_baco->mutex);

	if (!baco_support)
		return false;

	/* Arcturus does not support this bit mask */
	if (smu_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
	   !smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
		return false;

	val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
	if (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
		return true;

	return false;
}

enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	enum smu_baco_state baco_state;

	mutex_lock(&smu_baco->mutex);
	baco_state = smu_baco->state;
	mutex_unlock(&smu_baco->mutex);

	return baco_state;
}

int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
1671 1672 1673
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	uint32_t bif_doorbell_intr_cntl;
	uint32_t data;
	int ret = 0;

	if (smu_v11_0_baco_get_state(smu) == state)
		return 0;

	mutex_lock(&smu_baco->mutex);

	bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);

	if (state == SMU_BACO_STATE_ENTER) {
		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
						BIF_DOORBELL_INT_CNTL,
						DOORBELL_INTERRUPT_DISABLE, 1);
		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);

		if (!ras || !ras->supported) {
			data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
			data |= 0x80000000;
			WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);

			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0);
		} else {
			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1);
		}
	} else {
		ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco);
		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
						BIF_DOORBELL_INT_CNTL,
						DOORBELL_INTERRUPT_DISABLE, 0);
		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
	}
	if (ret)
		goto out;

	smu_baco->state = state;
out:
	mutex_unlock(&smu_baco->mutex);
	return ret;
}

int smu_v11_0_baco_enter(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	/* Arcturus does not need this audio workaround */
	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
		if (ret)
			return ret;
	}

	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
	if (ret)
		return ret;

	msleep(10);

	return ret;
}

int smu_v11_0_baco_exit(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
	if (ret)
		return ret;

	return ret;
}

int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
						 uint32_t *min, uint32_t *max)
{
	int ret = 0, clk_id = 0;
	uint32_t param = 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0) {
		ret = -EINVAL;
		goto failed;
	}
	param = (clk_id & 0xffff) << 16;

	if (max) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
		if (ret)
			goto failed;
		ret = smu_read_smc_arg(smu, max);
		if (ret)
			goto failed;
	}

	if (min) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
		if (ret)
			goto failed;
		ret = smu_read_smc_arg(smu, min);
		if (ret)
			goto failed;
	}

failed:
	return ret;
}

int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
						  param);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
						  param);
		if (ret)
			return ret;
	}

	return ret;
}

int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0;
	int ret;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);

	if (ret)
		pr_err("[%s] Attempt to override pcie params failed!\n", __func__);

	return ret;

}
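
/*
 * A sketch of how the fields described above would be packed into the 32-bit
 * argument that the ASIC-specific smu_update_pcie_parameters() callback
 * ultimately sends; the exact packing lives in that callback, so treat this
 * as an assumption for illustration:
 *
 *	uint32_t arg = (0 << 16) | (pcie_gen << 8) | pcie_width;
 *	(e.g. pcie_gen = 3 (GEN4), pcie_width = 6 (x16) gives arg = 0x00000306)
 */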

int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size)
{
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	if (initialize) {
		if (table_context->overdrive_table)
			return -EINVAL;
		table_context->overdrive_table = kzalloc(overdrive_table_size, GFP_KERNEL);
		if (!table_context->overdrive_table)
			return -ENOMEM;
		ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
		if (ret) {
			pr_err("Failed to export overdrive table!\n");
			return ret;
		}
	}
	ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
	if (ret) {
		pr_err("Failed to import overdrive table!\n");
		return ret;
	}
	return ret;
}