smu_v11_0.c
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
#include "soc15_common.h"
#include "atom.h"
#include "vega20_ppt.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"

#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
#include "asic_reg/mp/mp_11_0_offset.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
#include "asic_reg/nbio/nbio_7_4_offset.h"
#include "asic_reg/nbio/nbio_7_4_sh_mask.h"
#include "asic_reg/smuio/smuio_11_0_0_offset.h"
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"

MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
MODULE_FIRMWARE("amdgpu/arcturus_smc.bin");
MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
MODULE_FIRMWARE("amdgpu/navi14_smc.bin");

#define SMU11_VOLTAGE_SCALE 4
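
/*
 * Driver <-> SMU messaging goes through the MP1 C2PMSG mailbox registers:
 * the message index is written to mmMP1_SMN_C2PMSG_66, an optional 32-bit
 * argument to mmMP1_SMN_C2PMSG_82, and the firmware's response is polled
 * from mmMP1_SMN_C2PMSG_90 (0 = still busy, 0x1 = success, any other
 * non-zero value = failure).  The helpers below implement that sequence.
 */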

static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
					      uint16_t msg)
{
	struct amdgpu_device *adev = smu->adev;
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
	return 0;
}

static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
	return 0;
}

static int smu_v11_0_wait_for_response(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t cur_value, i, timeout = adev->usec_timeout * 10;

	for (i = 0; i < timeout; i++) {
		cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;
		udelay(1);
	}

	/* a timeout means the message handshake logic is wrong */
	if (i == timeout)
		return -ETIME;

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}

static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	index = smu_msg_get_index(smu, msg);
	if (index < 0)
		return index;

	smu_v11_0_wait_for_response(smu);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);

	ret = smu_v11_0_wait_for_response(smu);

	if (ret)
		pr_err("failed send message: %10s (%d) response %#x\n",
		       smu_get_message_name(smu, msg), index, ret);

	return ret;

}

static int
smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
			      uint32_t param)
{

	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	index = smu_msg_get_index(smu, msg);
	if (index < 0)
		return index;

	ret = smu_v11_0_wait_for_response(smu);
	if (ret)
		pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
		       smu_get_message_name(smu, msg), index, param, ret);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);

	smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);

	ret = smu_v11_0_wait_for_response(smu);
	if (ret)
		pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
		       smu_get_message_name(smu, msg), index, param, ret);

	return ret;
}
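
/*
 * Typical usage: callers pack any sub-selector into the upper half of the
 * parameter word, e.g. smu_v11_0_display_clock_voltage_request() below sends
 * SMU_MSG_SetHardMinByFreq with ((clk_id << 16) | clk_freq).
 */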

static int smu_v11_0_init_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const char *chip_name;
	char fw_name[30];
	int err = 0;
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		chip_name = "vega20";
		break;
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);

	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->pm.fw);
	if (err)
		goto out;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err) {
		DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
			  fw_name);
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
	}
	return err;
}

static int smu_v11_0_load_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const uint32_t *src;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t i;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	src = (const uint32_t *)(adev->pm.fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	for (i = 1; i < MP1_SMC_SIZE/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}

	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		1 & MP1_SMN_PUB_CTRL__RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
			(smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
			MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -ETIME;

	return 0;
}

static int smu_v11_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
				   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

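/*
 * The reported SMU firmware version packs major.minor.debug into one word:
 * bits 31:16 = major, 15:8 = minor, 7:0 = debug.  For example (illustrative
 * value only), 0x002A3200 decodes as 42.50.0.
 */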
static int smu_v11_0_check_fw_version(struct smu_context *smu)
{
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint16_t smu_major;
	uint8_t smu_minor, smu_debug;
	int ret = 0;

	ret = smu_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_major = (smu_version >> 16) & 0xffff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;

	/*
	 * 1. if_version mismatch is not critical as our fw is designed
	 * to be backward compatible.
	 * 2. New fw usually brings some optimizations. But that's visible
	 * only on the paired driver.
	 * Considering above, we just leave user a warning message instead
	 * of halt driver loading.
	 */
	if (if_version != smu->smc_if_version) {
		pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			"smu fw version = 0x%08x (%d.%d.%d)\n",
			smu->smc_if_version, if_version,
			smu_version, smu_major, smu_minor, smu_debug);
		pr_warn("SMU driver if version not matched\n");
	}

	return ret;
}

static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t ppt_offset_bytes;
	const struct smc_firmware_header_v2_0 *v2;

	v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;

	ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
	*size = le32_to_cpu(v2->ppt_size_bytes);
	*table = (uint8_t *)v2 + ppt_offset_bytes;

	return 0;
}

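/*
 * A v2.1 SMC firmware header carries several soft power-play tables; the
 * entry array at pptable_entry_offset is scanned for the entry whose id
 * matches the pptable_id supplied by the VBIOS boot values.
 */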
static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
				      uint32_t *size, uint32_t pptable_id)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v2_1 *v2_1;
	struct smc_soft_pptable_entry *entries;
	uint32_t pptable_count = 0;
	int i = 0;

	v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
	entries = (struct smc_soft_pptable_entry *)
		((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
	pptable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < pptable_count; i++) {
		if (le32_to_cpu(entries[i].id) == pptable_id) {
			*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
			*size = le32_to_cpu(entries[i].ppt_size_bytes);
			break;
		}
	}

	if (i == pptable_count)
		return -EINVAL;

	return 0;
}

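/*
 * The power-play table either ships embedded in the SMC firmware image
 * (header version 2.x, when the VBIOS supplies a pp_table_id) or is read
 * from the ATOM "powerplayinfo" data table.
 */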
static int smu_v11_0_setup_pptable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v1_0 *hdr;
	int ret, index;
	uint32_t size;
	uint8_t frev, crev;
	void *table;
	uint16_t version_major, version_minor;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	version_major = le16_to_cpu(hdr->header.header_version_major);
	version_minor = le16_to_cpu(hdr->header.header_version_minor);
	if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
		switch (version_minor) {
		case 0:
			ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
			break;
		case 1:
			ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
							 smu->smu_table.boot_values.pp_table_id);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (ret)
			return ret;

	} else {
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    powerplayinfo);

		ret = smu_get_atom_data_table(smu, index, (uint16_t *)&size, &frev, &crev,
					      (uint8_t **)&table);
		if (ret)
			return ret;
	}

	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}

static int smu_v11_0_init_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
		return -EINVAL;

	return smu_alloc_dpm_context(smu);
}

static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
		return -EINVAL;

	kfree(smu_dpm->dpm_context);
	kfree(smu_dpm->golden_dpm_context);
	kfree(smu_dpm->dpm_current_power_state);
	kfree(smu_dpm->dpm_request_power_state);
	smu_dpm->dpm_context = NULL;
	smu_dpm->golden_dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;
	smu_dpm->dpm_current_power_state = NULL;
	smu_dpm->dpm_request_power_state = NULL;

	return 0;
}

static int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = NULL;
	int ret = 0;

	if (smu_table->tables || smu_table->table_count == 0)
		return -EINVAL;

	tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
			 GFP_KERNEL);
	if (!tables)
		return -ENOMEM;

	smu_table->tables = tables;

	ret = smu_tables_init(smu, tables);
	if (ret)
		return ret;

	ret = smu_v11_0_init_dpm_context(smu);
	if (ret)
		return ret;

	return 0;
}

static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	int ret = 0;

	if (!smu_table->tables || smu_table->table_count == 0)
		return -EINVAL;

	kfree(smu_table->tables);
	kfree(smu_table->metrics_table);
	smu_table->tables = NULL;
	smu_table->table_count = 0;
	smu_table->metrics_table = NULL;
	smu_table->metrics_time = 0;

	ret = smu_v11_0_fini_dpm_context(smu);
	if (ret)
		return ret;
	return 0;
}

static int smu_v11_0_init_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu->pm_enabled)
		return 0;
	if (smu_power->power_context || smu_power->power_context_size != 0)
		return -EINVAL;

	smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
					   GFP_KERNEL);
	if (!smu_power->power_context)
		return -ENOMEM;
	smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);

	return 0;
}

static int smu_v11_0_fini_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu->pm_enabled)
		return 0;
	if (!smu_power->power_context || smu_power->power_context_size == 0)
		return -EINVAL;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;

	return 0;
}

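/*
 * Boot-up clocks and voltages come from the ATOM firmwareinfo table.  Only
 * format revision 3 is accepted; content revisions 0-2 use the v3_1 layout
 * (no pptable id), revision 3 and later use v3_3 which also supplies the
 * pplib pptable id consumed by smu_v11_0_setup_pptable().
 */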
int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
				      (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		pr_err("unknown atom_firmware_info version! for smu11\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		break;
	case 3:
	default:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
	}

	return 0;
}

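/*
 * Each SYSPLL0 clock (SOCCLK, DCEFCLK, ECLK, VCLK, DCLK) is queried through
 * the ATOM "getsmuclockinfo" command table; the result comes back in Hz and
 * is stored in the boot values in 10 kHz units (hence the / 10000).
 */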
static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
{
	int ret, index;
	struct amdgpu_device *adev = smu->adev;
	struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
	struct atom_get_smu_clock_info_output_parameters_v3_1 *output;

	input.clk_id = SMU11_SYSPLL0_SOCCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_DCEFCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_ECLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.eclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_VCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.vclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_DCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	return 0;
}

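/*
 * Hand the driver-allocated memory pool to the SMU: both the CPU virtual
 * address and the GPU (MC) address are passed as high/low 32-bit halves,
 * followed by the pool size in bytes.
 */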
static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_low, address_high;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

	address = (uintptr_t)memory_pool->cpu_addr;
	address_high = (uint32_t)upper_32_bits(address);
	address_low  = (uint32_t)lower_32_bits(address);

	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetSystemVirtualDramAddrHigh,
					  address_high);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetSystemVirtualDramAddrLow,
					  address_low);
	if (ret)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low  = (uint32_t)lower_32_bits(address);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					  address_high);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					  address_low);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					  (uint32_t)memory_pool->size);
	if (ret)
		return ret;

	return ret;
}

static int smu_v11_0_check_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_check_powerplay_table(smu);
	return ret;
}

static int smu_v11_0_parse_pptable(struct smu_context *smu)
{
	int ret;

	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];

	if (table_context->driver_pptable)
		return -EINVAL;

	table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);

	if (!table_context->driver_pptable)
		return -ENOMEM;

	ret = smu_store_powerplay_table(smu);
	if (ret)
		return -EINVAL;

	ret = smu_append_powerplay_table(smu);

	return ret;
}

static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_set_default_dpm_table(smu);

	return ret;
}

static int smu_v11_0_write_pptable(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	ret = smu_update_table(smu, SMU_TABLE_PPTABLE, 0,
			       table_context->driver_pptable, true);

	return ret;
}

static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = NULL;

	table = &smu_table->tables[SMU_TABLE_WATERMARKS];
	if (!table)
		return -EINVAL;

	if (!table->cpu_addr)
		return -EINVAL;

	ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
				true);

	return ret;
}

static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
	int ret;

	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetMinDeepSleepDcefclk, clk);
	if (ret)
		pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");

	return ret;
}

static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;

	if (!smu->pm_enabled)
		return 0;
	if (!table_context)
		return -EINVAL;

	return smu_set_deep_sleep_dcefclk(smu,
					  table_context->boot_values.dcefclk / 100);
}

static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_send_smc_msg_with_param(smu,
				SMU_MSG_SetToolsDramAddrHigh,
				upper_32_bits(tool_table->mc_address));
		if (!ret)
			ret = smu_send_smc_msg_with_param(smu,
				SMU_MSG_SetToolsDramAddrLow,
				lower_32_bits(tool_table->mc_address));
	}

	return ret;
}

static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count);
	return ret;
}


static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	mutex_lock(&feature->mutex);
	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
		goto failed;

	bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					  feature_mask[1]);
	if (ret)
		goto failed;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
					  feature_mask[0]);
	if (ret)
		goto failed;

failed:
	mutex_unlock(&feature->mutex);
	return ret;
}

static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
				      uint32_t *feature_mask, uint32_t num)
{
	uint32_t feature_mask_high = 0, feature_mask_low = 0;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
	if (ret)
		return ret;
	ret = smu_read_smc_arg(smu, &feature_mask_high);
	if (ret)
		return ret;

	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
	if (ret)
		return ret;
	ret = smu_read_smc_arg(smu, &feature_mask_low);
	if (ret)
		return ret;

	feature_mask[0] = feature_mask_low;
	feature_mask[1] = feature_mask_high;

	return ret;
}

static int smu_v11_0_system_features_control(struct smu_context *smu,
					     bool en)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_mask[2];
	int ret = 0;

	if (smu->pm_enabled) {
		ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
					     SMU_MSG_DisableAllSmuFeatures));
		if (ret)
			return ret;
	}

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		return ret;

	bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
		    feature->feature_num);
	bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
		    feature->feature_num);

	return ret;
}

static int smu_v11_0_notify_display_change(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;
	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
	    smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);

	return ret;
}

static int
smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
{
	int ret = 0;
	int clk_id;

	if (!smu->pm_enabled)
		return ret;

	clk_id = smu_clk_get_index(smu, clock_select);
	if (clk_id < 0)
		return -EINVAL;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					  clk_id << 16);
	if (ret) {
		pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	ret = smu_read_smc_arg(smu, clock);
	if (ret)
		return ret;

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					  clk_id << 16);
	if (ret) {
		pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	ret = smu_read_smc_arg(smu, clock);

	return ret;
}

static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
	int ret = 0;

	max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
					 GFP_KERNEL);
	smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;

	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
							  SMU_UCLK);
		if (ret) {
			pr_err("[%s] failed to get max UCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
							  SMU_SOCCLK);
		if (ret) {
			pr_err("[%s] failed to get max SOCCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
							  SMU_DCEFCLK);
		if (ret) {
			pr_err("[%s] failed to get max DCEFCLK from SMC!",
			       __func__);
			return ret;
		}

		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
							  SMU_DISPCLK);
		if (ret) {
			pr_err("[%s] failed to get max DISPCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
							  SMU_PHYCLK);
		if (ret) {
			pr_err("[%s] failed to get max PHYCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
							  SMU_PIXCLK);
		if (ret) {
			pr_err("[%s] failed to get max PIXCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}

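/*
 * Default limits may be raised by the overdrive (OD) percentage from the
 * power-play table, e.g. a 10% TDPODLimit turns a 200 W default into 220 W;
 * non-default reads ask the firmware directly via SMU_MSG_GetPptLimit for
 * the AC power source.
 */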
static int smu_v11_0_get_power_limit(struct smu_context *smu,
				     uint32_t *limit,
				     bool get_default)
{
	int ret = 0;
	int power_src;

	power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
	if (power_src < 0)
		return -EINVAL;

	if (get_default) {
		mutex_lock(&smu->mutex);
		*limit = smu->default_power_limit;
		if (smu->od_enabled) {
			*limit *= (100 + smu->smu_table.TDPODLimit);
			*limit /= 100;
		}
		mutex_unlock(&smu->mutex);
	} else {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
			power_src << 16);
		if (ret) {
			pr_err("[%s] get PPT limit failed!", __func__);
			return ret;
		}
		smu_read_smc_arg(smu, limit);
		smu->power_limit = *limit;
	}

	return ret;
}

static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
	uint32_t max_power_limit;
	int ret = 0;

	if (n == 0)
		n = smu->default_power_limit;

	max_power_limit = smu->default_power_limit;

	if (smu->od_enabled) {
		max_power_limit *= (100 + smu->smu_table.TDPODLimit);
		max_power_limit /= 100;
	}
	if (n > max_power_limit)
		return -EINVAL;

	if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
	if (ret) {
		pr_err("[%s] Set power limit Failed!", __func__);
		return ret;
	}

	return ret;
}

static int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
					  enum smu_clk_type clk_id,
					  uint32_t *value)
{
	int ret = 0;
	uint32_t freq = 0;
	int asic_clk_id;

	if (clk_id >= SMU_CLK_COUNT || !value)
		return -EINVAL;

	asic_clk_id = smu_clk_get_index(smu, clk_id);
	if (asic_clk_id < 0)
		return -EINVAL;

	/* if the ASIC has no GetDpmClockFreq message, read the current clock from the SmuMetrics_t table instead */
	if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) < 0)
		ret =  smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
	else {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
						  (asic_clk_id << 16));
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, &freq);
		if (ret)
			return ret;
	}

	freq *= 100;
	*value = freq;

	return ret;
}

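/*
 * Program the thermal alert window: the low/high trip points (degrees C,
 * clamped to SMU_THERMAL_MINIMUM/MAXIMUM_ALERT_TEMP) land in the
 * DIG_THERM_INTL/INTH fields of THM_THERMAL_INT_CTRL.
 */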
static int smu_v11_0_set_thermal_range(struct smu_context *smu,
				       struct smu_temperature_range *range)
{
	struct amdgpu_device *adev = smu->adev;
	int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
	int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
	uint32_t val;

	if (!range)
		return -EINVAL;

	if (low < range->min)
		low = range->min;
	if (high > range->max)
		high = range->max;

	low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, range->min);
	high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, range->max);

	if (low > high)
		return -EINVAL;

	val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
	val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

	return 0;
}

static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t val = 0;

	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
	val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);

	return 0;
}

static int smu_v11_0_start_thermal_control(struct smu_context *smu)
{
	int ret = 0;
	struct smu_temperature_range range = {
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX};
	struct amdgpu_device *adev = smu->adev;

	if (!smu->pm_enabled)
		return ret;

	ret = smu_get_thermal_temperature_range(smu, &range);
	if (ret)
		return ret;

	if (smu->smu_table.thermal_controller_type) {
		ret = smu_v11_0_set_thermal_range(smu, &range);
		if (ret)
			return ret;

		ret = smu_v11_0_enable_thermal_alert(smu);
		if (ret)
			return ret;

		ret = smu_set_thermal_fan_table(smu);
		if (ret)
			return ret;
	}

	adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.min_mem_temp = range.mem_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return ret;
}

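/*
 * SVI0 telemetry reports the GFX voltage as a VID code: 0 corresponds to
 * 1.55 V and each step subtracts 6.25 mV, so (6200 - vid * 25) is the
 * voltage in 0.25 mV units and dividing by SMU11_VOLTAGE_SCALE (4) yields
 * millivolts.
 */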
static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
}

static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t vdd = 0, val_vid = 0;

	if (!value)
		return -EINVAL;
	val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

	vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

	*value = vdd;

	return 0;

}

static int smu_v11_0_read_sensor(struct smu_context *smu,
				 enum amd_pp_sensors sensor,
				 void *data, uint32_t *size)
{
	int ret = 0;
	switch (sensor) {
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		ret = smu_common_read_sensor(smu, sensor, data, size);
		break;
	}

	/* try get sensor data by asic */
	if (ret)
		ret = smu_asic_read_sensor(smu, sensor, data, size);

	if (ret)
		*size = 0;

	return ret;
}

static int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
					struct pp_display_clock_request
					*clock_req)
{
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	int ret = 0;
	enum smu_clk_type clk_select = 0;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
	int clk_id;

	if (!smu->pm_enabled)
		return -EINVAL;

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
		smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = SMU_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = SMU_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = SMU_PHYCLK;
			break;
		case amd_pp_mem_clock:
			clk_select = SMU_UCLK;
			break;
		default:
			pr_info("[%s] Invalid Clock Type!", __func__);
			ret = -EINVAL;
			break;
		}

		if (ret)
			goto failed;

		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
			return 0;

		clk_id = smu_clk_get_index(smu, clk_select);
		if (clk_id < 0) {
			ret = -EINVAL;
			goto failed;
		}


		mutex_lock(&smu->mutex);
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
			(clk_id << 16) | clk_freq);
		mutex_unlock(&smu->mutex);

	if (clk_select == SMU_UCLK)
			smu->hard_min_uclk_req_from_dal = clk_freq;
	}

failed:
	return ret;
}

static int
smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct
					  dm_pp_wm_sets_with_clock_ranges_soc15
					  *clock_ranges)
{
	int ret = 0;
	struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
	void *table = watermarks->cpu_addr;

	if (!smu->disable_watermark &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		smu_set_watermarks_table(smu, table, clock_ranges);
		smu->watermarks_bitmap |= WATERMARKS_EXIST;
		smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
	}

	return ret;
}

static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			return 0;
		mutex_lock(&smu->mutex);
		if (enable)
			ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
		else
			ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
		mutex_unlock(&smu->mutex);
		break;
	default:
		break;
	}

	return ret;
}

static uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
	if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return AMD_FAN_CTRL_MANUAL;
	else
		return AMD_FAN_CTRL_AUTO;
}

static int
smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
	int ret = 0;

	if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return 0;

	ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
	if (ret)
		pr_err("[%s]%s smc FAN CONTROL feature failed!",
		       __func__, (auto_fan_control ? "Start" : "Stop"));

	return ret;
}

static int
smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, TMIN, 0));
	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, FDO_PWM_MODE, mode));

	return 0;
}

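/*
 * Manual fan control: the requested percentage is scaled against the
 * FMAX_DUTY100 calibration value (duty = speed * duty100 / 100) and written
 * as a static PWM duty cycle after automatic fan control is disabled.
 */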
static int
smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	if (speed > 100)
		speed = 100;

	if (smu_v11_0_auto_fan_control(smu, 0))
		return -EINVAL;

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	if (!duty100)
		return -EINVAL;

	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 100);
	duty = (uint32_t)tmp64;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
				   CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));

	return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}

static int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
			       uint32_t mode)
{
	int ret = 0;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		ret = smu_v11_0_set_fan_speed_percent(smu, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		ret = smu_v11_0_auto_fan_control(smu, 0);
		break;
	case AMD_FAN_CTRL_AUTO:
		ret = smu_v11_0_auto_fan_control(smu, 1);
		break;
	default:
		break;
	}

	if (ret) {
		pr_err("[%s]Set fan control mode failed!", __func__);
		return -EINVAL;
	}

	return ret;
}

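/*
 * RPM-based control programs the tachometer target period derived from the
 * reference (xclk) crystal clock: tach_period = 60 * crystal_clock_freq *
 * 10000 / (8 * speed), then switches the fan to static-RPM mode.
 */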
static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
				       uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;
	uint32_t tach_period, crystal_clock_freq;

	if (!speed)
		return -EINVAL;

	mutex_lock(&(smu->mutex));
	ret = smu_v11_0_auto_fan_control(smu, 0);
	if (ret)
		goto set_fan_speed_rpm_failed;

	crystal_clock_freq = amdgpu_asic_get_xclk(adev);
	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
				   CG_TACH_CTRL, TARGET_PERIOD,
				   tach_period));

	ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);

set_fan_speed_rpm_failed:
	mutex_unlock(&(smu->mutex));
	return ret;
}

#define XGMI_STATE_D0 1
#define XGMI_STATE_D3 0

static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
				     uint32_t pstate)
{
	int ret = 0;
	mutex_lock(&(smu->mutex));
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetXgmiMode,
					  pstate ? XGMI_STATE_D0 : XGMI_STATE_D3);
	mutex_unlock(&(smu->mutex));
	return ret;
}

#define THM_11_0__SRCID__THM_DIG_THERM_L2H		0		/* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH  */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L		1		/* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL  */

static int smu_v11_0_irq_process(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;

	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
		break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
		break;
		default:
			pr_warn("GPU under temperature range unknown src id (%d), detected on PCIe %d:%d.%d!\n",
				src_id,
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
		break;

		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
{
	.process = smu_v11_0_irq_process,
};

static int smu_v11_0_register_irq_handler(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = smu->irq_source;
	int ret = 0;

	/* already register */
	if (irq_src)
		return 0;

	irq_src = kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
	if (!irq_src)
		return -ENOMEM;
	smu->irq_source = irq_src;

	irq_src->funcs = &smu_v11_0_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_L2H,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_H2L,
				irq_src);
	if (ret)
		return ret;

	return ret;
}

static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
		struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_max_sustainable_clocks *sustainable_clocks = NULL;

	if (!max_clocks || !table_context->max_sustainable_clocks)
		return -EINVAL;

	sustainable_clocks = table_context->max_sustainable_clocks;

	max_clocks->dcfClockInKhz =
			(unsigned int) sustainable_clocks->dcef_clock * 1000;
	max_clocks->displayClockInKhz =
			(unsigned int) sustainable_clocks->display_clock * 1000;
	max_clocks->phyClockInKhz =
			(unsigned int) sustainable_clocks->phy_clock * 1000;
	max_clocks->pixelClockInKhz =
			(unsigned int) sustainable_clocks->pixel_clock * 1000;
	max_clocks->uClockInKhz =
			(unsigned int) sustainable_clocks->uclock * 1000;
	max_clocks->socClockInKhz =
			(unsigned int) sustainable_clocks->soc_clock * 1000;
	max_clocks->dscClockInKhz = 0;
	max_clocks->dppClockInKhz = 0;
	max_clocks->fabricClockInKhz = 0;

	return 0;
}

static int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);
	ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);
	mutex_unlock(&smu->mutex);

	return ret;
}

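/*
 * BACO (Bus Active, Chip Off) support: entry/exit is serialized under
 * smu_baco->mutex, and a full BACO reset arms the D3 sequence, enters BACO,
 * waits ~10 ms and exits again.
 */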
static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
{
	return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq);
}

static bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	uint32_t val;
	bool baco_support;

	mutex_lock(&smu_baco->mutex);
	baco_support = smu_baco->platform_support;
	mutex_unlock(&smu_baco->mutex);

	if (!baco_support)
		return false;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
		return false;

	val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
	if (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
		return true;

	return false;
}

static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	enum smu_baco_state baco_state = SMU_BACO_STATE_EXIT;

	mutex_lock(&smu_baco->mutex);
	baco_state = smu_baco->state;
	mutex_unlock(&smu_baco->mutex);

	return baco_state;
}

static int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{

	struct smu_baco_context *smu_baco = &smu->smu_baco;
	int ret = 0;

	if (smu_v11_0_baco_get_state(smu) == state)
		return 0;

	mutex_lock(&smu_baco->mutex);

	if (state == SMU_BACO_STATE_ENTER)
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, BACO_SEQ_BACO);
	else
		ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco);
	if (ret)
		goto out;

	smu_baco->state = state;
out:
	mutex_unlock(&smu_baco->mutex);
	return ret;
}

static int smu_v11_0_baco_reset(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
	if (ret)
		return ret;

	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
	if (ret)
		return ret;

	msleep(10);

	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
	if (ret)
		return ret;

	return ret;
}

static const struct smu_funcs smu_v11_0_funcs = {
	.init_microcode = smu_v11_0_init_microcode,
	.load_microcode = smu_v11_0_load_microcode,
	.check_fw_status = smu_v11_0_check_fw_status,
	.check_fw_version = smu_v11_0_check_fw_version,
	.send_smc_msg = smu_v11_0_send_msg,
	.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
	.read_smc_arg = smu_v11_0_read_arg,
	.setup_pptable = smu_v11_0_setup_pptable,
	.init_smc_tables = smu_v11_0_init_smc_tables,
	.fini_smc_tables = smu_v11_0_fini_smc_tables,
	.init_power = smu_v11_0_init_power,
	.fini_power = smu_v11_0_fini_power,
	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
	.get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
	.check_pptable = smu_v11_0_check_pptable,
	.parse_pptable = smu_v11_0_parse_pptable,
	.populate_smc_pptable = smu_v11_0_populate_smc_pptable,
	.write_pptable = smu_v11_0_write_pptable,
	.write_watermarks_table = smu_v11_0_write_watermarks_table,
	.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
	.set_tool_table_location = smu_v11_0_set_tool_table_location,
	.init_display_count = smu_v11_0_init_display_count,
	.set_allowed_mask = smu_v11_0_set_allowed_mask,
	.get_enabled_mask = smu_v11_0_get_enabled_mask,
	.system_features_control = smu_v11_0_system_features_control,
	.notify_display_change = smu_v11_0_notify_display_change,
	.get_power_limit = smu_v11_0_get_power_limit,
	.set_power_limit = smu_v11_0_set_power_limit,
	.get_current_clk_freq = smu_v11_0_get_current_clk_freq,
	.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
	.start_thermal_control = smu_v11_0_start_thermal_control,
	.read_sensor = smu_v11_0_read_sensor,
	.set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
	.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
	.set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
	.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
	.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
	.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
	.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
	.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
	.gfx_off_control = smu_v11_0_gfx_off_control,
	.register_irq_handler = smu_v11_0_register_irq_handler,
	.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
	.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
	.baco_is_support = smu_v11_0_baco_is_support,
	.baco_get_state = smu_v11_0_baco_get_state,
	.baco_set_state = smu_v11_0_baco_set_state,
	.baco_reset = smu_v11_0_baco_reset,
};

void smu_v11_0_set_smu_funcs(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->funcs = &smu_v11_0_funcs;
	switch (adev->asic_type) {
	case CHIP_VEGA20:
		vega20_set_ppt_funcs(smu);
		break;
	case CHIP_ARCTURUS:
		arcturus_set_ppt_funcs(smu);
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		navi10_set_ppt_funcs(smu);
		break;
	default:
		pr_warn("Unknown asic for smu11\n");
	}
}