/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pp_debug.h"
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "atom.h"
#include "amd_pcie.h"

int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (if_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, if_version);
		if (ret)
			return ret;
	}
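
	/*
	 * Note (assumption): on SMU11 firmware the value read back below is
	 * typically a packed major/minor/debug byte triple; see the ASIC's
	 * smu_check_fw_version implementation for the exact layout.
	 */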

	if (smu_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, smu_version);
		if (ret)
			return ret;
	}

	return ret;
}

int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
			   bool gate)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		ret = smu_dpm_set_uvd_enable(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		ret = smu_dpm_set_vce_enable(smu, gate);
		break;
	default:
		break;
	}

	return ret;
}

enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	/* power states are not supported */
	return POWER_STATE_TYPE_DEFAULT;
}

int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 0;

	return 0;
}

int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = smu->pstate_sclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = smu->pstate_mclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}

int smu_update_table(struct smu_context *smu, enum smu_table_id table_index,
		     void *table_data, bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = NULL;
	int ret = 0;
	int table_id = smu_table_get_index(smu, table_index);

	if (!table_data || table_id < 0 || table_id >= smu_table->table_count)
		return -EINVAL;

	table = &smu_table->tables[table_index];

	if (drv2smu)
		memcpy(table->cpu_addr, table_data, table->size);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
					  upper_32_bits(table->mc_address));
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
					  lower_32_bits(table->mc_address));
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
					  SMU_MSG_TransferTableDram2Smu :
					  SMU_MSG_TransferTableSmu2Dram,
					  table_id);
	if (ret)
		return ret;

	if (!drv2smu)
		memcpy(table_data, table->cpu_addr, table->size);

	return ret;
}
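
/*
 * Illustrative usage (table enum spelling is an example, not verified against
 * this tree): callers push a driver-side copy of a table to the SMC, or pull
 * the SMC's copy back, selected by the direction flag, e.g.
 *
 *	smu_update_table(smu, SMU_TABLE_WATERMARKS, wm_table, true);
 */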

/*
 * With amdgpu.dpm=2, Vega20 opts in to this SW SMU path; Navi10 and later
 * parts always use it.
 */
bool is_support_sw_smu(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_VEGA20)
		return amdgpu_dpm == 2;
	else if (adev->asic_type >= CHIP_NAVI10)
		return true;
	else
		return false;
}

int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}
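
/*
 * These two helpers back the amdgpu sysfs pp_table interface: reads return
 * the active (possibly user-hardcoded) powerplay table, and writes stage a
 * user-supplied table then reset the SMU so it takes effect.
 */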

int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EINVAL;

	if (header->usStructureSize != size) {
		pr_err("pp table size not matched!\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;
	mutex_unlock(&smu->mutex);

	ret = smu_reset(smu);
	if (ret)
		pr_info("smu reset failed, ret = %d\n", ret);

	return ret;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

int smu_feature_init_dpm(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	if (!smu->pm_enabled)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);

	WARN_ON(feature_id >= feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
			    bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);

	WARN_ON(feature_id >= feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = smu_feature_update_enable_state(smu, feature_id, enable);
	if (ret)
		goto failed;

	if (enable)
		test_and_set_bit(feature_id, feature->enabled);
	else
		test_and_clear_bit(feature_id, feature->enabled);

failed:
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);

	WARN_ON(feature_id >= feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_set_supported(struct smu_context *smu,
			      enum smu_feature_mask mask,
			      bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);

	WARN_ON(feature_id >= feature->feature_num);

	mutex_lock(&feature->mutex);
	if (enable)
		test_and_set_bit(feature_id, feature->supported);
	else
		test_and_clear_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}
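
/*
 * Illustrative usage of the feature helpers above (the feature name is an
 * example drawn from enum smu_feature_mask):
 *
 *	if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_UVD_BIT))
 *		ret = smu_feature_set_enabled(smu, SMU_FEATURE_DPM_UVD_BIT, false);
 */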

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
	case CHIP_NAVI10:
		if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
			smu->od_enabled = true;
		smu_v11_0_set_smu_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	mutex_init(&smu->mutex);

	return smu_set_funcs(adev);
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!smu->pm_enabled)
		return 0;

	mutex_lock(&smu->mutex);
	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);
	mutex_unlock(&smu->mutex);

	return 0;
}

int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
			    uint8_t **addr)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t data_start;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
					   size, frev, crev, &data_start))
		return -EINVAL;

	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

	return 0;
}

static int smu_initialize_pptable(struct smu_context *smu)
{
	/* TODO */
	return 0;
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	ret = smu_initialize_pptable(smu);
	if (ret) {
		pr_err("Failed to init smu_initialize_pptable!\n");
		return ret;
	}

	/*
	 * Create smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		pr_err("Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create the smu_power_context structure and allocate the
	 * smu_dpm_context needed to fill in its data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		pr_err("Failed to init smu_init_power!\n");
		return ret;
	}

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		pr_err("Failed to smu_fini_smc_tables!\n");
		return ret;
	}

	return 0;
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	/* compose the default mask after the priority table is populated */
	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	ret = smu_init_microcode(smu);
	if (ret) {
		pr_err("Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		pr_err("Failed to sw init smc table!\n");
		return ret;
	}

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		pr_err("Failed to sw fini smc table!\n");
		return ret;
	}

	ret = smu_fini_power(smu);
	if (ret) {
		pr_err("smu_fini_power failed!\n");
		return ret;
	}

	return 0;
}

static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	uint32_t table_count = smu_table->table_count;
	uint32_t i = 0;
	int32_t ret = 0;

	if (table_count <= 0)
		return -EINVAL;

	for (i = 0; i < table_count; i++) {
		if (tables[i].size == 0)
			continue;
		ret = amdgpu_bo_create_kernel(adev,
					      tables[i].size,
					      tables[i].align,
					      tables[i].domain,
					      &tables[i].bo,
					      &tables[i].mc_address,
					      &tables[i].cpu_addr);
		if (ret)
			goto failed;
	}

	return 0;

failed:
	/* unwind only the entries that were successfully allocated */
	while (i-- > 0) {
		if (tables[i].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i].bo,
				      &tables[i].mc_address,
				      &tables[i].cpu_addr);
	}
	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	uint32_t table_count = smu_table->table_count;
	uint32_t i = 0;

	if (table_count == 0 || tables == NULL)
		return 0;

	for (i = 0; i < table_count; i++) {
		if (tables[i].size == 0)
			continue;
		amdgpu_bo_free_kernel(&tables[i].bo,
				      &tables[i].mc_address,
				      &tables[i].cpu_addr);
	}

	return 0;
}

static int smu_override_pcie_parameters(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
	int ret;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
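	/*
	 * Worked example: a Gen4-capable x16 link yields pcie_gen = 3 and
	 * pcie_width = 6 below, so
	 * smu_pcie_arg = (1 << 16) | (3 << 8) | 6 = 0x10306.
	 */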
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_OverridePcieParameters,
					  smu_pcie_arg);
	if (ret)
		pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
	return ret;
}

static int smu_smc_table_hw_init(struct smu_context *smu,
				 bool initialize)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	if (smu_is_dpm_running(smu) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");
		return 0;
	}

	ret = smu_init_display(smu);
	if (ret)
		return ret;

	if (initialize) {
		/* get boot_values from vbios to set revision, gfxclk, etc. */
		ret = smu_get_vbios_bootup_values(smu);
		if (ret)
			return ret;

		ret = smu_setup_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Check that the format_revision in vbios matches the pptable
		 * header version, and that the structure size is not 0.
		 */
		ret = smu_check_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Allocate vram bos to store smc table contents.
		 */
		ret = smu_init_fb_allocations(smu);
		if (ret)
			return ret;

		/*
		 * Parse the pptable format and fill the PPTable_t smc_pptable
		 * in the smu_table_context structure; then read the
		 * smc_dpm_table from vbios and fill it into smc_pptable too.
		 */
		ret = smu_parse_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Send the GetDriverIfVersion message to check that the
		 * returned value equals the DRIVER_IF_VERSION in the smc
		 * header.
		 */
		ret = smu_check_fw_version(smu);
		if (ret)
			return ret;
	}

	/*
	 * Copy the pptable bo in vram to the smc with SMU messages such as
	 * SetDriverDramAddr and TransferTableDram2Smu.
	 */
	ret = smu_write_pptable(smu);
	if (ret)
		return ret;

	/* issue the RunAfllBtc msg */
	ret = smu_run_afll_btc(smu);
	if (ret)
		return ret;

	ret = smu_feature_set_allowed_mask(smu);
	if (ret)
		return ret;

	ret = smu_system_features_control(smu, true);
	if (ret)
		return ret;

	ret = smu_override_pcie_parameters(smu);
	if (ret)
		return ret;

	ret = smu_notify_display_change(smu);
	if (ret)
		return ret;

	/*
	 * Set the min deep sleep dce fclk to the bootup value from vbios via
	 * the SetMinDeepSleepDcefclk message.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu);
	if (ret)
		return ret;

	/*
	 * Set initialized values (from vbios) to the dpm tables context, such
	 * as gfxclk, memclk, dcefclk, etc., and enable the DPM feature for
	 * each clock type.
	 */
	if (initialize) {
		ret = smu_populate_smc_pptable(smu);
		if (ret)
			return ret;

		ret = smu_init_max_sustainable_clocks(smu);
		if (ret)
			return ret;
	}

	ret = smu_set_od8_default_settings(smu, initialize);
	if (ret)
		return ret;

	if (initialize) {
		ret = smu_populate_umd_state_clk(smu);
		if (ret)
			return ret;

		ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
		if (ret)
			return ret;
	}

	/*
	 * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr
	 * message for tools.
	 */
	ret = smu_set_tool_table_location(smu);

	if (!smu_is_dpm_running(smu))
		pr_info("dpm has been disabled\n");

	return ret;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool will be used by the SMC; the SetSystemVirtualDramAddr
 * and DramLogSetDramAddr messages notify the SMC of its location.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;
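
	/*
	 * pool_size mirrors adev->pm.smu_prv_buffer_size (see smu_sw_init),
	 * assumed to be driven by the amdgpu.smu_memory_pool_size module
	 * parameter; zero disables the pool.
	 */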

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return ret;
}

static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ret = smu_check_fw_status(smu);
		if (ret) {
			pr_err("SMC firmware status is not correct\n");
			return ret;
		}
	}

	mutex_lock(&smu->mutex);

	ret = smu_feature_init_dpm(smu);
	if (ret)
		goto failed;

	ret = smu_smc_table_hw_init(smu, true);
	if (ret)
		goto failed;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		goto failed;

	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages to
	 * notify the SMC of the pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	mutex_unlock(&smu->mutex);

	adev->pm.dpm_enabled = smu->pm_enabled;

	/* TODO: set dpm_enabled unconditionally once VCN and DAL DPM are workable */
	if (adev->asic_type != CHIP_NAVI10)
		adev->pm.dpm_enabled = true;

	pr_info("SMU is initialized successfully!\n");

	return 0;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	kfree(table_context->driver_pptable);
	table_context->driver_pptable = NULL;

	kfree(table_context->max_sustainable_clocks);
	table_context->max_sustainable_clocks = NULL;

	kfree(table_context->od_feature_capabilities);
	table_context->od_feature_capabilities = NULL;

	kfree(table_context->od_settings_max);
	table_context->od_settings_max = NULL;

	kfree(table_context->od_settings_min);
	table_context->od_settings_min = NULL;

	kfree(table_context->overdrive_table);
	table_context->overdrive_table = NULL;

	kfree(table_context->od8_settings);
	table_context->od8_settings = NULL;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	return 0;
}

int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	return ret;
}

static int smu_suspend(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	ret = smu_system_features_control(smu, false);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	return 0;
}

static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	pr_info("SMU is resuming...\n");

	mutex_lock(&smu->mutex);

	ret = smu_smc_table_hw_init(smu, false);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	mutex_unlock(&smu->mutex);

	pr_info("SMU is resumed successfully!\n");

	return 0;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
		return -EINVAL;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_set_deep_sleep_dcefclk(smu,
				   display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	smu_set_active_display_count(smu, num_of_active_display);

	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);

	mutex_unlock(&smu->mutex);

	return 0;
}

static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
{
	int ret;
	struct smu_performance_level level = {0};

	if (!clk_info)
		return -EINVAL;

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	return 0;
}

int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = {0};
	struct smu_clock_info hw_clocks;
	int ret = 0;

	if (!is_support_sw_smu(smu->adev))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_get_dal_power_level(smu, &simple_clocks);

	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
	else
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

	if (ret) {
		pr_err("Error in smu_get_clock_info\n");
		goto failed;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static int smu_enable_umd_pstate(void *handle,
		      enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context*)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
		}
	}

	return 0;
}

int smu_adjust_power_state_dynamic(struct smu_context *smu,
				   enum amd_dpm_forced_level level,
				   bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	uint32_t sclk_mask, mclk_mask, soc_mask;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled)
		return -EINVAL;

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			pr_err("Failed to change display config!\n");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		pr_err("Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_dispaly_config(smu);
		if (ret) {
			pr_err("Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		switch (level) {
		case AMD_DPM_FORCED_LEVEL_HIGH:
			ret = smu_force_dpm_limit_value(smu, true);
			break;
		case AMD_DPM_FORCED_LEVEL_LOW:
			ret = smu_force_dpm_limit_value(smu, false);
			break;

		case AMD_DPM_FORCED_LEVEL_AUTO:
			ret = smu_unforce_dpm_levels(smu);
			break;

		case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
			ret = smu_get_profiling_clk_mask(smu, level,
							 &sclk_mask,
							 &mclk_mask,
							 &soc_mask);
			if (ret)
				return ret;
			smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
			smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
			break;

		case AMD_DPM_FORCED_LEVEL_MANUAL:
		case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
		default:
			break;
		}

		if (!ret)
			smu_dpm_ctx->dpm_level = level;
	}
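
	/*
	 * fls() returns the highest set bit in workload_mask, i.e. the
	 * highest-priority profile currently requested; workload_setting[]
	 * maps that priority slot back to a PP_SMC_POWER_PROFILE_* value
	 * (priorities are assigned in smu_sw_init).
	 */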

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_set_power_profile_mode(smu, &workload, 0);
	}

	return ret;
}

int smu_handle_task(struct smu_context *smu,
		    enum amd_dpm_forced_level level,
		    enum amd_pp_task task_id)
{
	int ret = 0;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			return ret;
		ret = smu_set_cpu_power_state(smu);
		if (ret)
			return ret;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

	return ret;
}

const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};