smu7_smumgr.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */


#include "pp_debug.h"
#include "smumgr.h"
#include "smu_ucode_xfer_vi.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "ppatomctrl.h"
#include "cgs_common.h"
#include "smu7_ppsmc.h"
#include "smu7_smumgr.h"

#define SMU7_SMC_SIZE 0x20000
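
/* SMC SRAM is reached indirectly: the helpers below program the byte address
 * into SMC_IND_INDEX_11 and then move data through SMC_IND_DATA_11.
 */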

static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t limit)
{
	PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
	PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);

	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, smc_addr);
	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on CI, SMC_IND_ACCESS_CNTL is different */
	return 0;
}


int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
{
	uint32_t data;
	uint32_t addr;
	uint8_t *dest_byte;
	uint8_t i, data_byte[4] = {0};
	uint32_t *pdata = (uint32_t *)&data_byte;

	PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
	PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);

	addr = smc_start_address;

	while (byte_count >= 4) {
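		/* Read one aligned dword and convert it to host byte order; the SMC
		 * stores data MSB first (see smu7_copy_bytes_to_smc). */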
		smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);

		*dest = PP_SMC_TO_HOST_UL(data);

		dest += 1;
		byte_count -= 4;
		addr += 4;
	}

	if (byte_count) {
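		/* Handle the 1-3 trailing bytes that do not fill a whole dword. */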
		smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
		*pdata = PP_SMC_TO_HOST_UL(data);
		/* Cast dest into byte type in dest_byte.  This way, we don't overflow
		 * if the allocated memory is not 4-byte aligned. */
		dest_byte = (uint8_t *)dest;
		for (i = 0; i < byte_count; i++)
			dest_byte[i] = data_byte[i];
	}

	return 0;
}


int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
				const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
	int result;
	uint32_t data = 0;
	uint32_t original_data;
	uint32_t addr = 0;
	uint32_t extra_shift;

	PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
	PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);

	addr = smc_start_address;

	while (byte_count >= 4) {
		/* Bytes are written into the SMC address space with the MSB first. */
		data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];

		result = smu7_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	if (0 != byte_count) {
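		/* 1-3 trailing bytes: merge them into the dword already in SMC RAM
		 * with a read-modify-write so the untouched bytes are preserved. */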

		data = 0;

		result = smu7_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;


		original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);

		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* Bytes are written into the SMC address space with the MSB first. */
			data = (0x100 * data) + *src++;
			byte_count--;
		}

		data <<= extra_shift;

		data |= (original_data & ~((~0UL) << extra_shift));

		result = smu7_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data);
	}

	return 0;
}


int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr)
{
	static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
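	/* limit is sizeof(data) + 1 because smu7_copy_bytes_to_smc requires limit > address + byte_count. */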

	smu7_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);

	return 0;
}
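
/* The SMC is considered running when its clock is not gated (ck_disable == 0)
 * and its program counter has advanced to at least 0x20100.
 */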

bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
	return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
	&& (0x20100 <= cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
}
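
/* Send a message to the SMC: wait for any outstanding response, write the
 * message register, then wait for and check the new response.
 */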

int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	int ret;

	if (!smu7_is_smc_ram_running(hwmgr))
		return -EINVAL;


	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);

	if (ret != 1)
		pr_info("failed to send pre message %x, ret is %d\n", msg, ret);

	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);

	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);

	if (ret != 1)
		pr_info("failed to send message %x, ret is %d\n", msg, ret);

	return 0;
}

int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);

	return 0;
}
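
/* Write the argument into SMC_MSG_ARG_0 before issuing the message itself. */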

int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
	if (!smu7_is_smc_ram_running(hwmgr))
		return -EINVAL;

	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);

	return smu7_send_msg_to_smc(hwmgr, msg);
}

int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);

	return smu7_send_msg_to_smc_without_waiting(hwmgr, msg);
}

int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr)
{
	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);

	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);

	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	if (1 != PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP))
		pr_info("Failed to send message.\n");

	return 0;
}

int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr)
{
	if (!smu7_is_smc_ram_running(hwmgr))
		return -EINVAL;

	PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
	return 0;
}


enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
{
	enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case UCODE_ID_SMU:
		result = CGS_UCODE_ID_SMU;
		break;
	case UCODE_ID_SMU_SK:
		result = CGS_UCODE_ID_SMU_SK;
		break;
	case UCODE_ID_SDMA0:
		result = CGS_UCODE_ID_SDMA0;
		break;
	case UCODE_ID_SDMA1:
		result = CGS_UCODE_ID_SDMA1;
		break;
	case UCODE_ID_CP_CE:
		result = CGS_UCODE_ID_CP_CE;
		break;
	case UCODE_ID_CP_PFP:
		result = CGS_UCODE_ID_CP_PFP;
		break;
	case UCODE_ID_CP_ME:
		result = CGS_UCODE_ID_CP_ME;
		break;
	case UCODE_ID_CP_MEC:
		result = CGS_UCODE_ID_CP_MEC;
		break;
	case UCODE_ID_CP_MEC_JT1:
		result = CGS_UCODE_ID_CP_MEC_JT1;
		break;
	case UCODE_ID_CP_MEC_JT2:
		result = CGS_UCODE_ID_CP_MEC_JT2;
		break;
	case UCODE_ID_RLC_G:
		result = CGS_UCODE_ID_RLC_G;
		break;
	case UCODE_ID_MEC_STORAGE:
		result = CGS_UCODE_ID_STORAGE;
		break;
	default:
		break;
	}

	return result;
}


int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
{
	int result;

	result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);

	if (result)
		return result;

	*value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);
	return 0;
}

int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
{
	int result;

	result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);

	if (result)
		return result;

	cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, value);

	return 0;
}

/* Convert the firmware type to an SMU type mask. For MEC, all MEC-related types map to the same mask. */

static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type)
{
	uint32_t result = 0;

	switch (fw_type) {
	case UCODE_ID_SDMA0:
		result = UCODE_ID_SDMA0_MASK;
		break;
	case UCODE_ID_SDMA1:
		result = UCODE_ID_SDMA1_MASK;
		break;
	case UCODE_ID_CP_CE:
		result = UCODE_ID_CP_CE_MASK;
		break;
	case UCODE_ID_CP_PFP:
		result = UCODE_ID_CP_PFP_MASK;
		break;
	case UCODE_ID_CP_ME:
		result = UCODE_ID_CP_ME_MASK;
		break;
	case UCODE_ID_CP_MEC:
	case UCODE_ID_CP_MEC_JT1:
	case UCODE_ID_CP_MEC_JT2:
		result = UCODE_ID_CP_MEC_MASK;
		break;
	case UCODE_ID_RLC_G:
		result = UCODE_ID_RLC_G_MASK;
		break;
	default:
		pr_info("UCode type is out of range!\n");
		result = 0;
	}

	return result;
}

static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr,
						uint32_t fw_type,
						struct SMU_Entry *entry)
{
	int result = 0;
	struct cgs_firmware_info info = {0};

	result = cgs_get_firmware_info(hwmgr->device,
				smu7_convert_fw_type_to_cgs(fw_type),
				&info);

	if (!result) {
		entry->version = info.fw_version;
		entry->id = (uint16_t)fw_type;
		entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
		entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
		entry->meta_data_addr_high = 0;
		entry->meta_data_addr_low = 0;

		/* Under virtualization, the image carries a 20-byte digest that
		 * must be excluded from the data size. */
		if (cgs_is_virtualization_enabled(hwmgr->device))
			info.image_size -= 20;
		entry->data_size_byte = info.image_size;
		entry->num_register_entries = 0;
	}

	if ((fw_type == UCODE_ID_RLC_G)
		|| (fw_type == UCODE_ID_CP_MEC))
		entry->flags = 1;
	else
		entry->flags = 0;

	return 0;
}
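
/* Build the TOC describing each firmware image in the header buffer, hand its
 * DRAM address to the SMC, then ask the SMC to load the requested ucodes.
 */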

int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
	uint32_t fw_to_load;
	int result = 0;
	struct SMU_DRAMData_TOC *toc;

	if (!hwmgr->reload_fw) {
		pr_info("skip reloading...\n");
		return 0;
	}

	if (smu_data->soft_regs_start)
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, UcodeLoadStatus),
					0x0);

	if (hwmgr->chip_id > CHIP_TOPAZ) { /* Topaz is handled by the else branch */
		if (!cgs_is_virtualization_enabled(hwmgr->device)) {
			smu7_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SMU_DRAM_ADDR_HI,
						smu_data->smu_buffer.mc_addr_high);
			smu7_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SMU_DRAM_ADDR_LO,
						smu_data->smu_buffer.mc_addr_low);
		}
		fw_to_load = UCODE_ID_RLC_G_MASK
			   + UCODE_ID_SDMA0_MASK
			   + UCODE_ID_SDMA1_MASK
			   + UCODE_ID_CP_CE_MASK
			   + UCODE_ID_CP_ME_MASK
			   + UCODE_ID_CP_PFP_MASK
			   + UCODE_ID_CP_MEC_MASK;
	} else {
		fw_to_load = UCODE_ID_RLC_G_MASK
			   + UCODE_ID_SDMA0_MASK
			   + UCODE_ID_SDMA1_MASK
			   + UCODE_ID_CP_CE_MASK
			   + UCODE_ID_CP_ME_MASK
			   + UCODE_ID_CP_PFP_MASK
			   + UCODE_ID_CP_MEC_MASK
			   + UCODE_ID_CP_MEC_JT1_MASK
			   + UCODE_ID_CP_MEC_JT2_MASK;
	}

	toc = (struct SMU_DRAMData_TOC *)smu_data->header;
	toc->num_entries = 0;
	toc->structure_version = 1;

	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);
	if (cgs_is_virtualization_enabled(hwmgr->device))
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", return -EINVAL);

	smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
	smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);

	if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load))
		pr_err("Failed to request SMU to load uCode\n");

	return result;
}

/* Check if the FW has been loaded; this does not return until the SMC reports that loading has finished. */
int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type)
{
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
	uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type);
	uint32_t ret;

	ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11,
					smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, UcodeLoadStatus),
					fw_mask, fw_mask);
	return ret;
}

int smu7_reload_firmware(struct pp_hwmgr *hwmgr)
{
	return hwmgr->smumgr_funcs->start_smu(hwmgr);
}
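
/* Stream the firmware image into SMC RAM starting at 0x20000, using the
 * auto-increment feature of the SMC_IND_INDEX_11/SMC_IND_DATA_11 pair.
 */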

static int smu7_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, uint32_t length, uint32_t *src, uint32_t limit)
{
	uint32_t byte_count = length;

	PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);

	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, 0x20000);
	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);

	for (; byte_count >= 4; byte_count -= 4)
		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, *src++);

	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);

	PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -EINVAL);

	return 0;
}


int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);

	struct cgs_firmware_info info = {0};

	if (smu_data->security_hard_key == 1)
		cgs_get_firmware_info(hwmgr->device,
			smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
	else
		cgs_get_firmware_info(hwmgr->device,
			smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);

	hwmgr->is_kicker = info.is_kicker;

	result = smu7_upload_smc_firmware_data(hwmgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);

	return result;
}

int smu7_init(struct pp_hwmgr *hwmgr)
{
	struct smu7_smumgr *smu_data;
	uint8_t *internal_buf;
	uint64_t mc_addr = 0;

	/* Backend private data has already been allocated (it is freed in the
	 * error paths below and in smu7_smu_fini). */
	smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
	smu_data->header_buffer.data_size =
			((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;

	/* Allocate FW image data structure and header buffer and
	 * send the header buffer address to SMU */
	smu_allocate_memory(hwmgr->device,
		smu_data->header_buffer.data_size,
		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
		PAGE_SIZE,
		&mc_addr,
		&smu_data->header_buffer.kaddr,
		&smu_data->header_buffer.handle);

	smu_data->header = smu_data->header_buffer.kaddr;
	smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
	smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);

	PP_ASSERT_WITH_CODE((NULL != smu_data->header),
		"Out of memory.",
		kfree(hwmgr->smu_backend);
		cgs_free_gpu_mem(hwmgr->device,
		(cgs_handle_t)smu_data->header_buffer.handle);
		return -EINVAL);

	if (cgs_is_virtualization_enabled(hwmgr->device))
		return 0;
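
	/* Without virtualization, also allocate the SMU DRAM buffer whose address
	 * is handed to the SMC in smu7_request_smu_load_fw(). */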

	smu_data->smu_buffer.data_size = 200*4096;
	smu_allocate_memory(hwmgr->device,
		smu_data->smu_buffer.data_size,
		CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
		PAGE_SIZE,
		&mc_addr,
		&smu_data->smu_buffer.kaddr,
		&smu_data->smu_buffer.handle);

	internal_buf = smu_data->smu_buffer.kaddr;
	smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
	smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);

	PP_ASSERT_WITH_CODE((NULL != internal_buf),
		"Out of memory.",
		kfree(hwmgr->smu_backend);
		cgs_free_gpu_mem(hwmgr->device,
		(cgs_handle_t)smu_data->smu_buffer.handle);
		return -EINVAL);

	if (smum_is_hw_avfs_present(hwmgr))
		smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
	else
		smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;

	return 0;
}


int smu7_smu_fini(struct pp_hwmgr *hwmgr)
{
	kfree(hwmgr->smu_backend);
	hwmgr->smu_backend = NULL;
	cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
	return 0;
}