/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v12_0.h"
#include "soc15_common.h"
#include "atom.h"

#include "asic_reg/mp/mp_12_0_0_offset.h"
#include "asic_reg/mp/mp_12_0_0_sh_mask.h"
#include "asic_reg/smuio/smuio_12_0_0_offset.h"
#include "asic_reg/smuio/smuio_12_0_0_sh_mask.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

// because some SMU12 based ASICs use older ip offset tables
// we should undefine this register from the smuio12 header
// to prevent confusion down the road
#undef mmPWR_MISC_CNTL_STATUS

#define smnMP1_FIRMWARE_FLAGS                                0x3010024

int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
56 57 58 59 60 61 62 63
					      uint16_t msg)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
	return 0;
}

64
/* Fetch the 32-bit argument/response word from the SMU mailbox. */
static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);

	return 0;
}

72
int smu_v12_0_wait_for_response(struct smu_context *smu)
73 74 75 76 77 78 79
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t cur_value, i;

	for (i = 0; i < adev->usec_timeout; i++) {
		cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
80 81
			return cur_value == 0x1 ? 0 : -EIO;

82 83 84 85
		udelay(1);
	}

	/* timeout means wrong logic */
86
	return -ETIME;
87 88
}

89
int
90 91
smu_v12_0_send_msg_with_param(struct smu_context *smu,
			      enum smu_message_type msg,
92 93
			      uint32_t param,
			      uint32_t *read_arg)
94 95 96 97 98 99 100 101
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	index = smu_msg_get_index(smu, msg);
	if (index < 0)
		return index;

102
	mutex_lock(&smu->message_lock);
103
	ret = smu_v12_0_wait_for_response(smu);
104
	if (ret) {
105
		dev_err(adev->dev, "Msg issuing pre-check failed and "
106
		       "SMU may be not in the right state!\n");
107
		goto out;
108
	}
109 110 111 112 113 114 115 116

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);

	smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);

	ret = smu_v12_0_wait_for_response(smu);
117
	if (ret) {
118
		dev_err(adev->dev, "Failed to send message 0x%x, response 0x%x param 0x%x\n",
119
		       index, ret, param);
120
		goto out;
121 122 123 124
	}
	if (read_arg) {
		ret = smu_v12_0_read_arg(smu, read_arg);
		if (ret) {
125
			dev_err(adev->dev, "Failed to read message arg 0x%x, response 0x%x param 0x%x\n",
126
			       index, ret, param);
127
			goto out;
128 129
		}
	}
130 131
out:
	mutex_unlock(&smu->message_lock);
132 133 134
	return ret;
}

135
int smu_v12_0_check_fw_status(struct smu_context *smu)
136 137 138 139 140 141 142 143 144 145 146 147 148 149
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
		(smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
		MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

150
int smu_v12_0_check_fw_version(struct smu_context *smu)
151
{
152 153 154
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint16_t smu_major;
	uint8_t smu_minor, smu_debug;
155 156
	int ret = 0;

157
	ret = smu_get_smc_version(smu, &if_version, &smu_version);
158
	if (ret)
159 160 161 162 163 164 165 166 167 168 169 170 171 172
		return ret;

	smu_major = (smu_version >> 16) & 0xffff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;

	/*
	 * 1. if_version mismatch is not critical as our fw is designed
	 * to be backward compatible.
	 * 2. New fw usually brings some optimizations. But that's visible
	 * only on the paired driver.
	 * Considering above, we just leave user a warning message instead
	 * of halt driver loading.
	 */
173
	if (if_version != smu->smc_driver_if_version) {
174
		dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
175
			"smu fw version = 0x%08x (%d.%d.%d)\n",
176
			smu->smc_driver_if_version, if_version,
177
			smu_version, smu_major, smu_minor, smu_debug);
178
		dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
179
	}
180 181 182 183

	return ret;
}

184
/* Power the SDMA block down (gate=true) or back up (gate=false); APU only. */
int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
{
	if (!smu->is_apu)
		return 0;

	return smu_send_smc_msg(smu,
				gate ? SMU_MSG_PowerDownSdma : SMU_MSG_PowerUpSdma,
				NULL);
}

195
/* Toggle GFX clock/power gating; a no-op unless GFX PG is supported. */
int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
{
	uint32_t arg = enable ? 1 : 0;

	if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
		return 0;

	return smu_v12_0_send_msg_with_param(smu, SMU_MSG_SetGfxCGPG, arg, NULL);
}

206 207 208 209 210 211 212 213 214 215 216 217
/**
 * smu_v12_0_get_gfxoff_status - get gfxoff status
 *
 * @smu: smu_context pointer
 *
 * This function will be used to get gfxoff status
 *
 * Returns 0=GFXOFF(default).
 * Returns 1=Transition out of GFX State.
 * Returns 2=Not in GFXOFF.
 * Returns 3=Transition into GFXOFF.
 */
uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
{
	uint32_t reg;
	uint32_t gfxOff_Status = 0;
	struct amdgpu_device *adev = smu->adev;

	/* status is a 2-bit field in SMUIO_GFX_MISC_CNTL */
	reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL);
	gfxOff_Status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
		>> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;

	return gfxOff_Status;
}

231
/*
 * Allow (enable=true) or disallow (enable=false) the GFXOFF feature.
 * When disallowing, poll until the GFX block reports "Not in GFXOFF"
 * (status 2), giving up after ~0.5 second.
 *
 * Fix: use dev_err() instead of DRM_ERROR() — the file-top comment
 * mandates the dev_* helpers for MGPU-friendly logging.
 */
int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, timeout = 500;

	if (enable) {
		ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);

	} else {
		ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);

		/* confirm gfx is back to "on" state, timeout is 0.5 second */
		while (!(smu_v12_0_get_gfxoff_status(smu) == 2)) {
			msleep(1);
			timeout--;
			if (timeout == 0) {
				dev_err(adev->dev, "disable gfxoff timeout and failed!\n");
				break;
			}
		}
	}

	return ret;
}

255
int smu_v12_0_init_smc_tables(struct smu_context *smu)
256 257 258 259
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = NULL;

260
	if (smu_table->tables)
261 262 263 264 265 266 267 268 269 270 271 272
		return -EINVAL;

	tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
			 GFP_KERNEL);
	if (!tables)
		return -ENOMEM;

	smu_table->tables = tables;

	return smu_tables_init(smu, tables);
}

273
int smu_v12_0_fini_smc_tables(struct smu_context *smu)
274 275 276
{
	struct smu_table_context *smu_table = &smu->smu_table;

277
	if (!smu_table->tables)
278 279
		return -EINVAL;

280
	kfree(smu_table->clocks_table);
281
	kfree(smu_table->tables);
282 283

	smu_table->clocks_table = NULL;
284 285 286 287
	smu_table->tables = NULL;

	return 0;
}
288

289
int smu_v12_0_set_default_dpm_tables(struct smu_context *smu)
290 291 292 293 294 295
{
	struct smu_table_context *smu_table = &smu->smu_table;

	return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
}

296 297 298 299 300 301 302 303 304
int smu_v12_0_get_enabled_mask(struct smu_context *smu,
				      uint32_t *feature_mask, uint32_t num)
{
	uint32_t feature_mask_high = 0, feature_mask_low = 0;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

305
	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
306 307 308
	if (ret)
		return ret;

309
	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
310 311 312 313 314 315 316 317 318
	if (ret)
		return ret;

	feature_mask[0] = feature_mask_low;
	feature_mask[1] = feature_mask_high;

	return ret;
}

319
int smu_v12_0_mode2_reset(struct smu_context *smu){
320
	return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
321
}
322

323
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
324 325 326 327 328 329 330
			    uint32_t min, uint32_t max)
{
	int ret = 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
331
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL);
332 333 334
		if (ret)
			return ret;

335
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL);
336 337 338 339 340
		if (ret)
			return ret;
	break;
	case SMU_FCLK:
	case SMU_MCLK:
341
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
342 343 344
		if (ret)
			return ret;

345
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL);
346 347 348 349
		if (ret)
			return ret;
	break;
	case SMU_SOCCLK:
350
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min, NULL);
351 352 353
		if (ret)
			return ret;

354
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL);
355 356 357 358
		if (ret)
			return ret;
	break;
	case SMU_VCLK:
359
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL);
360 361 362
		if (ret)
			return ret;

363
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL);
364 365 366 367 368 369 370 371 372
		if (ret)
			return ret;
	break;
	default:
		return -EINVAL;
	}

	return ret;
}

int smu_v12_0_set_driver_table_location(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret = 0;

	if (driver_table->mc_address) {
		ret = smu_send_smc_msg_with_param(smu,
				SMU_MSG_SetDriverDramAddrHigh,
382 383
				upper_32_bits(driver_table->mc_address),
				NULL);
384 385 386
		if (!ret)
			ret = smu_send_smc_msg_with_param(smu,
				SMU_MSG_SetDriverDramAddrLow,
387 388
				lower_32_bits(driver_table->mc_address),
				NULL);
389 390 391 392
	}

	return ret;
}