/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "mp/mp_11_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v7_2.h"
#include "hdp_v5_0.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/* Navi */
static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};

static const struct amdgpu_video_codecs nv_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(nv_video_codecs_encode_array),
	.codec_array = nv_video_codecs_encode_array,
};

/* Navi1x */
static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs nv_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(nv_video_codecs_decode_array),
	.codec_array = nv_video_codecs_decode_array,
};

/* Sienna Cichlid */
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs sc_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(sc_video_codecs_decode_array),
	.codec_array = sc_video_codecs_decode_array,
};

/* SRIOV Sienna Cichlid, not const since data is controlled by host */
static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};

static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

static struct amdgpu_video_codecs sriov_sc_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
	.codec_array = sriov_sc_video_codecs_encode_array,
};

static struct amdgpu_video_codecs sriov_sc_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array),
	.codec_array = sriov_sc_video_codecs_decode_array,
};

/* Beige Goby */
static const struct amdgpu_video_codec_info bg_video_codecs_decode_array[] = {
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs bg_video_codecs_decode = {
	.codec_count = ARRAY_SIZE(bg_video_codecs_decode_array),
	.codec_array = bg_video_codecs_decode_array,
};

static const struct amdgpu_video_codecs bg_video_codecs_encode = {
	.codec_count = 0,
	.codec_array = NULL,
};

/* Yellow Carp */
static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
};

static const struct amdgpu_video_codecs yc_video_codecs_decode = {
	.codec_count = ARRAY_SIZE(yc_video_codecs_decode_array),
	.codec_array = yc_video_codecs_decode_array,
};

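/*
 * Return the encode or decode codec table for the current ASIC; this
 * backs the AMDGPU_INFO_VIDEO_CAPS query so userspace can discover
 * supported codecs and their maximum resolutions/levels.
 */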
static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
				 const struct amdgpu_video_codecs **codecs)
{
	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
		if (amdgpu_sriov_vf(adev)) {
			if (encode)
				*codecs = &sriov_sc_video_codecs_encode;
			else
				*codecs = &sriov_sc_video_codecs_decode;
		} else {
			if (encode)
				*codecs = &nv_video_codecs_encode;
			else
				*codecs = &sc_video_codecs_decode;
		}
		return 0;
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		if (encode)
			*codecs = &nv_video_codecs_encode;
		else
			*codecs = &sc_video_codecs_decode;
		return 0;
	case CHIP_YELLOW_CARP:
		if (encode)
			*codecs = &nv_video_codecs_encode;
		else
			*codecs = &yc_video_codecs_decode;
		return 0;
	case CHIP_BEIGE_GOBY:
		if (encode)
			*codecs = &bg_video_codecs_encode;
		else
			*codecs = &bg_video_codecs_decode;
		return 0;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		if (encode)
			*codecs = &nv_video_codecs_encode;
		else
			*codecs = &nv_video_codecs_decode;
		return 0;
	default:
		return -EINVAL;
	}
}

/*
 * Indirect register accessors
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;
	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg(adev, address, data, reg);
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}

static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;
	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}

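/*
 * PCIe port registers use their own index/data pair; the readback of
 * the index register flushes the posted write before the data access,
 * and pcie_idx_lock serializes users of the pair.
 */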
static u32 nv_pcie_port_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;
	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}

static void nv_pcie_port_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

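/* Steer register access to a given ME/pipe/queue/VMID via GRBM_GFX_CNTL */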
void nv_grbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

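/*
 * Read the VBIOS image a dword at a time through the SMUIO ROM
 * index/data registers; dGPU only, since an APU carries its VBIOS
 * inside the system BIOS image.
 */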
static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;
	u32 rom_index_offset, rom_data_offset;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	rom_index_offset =
		adev->smuio.funcs->get_rom_index_offset(adev);
	rom_data_offset =
		adev->smuio.funcs->get_rom_data_offset(adev);

	/* set rom index to 0 */
	WREG32(rom_index_offset, 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(rom_data_offset);

	return true;
}

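/* Whitelist of registers userspace may read via nv_read_register() */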
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry  *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if ((i == 7 && (adev->sdma.num_instances == 1)) || /* some asics don't have SDMA1 */
		    reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

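/*
 * Mode2 reset: have the SMU reset the GPU core with bus mastering
 * disabled; PCI config state is cached beforehand and restored
 * afterwards, then memsize is polled to detect the ASIC coming back.
 */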
static int nv_asic_mode2_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	ret = amdgpu_dpm_mode2_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode2 reset failed\n");

	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

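/*
 * Pick the reset mechanism: honor a supported amdgpu_reset_method
 * module override, otherwise choose per ASIC (mode2 for the APUs,
 * mode1 for Sienna Cichlid and newer dGPUs, BACO where supported).
 */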
static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
	    amdgpu_reset_method == AMD_RESET_METHOD_PCI)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
				  amdgpu_reset_method);

	switch (adev->asic_type) {
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
		return AMD_RESET_METHOD_MODE2;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
		return AMD_RESET_METHOD_MODE1;
	default:
		if (amdgpu_dpm_is_baco_supported(adev))
			return AMD_RESET_METHOD_BACO;
		else
			return AMD_RESET_METHOD_MODE1;
	}
}

static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (nv_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_PCI:
		dev_info(adev->dev, "PCI reset\n");
		ret = amdgpu_device_pci_reset(adev);
		break;
	case AMD_RESET_METHOD_BACO:
		dev_info(adev->dev, "BACO reset\n");
		ret = amdgpu_dpm_baco_reset(adev);
		break;
	case AMD_RESET_METHOD_MODE2:
		dev_info(adev->dev, "MODE2 reset\n");
		ret = nv_asic_mode2_reset(adev);
		break;
	default:
		dev_info(adev->dev, "MODE1 reset\n");
		ret = amdgpu_device_mode1_reset(adev);
		break;
	}

	return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (!amdgpu_aspm)
		return;

	if (!(adev->flags & AMD_IS_APU) &&
	    (adev->nbio.funcs->program_aspm))
		adev->nbio.funcs->program_aspm(adev);
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};

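/* Headless SKUs without usable VCN/JPEG, matched by PCI device ID and revision */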
static bool nv_is_headless_sku(struct pci_dev *pdev)
{
	if ((pdev->device == 0x731E &&
	    (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
	    (pdev->device == 0x7340 && pdev->revision == 0xC9)  ||
	    (pdev->device == 0x7360 && pdev->revision == 0xC7))
		return true;
	return false;
}

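/*
 * Initialize per-IP register offsets, preferring the IP discovery
 * table from the VBIOS and falling back to hardcoded per-ASIC tables
 * when discovery is disabled or fails.
 */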
static int nv_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_discovery) {
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			DRM_WARN("failed to init reg base from ip discovery table, "
					"fallback to legacy init method\n");
			goto legacy_init;
		}

		amdgpu_discovery_harvest_ip(adev);
		if (nv_is_headless_sku(adev->pdev)) {
			adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
			adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
		}

		return 0;
	}

legacy_init:
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	case CHIP_NAVI14:
		navi14_reg_base_init(adev);
		break;
	case CHIP_NAVI12:
		navi12_reg_base_init(adev);
		break;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		sienna_cichlid_reg_base_init(adev);
		break;
	case CHIP_VANGOGH:
		vangogh_reg_base_init(adev);
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dimgrey_cavefish_reg_base_init(adev);
		break;
	case CHIP_BEIGE_GOBY:
		beige_goby_reg_base_init(adev);
		break;
	case CHIP_YELLOW_CARP:
		yellow_carp_reg_base_init(adev);
		break;
	case CHIP_CYAN_SKILLFISH:
		cyan_skillfish_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

void nv_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_nv_virt_ops;
}

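/*
 * Hook up the NBIO/HDP/SMUIO function tables and register the IP
 * blocks (common, GMC, IH, PSP, SMU, display, GFX, SDMA, VCN, JPEG,
 * ...) in initialization order for the detected ASIC.
 */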
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		adev->nbio.funcs = &nbio_v7_2_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
	} else {
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
	}
	adev->hdp.funcs = &hdp_v5_0_funcs;

	if (adev->asic_type >= CHIP_SIENNA_CICHLID)
		adev->smuio.funcs = &smuio_v11_0_6_funcs;
	else
		adev->smuio.funcs = &smuio_v11_0_funcs;

	if (adev->asic_type == CHIP_SIENNA_CICHLID)
		adev->gmc.xgmi.supported = true;

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
			amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		}
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	case CHIP_SIENNA_CICHLID:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		} else {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
			amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		}
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVY_FLOUNDER:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case CHIP_VANGOGH:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		break;
	case CHIP_DIMGREY_CAVEFISH:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		break;
	case CHIP_BEIGE_GOBY:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		break;
	case CHIP_YELLOW_CARP:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{

	/* TODO
	 * dummy implementation for the pcie_replay_count sysfs interface
	 */

	return 0;
}

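/* Lay out the Navi1x doorbell index space for KIQ/MEC/GFX/SDMA/VCN/IH rings */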
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.mes_ring = AMDGPU_NAVI10_DOORBELL_MES_RING;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.sdma_engine[2] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2;
	adev->doorbell_index.sdma_engine[3] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static void nv_pre_asic_init(struct amdgpu_device *adev)
{
}

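/*
 * Called when a UMD requests a stable power state for profiling: hold
 * RLC safe mode and disable perfmon MGCG and ASPM while it is active
 * so measured clocks stay deterministic.
 */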
static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
				       bool enter)
{
	if (enter)
		amdgpu_gfx_rlc_enter_safe_mode(adev);
	else
		amdgpu_gfx_rlc_exit_safe_mode(adev);

	if (adev->gfx.funcs->update_perfmon_mgcg)
		adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);

	if (!(adev->flags & AMD_IS_APU) &&
	    (adev->nbio.funcs->enable_aspm))
		adev->nbio.funcs->enable_aspm(adev, !enter);

	return 0;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &amdgpu_dpm_is_baco_supported,
	.pre_asic_init = &nv_pre_asic_init,
	.update_umd_stable_pstate = &nv_update_umd_stable_pstate,
	.query_video_codecs = &nv_query_video_codecs,
};

static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;
	adev->pcie_rreg64 = &nv_pcie_rreg64;
	adev->pcie_wreg64 = &nv_pcie_wreg64;
	adev->pciep_rreg = &nv_pcie_port_rreg;
	adev->pciep_wreg = &nv_pcie_port_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		/* guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0,
		 * as a consequence, the rev_id and external_rev_id are wrong.
		 * workaround it by hardcoding rev_id to 0 (default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	case CHIP_SIENNA_CICHLID:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		if (amdgpu_sriov_vf(adev)) {
			/* hypervisor control CG and PG enablement */
			adev->cg_flags = 0;
			adev->pg_flags = 0;
		}
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_NAVY_FLOUNDER:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;

	case CHIP_VANGOGH:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		if (adev->apu_flags & AMD_APU_IS_VANGOGH)
			adev->external_rev_id = adev->rev_id + 0x01;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_BEIGE_GOBY:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x46;
		break;
	case CHIP_YELLOW_CARP:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		if (adev->pdev->device == 0x1681)
			adev->external_rev_id = adev->rev_id + 0x19;
		else
			adev->external_rev_id = adev->rev_id + 0x01;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
		adev->pg_flags &= ~(AMD_PG_SUPPORT_VCN |
				    AMD_PG_SUPPORT_VCN_DPG |
				    AMD_PG_SUPPORT_JPEG);

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		xgpu_nv_mailbox_get_irq(adev);
		amdgpu_virt_update_sriov_video_codec(adev,
				sriov_sc_video_codecs_encode_array, ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
				sriov_sc_video_codecs_decode_array, ARRAY_SIZE(sriov_sc_video_codecs_decode_array));
	}

	return 0;
}

static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}

static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->nbio.funcs->apply_lc_spc_mode_wa)
		adev->nbio.funcs->apply_lc_spc_mode_wa(adev);

	if (adev->nbio.funcs->apply_l1_link_width_reconfig_wa)
		adev->nbio.funcs->apply_l1_link_width_reconfig_wa(adev);

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * so that they can be exposed to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}

static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->smuio.funcs->update_rom_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;


	adev->nbio.funcs->get_clockgating_state(adev, flags);

	adev->hdp.funcs->get_clock_gating_state(adev, flags);

	adev->smuio.funcs->get_clock_gating_state(adev, flags);

	return;
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};