/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "mp/mp_9_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "navi10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "smuio_v11_0.h"
#include "smuio_v13_0.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>

#define mmMP0_MISC_CGTT_CTRL0                                                                   0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX                                                          0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL                                                             0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX                                                    0

/* Vega, Raven, Arcturus */
static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};

static const struct amdgpu_video_codecs vega_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(vega_video_codecs_encode_array),
	.codec_array = vega_video_codecs_encode_array,
};

/* Vega */
static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
};

static const struct amdgpu_video_codecs vega_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(vega_video_codecs_decode_array),
	.codec_array = vega_video_codecs_decode_array,
};

/* Raven */
static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
};

static const struct amdgpu_video_codecs rv_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(rv_video_codecs_decode_array),
	.codec_array = rv_video_codecs_decode_array,
};

/* Renoir, Arcturus */
static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs rn_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(rn_video_codecs_decode_array),
	.codec_array = rn_video_codecs_decode_array,
};

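/* Report the supported encode/decode video codec tables for the given ASIC. */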
static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
				    const struct amdgpu_video_codecs **codecs)
{
	switch (adev->asic_type) {
	case CHIP_VEGA20:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
		if (encode)
			*codecs = &vega_video_codecs_encode;
		else
			*codecs = &vega_video_codecs_decode;
		return 0;
	case CHIP_RAVEN:
		if (encode)
			*codecs = &vega_video_codecs_encode;
		else
			*codecs = &rv_video_codecs_decode;
		return 0;
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
	case CHIP_RENOIR:
		if (encode)
			*codecs = &vega_video_codecs_encode;
		else
			*codecs = &rn_video_codecs_decode;
		return 0;
	default:
		return -EINVAL;
	}
}

/*
 * Indirect registers accessor
 */
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;
	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg(adev, address, data, reg);
}

static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}

static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;
	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}

static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}

static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}

static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}

static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;

	if (adev->asic_type == CHIP_RENOIR)
		return 10000;
	if (adev->asic_type == CHIP_RAVEN)
		return reference_clock / 4;

	return reference_clock;
}


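/* Program GRBM_GFX_CNTL to select which ME/pipe/queue/VMID subsequent
 * GRBM register accesses are routed to.
 */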
void soc15_grbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}

static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

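/* Read the discrete GPU VBIOS image through the SMUIO ROM index/data
 * registers, one dword at a time; APUs carry their VBIOS inside the
 * system BIOS image instead.
 */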
static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;
	uint32_t rom_index_offset;
	uint32_t rom_data_offset;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	rom_index_offset =
		adev->smuio.funcs->get_rom_index_offset(adev);
	rom_data_offset =
		adev->smuio.funcs->get_rom_data_offset(adev);

	/* set rom index to 0 */
	WREG32(rom_index_offset, 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(rom_data_offset);

	return true;
}

static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};

static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
			return adev->gfx.config.db_debug2;
		return RREG32(reg_offset);
	}
}

static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry  *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		en = &soc15_allowed_read_registers[i];
		if (adev->reg_offset[en->hwip][en->inst] &&
			reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
					+ en->reg_offset))
			continue;

		*value = soc15_get_register_value(adev,
						  soc15_allowed_read_registers[i].grbm_indexed,
						  se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}


/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */

void soc15_program_register_sequence(struct amdgpu_device *adev,
					     const struct soc15_reg_golden *regs,
					     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg =  adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		if (entry->and_mask == 0xffffffff) {
			tmp = entry->or_mask;
		} else {
			tmp = (entry->hwip == GC_HWIP) ?
				RREG32_SOC15_IP(GC, reg) : RREG32(reg);

			tmp &= ~(entry->and_mask);
			tmp |= (entry->or_mask & entry->and_mask);
		}

		if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
			reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
			reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
			reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
			WREG32_RLC(reg, tmp);
		else
			(entry->hwip == GC_HWIP) ?
				WREG32_SOC15_IP(GC, reg, tmp) : WREG32(reg, tmp);

	}

}

static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/* avoid NBIF got stuck when do RAS recovery in BACO reset */
	if (ras && adev->ras_enabled)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	ret = amdgpu_dpm_baco_reset(adev);
	if (ret)
		return ret;

	/* re-enable doorbell interrupt after BACO exit */
	if (ras && adev->ras_enabled)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	return 0;
}

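/* Choose the reset method (PCI, BACO, MODE1 or MODE2) based on the module
 * parameter override, the ASIC type, RAS state and XGMI/CPU connectivity.
 */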
static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset = false;
	bool connected_to_cpu = false;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu)
		connected_to_cpu = true;

	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
	    amdgpu_reset_method == AMD_RESET_METHOD_PCI) {
		/* If connected to cpu, the driver only supports mode2 */
		if (connected_to_cpu)
			return AMD_RESET_METHOD_MODE2;
		return amdgpu_reset_method;
	}

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
				  amdgpu_reset_method);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		return AMD_RESET_METHOD_MODE2;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_ARCTURUS:
		baco_reset = amdgpu_dpm_is_baco_supported(adev);
		break;
	case CHIP_VEGA20:
		if (adev->psp.sos.fw_version >= 0x80067)
			baco_reset = amdgpu_dpm_is_baco_supported(adev);

		/*
		 * 1. PMFW version > 0x284300: all cases use baco
		 * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
		 */
		if (ras && adev->ras_enabled &&
		    adev->pm.fw_version <= 0x283400)
			baco_reset = false;
		break;
	case CHIP_ALDEBARAN:
		/*
		 * 1. connected to cpu: driver issues mode2 reset
		 * 2. discrete gpu: driver issues mode1 reset
		 */
		if (connected_to_cpu)
			return AMD_RESET_METHOD_MODE2;
		break;
	default:
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}

static int soc15_asic_reset(struct amdgpu_device *adev)
{
	/* original raven doesn't have full asic reset */
	if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
	    !(adev->apu_flags & AMD_APU_IS_RAVEN2))
		return 0;

	switch (soc15_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_PCI:
		dev_info(adev->dev, "PCI reset\n");
		return amdgpu_device_pci_reset(adev);
	case AMD_RESET_METHOD_BACO:
		dev_info(adev->dev, "BACO reset\n");
		return soc15_asic_baco_reset(adev);
	case AMD_RESET_METHOD_MODE2:
		dev_info(adev->dev, "MODE2 reset\n");
		return amdgpu_dpm_mode2_reset(adev);
	default:
		dev_info(adev->dev, "MODE1 reset\n");
		return amdgpu_device_mode1_reset(adev);
	}
}

static bool soc15_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_ARCTURUS:
		return amdgpu_dpm_is_baco_supported(adev);
	case CHIP_VEGA20:
		if (adev->psp.sos.fw_version >= 0x80067)
			return amdgpu_dpm_is_baco_supported(adev);
		return false;
	default:
		return false;
	}
}

/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void soc15_program_aspm(struct amdgpu_device *adev)
{
	if (!amdgpu_aspm)
		return;

	if (!(adev->flags & AMD_IS_APU) &&
	    (adev->nbio.funcs->program_aspm))
		adev->nbio.funcs->program_aspm(adev);
}

static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};

static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

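/* Populate adev->reg_offset[] for all hardware IPs, either from the IP
 * discovery table (Renoir) or from the per-ASIC legacy tables, before any
 * hardware register is touched.
 */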
static void soc15_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		break;
	case CHIP_RENOIR:
		/* It's safe to do ip discovery here for Renoir,
		 * it doesn't support SRIOV. */
		if (amdgpu_discovery) {
			r = amdgpu_discovery_reg_base_init(adev);
			if (r == 0)
				break;
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
		}
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		break;
	case CHIP_ALDEBARAN:
		aldebaran_reg_base_init(adev);
		break;
	default:
		DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
		break;
	}
}

void soc15_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_ai_virt_ops;

	/* init soc15 reg base early enough so we can
	 * request full access for sriov before
	 * set_ip_blocks. */
	soc15_reg_base_init(adev);
}

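/* Pick the NBIO/HDP/DF/SMUIO helper functions and register the IP blocks
 * (common, GMC, IH, PSP, GFX, SDMA, SMU, display and multimedia) that match
 * the detected ASIC.
 */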
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	/* for bare metal case */
	if (!amdgpu_sriov_vf(adev))
		soc15_reg_base_init(adev);

	if (adev->flags & AMD_IS_APU) {
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
	} else if (adev->asic_type == CHIP_VEGA20 ||
		   adev->asic_type == CHIP_ARCTURUS ||
		   adev->asic_type == CHIP_ALDEBARAN) {
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
	} else {
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
	}
	adev->hdp.funcs = &hdp_v4_0_funcs;

	if (adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_ARCTURUS ||
	    adev->asic_type == CHIP_ALDEBARAN)
		adev->df.funcs = &df_v3_6_funcs;
	else
		adev->df.funcs = &df_v1_7_funcs;

	if (adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_ARCTURUS)
		adev->smuio.funcs = &smuio_v11_0_funcs;
	else if (adev->asic_type == CHIP_ALDEBARAN)
		adev->smuio.funcs = &smuio_v13_0_funcs;
	else
		adev->smuio.funcs = &smuio_v9_0_funcs;

	adev->rev_id = soc15_get_rev_id(adev);

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		/* For Vega10 SR-IOV, PSP needs to be initialized before IH */
		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
			if (adev->asic_type == CHIP_VEGA20)
				amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
			else
				amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		} else {
			if (adev->asic_type == CHIP_VEGA20)
				amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
			else
				amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
		}
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (is_support_sw_smu(adev)) {
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		}
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
		}
		break;
	case CHIP_RAVEN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	case CHIP_ARCTURUS:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		}

		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);

		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
		}
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
		break;
	case CHIP_RENOIR:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	case CHIP_ALDEBARAN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
		}

		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);

		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
	/* change this when we implement soft reset */
	return true;
}

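/* Sample the PCIe TX perf counters for one second to report the number of
 * received messages (count0) and posted requests sent (count1).
 */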
static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				 uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				 uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 108 is # of posted requests sent on VG20 */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT1_SEL, 108);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
}

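/* A reset is only needed at init when running passthrough on a dGPU whose
 * PSP sign-of-life register shows the sOS is already up.
 */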
static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	/* Just return false for soc15 GPUs.  Reset does not seem to
	 * be necessary.
	 */
	if (!amdgpu_passthrough(adev))
		return false;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e the number of replays */
	return (nak_r + nak_g);
}

static void soc15_pre_asic_init(struct amdgpu_device *adev)
{
	gmc_v9_0_restore_registers(adev);
}

static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega10_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
	.pre_asic_init = &soc15_pre_asic_init,
	.query_video_codecs = &soc15_query_video_codecs,
};

static const struct amdgpu_asic_funcs vega20_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega20_doorbell_index_init,
	.get_pcie_usage = &vega20_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
	.pre_asic_init = &soc15_pre_asic_init,
	.query_video_codecs = &soc15_query_video_codecs,
};

static int soc15_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->pcie_rreg64 = &soc15_pcie_rreg64;
	adev->pcie_wreg64 = &soc15_pcie_wreg64;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;


	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_VEGA12:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_VEGA20:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_RAVEN:
		adev->asic_funcs = &soc15_asic_funcs;

		if (adev->rev_id >= 0x8)
			adev->apu_flags |= AMD_APU_IS_RAVEN2;

		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			adev->external_rev_id = adev->rev_id + 0x79;
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			adev->external_rev_id = adev->rev_id + 0x41;
		else if (adev->rev_id == 1)
			adev->external_rev_id = adev->rev_id + 0x20;
		else
			adev->external_rev_id = adev->rev_id + 0x01;

		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		} else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				AMD_PG_SUPPORT_MMHUB |
				AMD_PG_SUPPORT_VCN;
		} else {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_RLC_LS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_MGCG |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_MGCG |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_DRM_MGCG |
				AMD_CG_SUPPORT_DRM_LS |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		}
		break;
	case CHIP_ARCTURUS:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	case CHIP_RENOIR:
		adev->asic_funcs = &soc15_asic_funcs;

		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			adev->external_rev_id = adev->rev_id + 0x91;
		else
			adev->external_rev_id = adev->rev_id + 0xa1;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				 AMD_CG_SUPPORT_GFX_MGLS |
				 AMD_CG_SUPPORT_GFX_3D_CGCG |
				 AMD_CG_SUPPORT_GFX_3D_CGLS |
				 AMD_CG_SUPPORT_GFX_CGCG |
				 AMD_CG_SUPPORT_GFX_CGLS |
				 AMD_CG_SUPPORT_GFX_CP_LS |
				 AMD_CG_SUPPORT_MC_MGCG |
				 AMD_CG_SUPPORT_MC_LS |
				 AMD_CG_SUPPORT_SDMA_MGCG |
				 AMD_CG_SUPPORT_SDMA_LS |
				 AMD_CG_SUPPORT_BIF_LS |
				 AMD_CG_SUPPORT_HDP_LS |
				 AMD_CG_SUPPORT_VCN_MGCG |
				 AMD_CG_SUPPORT_JPEG_MGCG |
				 AMD_CG_SUPPORT_IH_CG |
				 AMD_CG_SUPPORT_ATHUB_LS |
				 AMD_CG_SUPPORT_ATHUB_MGCG |
				 AMD_CG_SUPPORT_DF_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				 AMD_PG_SUPPORT_VCN |
				 AMD_PG_SUPPORT_JPEG |
				 AMD_PG_SUPPORT_VCN_DPG;
		break;
	case CHIP_ALDEBARAN:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = 0;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	if (adev->nbio.ras_funcs &&
	    adev->nbio.ras_funcs->ras_late_init)
		r = adev->nbio.ras_funcs->ras_late_init(adev);

	return r;
}

static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	adev->df.funcs->sw_init(adev);

	return 0;
}

static int soc15_common_sw_fini(void *handle)
{
1409 1410
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->nbio.ras_funcs &&
	    adev->nbio.ras_funcs->ras_fini)
		adev->nbio.ras_funcs->ras_fini(adev);
	adev->df.funcs->sw_fini(adev);
	return 0;
}

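/* Program the SDMA and IH doorbell ranges in NBIO; under SR-IOV these
 * ranges are owned and programmed by the hypervisor.
 */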
static void soc15_doorbell_range_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ring *ring;

	/* sdma/ih doorbell ranges are programmed by the hypervisor */
	if (!amdgpu_sriov_vf(adev)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			ring = &adev->sdma.instance[i].ring;
			adev->nbio.funcs->sdma_doorbell_range(adev, i,
				ring->use_doorbell, ring->doorbell_index,
				adev->doorbell_index.sdma_doorbell_range);
		}

		adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
						adev->irq.ih.doorbell_index);
	}
}

static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * for the purpose of exposing those registers
	 * to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);

	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);
	/* HW doorbell routing policy: doorbell writing not
	 * in SDMA/IH/MM/ACV range will be routed to CP. So
	 * we need to init SDMA/IH/MM/ACV doorbell range prior
	 * to CP ip block init and ring test.
	 */
	soc15_doorbell_range_init(adev);

	return 0;
}

static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	if (adev->nbio.ras_if &&
	    amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
		if (adev->nbio.ras_funcs &&
		    adev->nbio.ras_funcs->init_ras_controller_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
		if (adev->nbio.ras_funcs &&
		    adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
	}

K
}

static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
	return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc15_common_soft_reset(void *handle)
{
	return 0;
}

static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 |
			  0x02000000 |
			  0x04000000 |
			  0x08000000 |
			  0x10000000 |
			  0x20000000 |
			  0x40000000 |
			  0x80000000);
	else
		data |= (0x01000000 |
			 0x02000000 |
			 0x04000000 |
			 0x08000000 |
			 0x10000000 |
			 0x20000000 |
			 0x40000000 |
			 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}

static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}

static int soc15_common_set_clockgating_state(void *handle,
					    enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->smuio.funcs->update_rom_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->df.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	adev->hdp.funcs->get_clock_gating_state(adev, flags);

	if (adev->asic_type != CHIP_ALDEBARAN) {

		/* AMD_CG_SUPPORT_DRM_MGCG */
		data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
		if (!(data & 0x01000000))
			*flags |= AMD_CG_SUPPORT_DRM_MGCG;

		/* AMD_CG_SUPPORT_DRM_LS */
		data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
		if (data & 0x1)
			*flags |= AMD_CG_SUPPORT_DRM_LS;
	}

	/* AMD_CG_SUPPORT_ROM_MGCG */
	adev->smuio.funcs->get_clock_gating_state(adev, flags);

	adev->df.funcs->get_clockgating_state(adev, flags);
}

static int soc15_common_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	/* todo */
	return 0;
}

const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = soc15_common_late_init,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
	.get_clockgating_state = soc15_common_get_clockgating_state,
};