/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"

#define mmDF_CS_AON0_DramBaseAddress0                                                                  0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX                                                         0
//DF_CS_AON0_DramBaseAddress0
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT                                                        0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT                                                    0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT                                                      0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT                                                      0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT                                                      0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK                                                          0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK                                                      0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK                                                        0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK                                                        0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK                                                        0xFFFFF000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS			8

static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

/* Ecc related register addresses, (BASE + reg offset) */
/* Universal Memory Controller caps (may be fused). */
/* UMCCH:UmcLocalCap */
#define UMCLOCALCAPS_ADDR0	(0x00014306 + 0x00000000)
#define UMCLOCALCAPS_ADDR1	(0x00014306 + 0x00000800)
#define UMCLOCALCAPS_ADDR2	(0x00014306 + 0x00001000)
#define UMCLOCALCAPS_ADDR3	(0x00014306 + 0x00001800)
#define UMCLOCALCAPS_ADDR4	(0x00054306 + 0x00000000)
#define UMCLOCALCAPS_ADDR5	(0x00054306 + 0x00000800)
#define UMCLOCALCAPS_ADDR6	(0x00054306 + 0x00001000)
#define UMCLOCALCAPS_ADDR7	(0x00054306 + 0x00001800)
#define UMCLOCALCAPS_ADDR8	(0x00094306 + 0x00000000)
#define UMCLOCALCAPS_ADDR9	(0x00094306 + 0x00000800)
#define UMCLOCALCAPS_ADDR10	(0x00094306 + 0x00001000)
#define UMCLOCALCAPS_ADDR11	(0x00094306 + 0x00001800)
#define UMCLOCALCAPS_ADDR12	(0x000d4306 + 0x00000000)
#define UMCLOCALCAPS_ADDR13	(0x000d4306 + 0x00000800)
#define UMCLOCALCAPS_ADDR14	(0x000d4306 + 0x00001000)
#define UMCLOCALCAPS_ADDR15	(0x000d4306 + 0x00001800)

/* Universal Memory Controller Channel config. */
/* UMCCH:UMC_CONFIG */
#define UMCCH_UMC_CONFIG_ADDR0	(0x00014040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR1	(0x00014040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR2	(0x00014040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR3	(0x00014040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR4	(0x00054040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR5	(0x00054040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR6	(0x00054040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR7	(0x00054040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR8	(0x00094040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR9	(0x00094040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR10	(0x00094040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR11	(0x00094040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR12	(0x000d4040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR13	(0x000d4040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR14	(0x000d4040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR15	(0x000d4040 + 0x00001800)

/* Universal Memory Controller Channel Ecc config. */
/* UMCCH:EccCtrl */
#define UMCCH_ECCCTRL_ADDR0	(0x00014053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR1	(0x00014053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR2	(0x00014053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR3	(0x00014053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR4	(0x00054053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR5	(0x00054053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR6	(0x00054053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR7	(0x00054053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR8	(0x00094053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR9	(0x00094053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR10	(0x00094053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR11	(0x00094053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR12	(0x000d4053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR13	(0x000d4053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR14	(0x000d4053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR15	(0x000d4053 + 0x00001800)

static const uint32_t ecc_umclocalcap_addrs[] = {
	UMCLOCALCAPS_ADDR0,
	UMCLOCALCAPS_ADDR1,
	UMCLOCALCAPS_ADDR2,
	UMCLOCALCAPS_ADDR3,
	UMCLOCALCAPS_ADDR4,
	UMCLOCALCAPS_ADDR5,
	UMCLOCALCAPS_ADDR6,
	UMCLOCALCAPS_ADDR7,
	UMCLOCALCAPS_ADDR8,
	UMCLOCALCAPS_ADDR9,
	UMCLOCALCAPS_ADDR10,
	UMCLOCALCAPS_ADDR11,
	UMCLOCALCAPS_ADDR12,
	UMCLOCALCAPS_ADDR13,
	UMCLOCALCAPS_ADDR14,
	UMCLOCALCAPS_ADDR15,
};

static const uint32_t ecc_umcch_umc_config_addrs[] = {
	UMCCH_UMC_CONFIG_ADDR0,
	UMCCH_UMC_CONFIG_ADDR1,
	UMCCH_UMC_CONFIG_ADDR2,
	UMCCH_UMC_CONFIG_ADDR3,
	UMCCH_UMC_CONFIG_ADDR4,
	UMCCH_UMC_CONFIG_ADDR5,
	UMCCH_UMC_CONFIG_ADDR6,
	UMCCH_UMC_CONFIG_ADDR7,
	UMCCH_UMC_CONFIG_ADDR8,
	UMCCH_UMC_CONFIG_ADDR9,
	UMCCH_UMC_CONFIG_ADDR10,
	UMCCH_UMC_CONFIG_ADDR11,
	UMCCH_UMC_CONFIG_ADDR12,
	UMCCH_UMC_CONFIG_ADDR13,
	UMCCH_UMC_CONFIG_ADDR14,
	UMCCH_UMC_CONFIG_ADDR15,
};

static const uint32_t ecc_umcch_eccctrl_addrs[] = {
	UMCCH_ECCCTRL_ADDR0,
	UMCCH_ECCCTRL_ADDR1,
	UMCCH_ECCCTRL_ADDR2,
	UMCCH_ECCCTRL_ADDR3,
	UMCCH_ECCCTRL_ADDR4,
	UMCCH_ECCCTRL_ADDR5,
	UMCCH_ECCCTRL_ADDR6,
	UMCCH_ECCCTRL_ADDR7,
	UMCCH_ECCCTRL_ADDR8,
	UMCCH_ECCCTRL_ADDR9,
	UMCCH_ECCCTRL_ADDR10,
	UMCCH_ECCCTRL_ADDR11,
	UMCCH_ECCCTRL_ADDR12,
	UMCCH_ECCCTRL_ADDR13,
	UMCCH_ECCCTRL_ADDR14,
	UMCCH_ECCCTRL_ADDR15,
};

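/**
 * gmc_v9_0_vm_fault_interrupt_state - vm fault interrupt state callback
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Enable or disable the VM protection fault interrupt bits in
 * VM_CONTEXT*_CNTL for all sixteen contexts on every VM hub.
 */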
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

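/**
 * gmc_v9_0_process_interrupt - process a vm fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Reconstruct the faulting GPU address from the IV entry, read and clear
 * the L2 protection fault status (bare metal only) and log the fault.
 */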
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vmid:%u pas_id:%u)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pas_id);
		dev_err(adev->dev, "  at page 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}

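/**
 * gmc_v9_0_get_invalidate_req - build a VM_INVALIDATE_ENG0_REQ value
 *
 * @vmid: vm instance to invalidate
 *
 * Build the request word for a legacy invalidation that flushes the
 * L1 and L2 page table caches for the given VMID.
 */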
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
{
	u32 req = 0;

	/* invalidate using legacy mode on vmid*/
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);

	return req;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned i, j;

	spin_lock(&adev->mc.invalidate_lock);

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

		/* Busy wait for ACK.*/
		for (j = 0; j < 100; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			cpu_relax();
		}
		if (j < 100)
			continue;

		/* Wait for ACK with a delay.*/
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			udelay(1);
		}
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}

	spin_unlock(&adev->mc.invalidate_lock);
}

/**
 * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
					void *cpu_pt_addr,
					uint32_t gpu_page_idx,
					uint64_t addr,
					uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VEGA 10:
	 * 63:59 reserved
	 * 58:57 mtype
	 * 56 F
	 * 55 L
	 * 54 P
	 * 53 SW
	 * 52 T
	 * 50:48 reserved
	 * 47:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 Z
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VEGA 10:
	 * 63:59 block fragment size
	 * 58:55 reserved
	 * 54 P
	 * 53:48 reserved
	 * 47:6 physical base address of PD or PTE
	 * 5:3 reserved
	 * 2 C
	 * 1 system
	 * 0 valid
	 */

	/*
	 * The following is for PTE only. GART does not have PDEs.
	*/
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}

static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
						uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

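/**
 * gmc_v9_0_get_vm_pde - adjust a PDE for the VM hardware
 *
 * @adev: amdgpu_device pointer
 * @level: page directory level of the entry
 * @addr: PDE address, translated in place to an MC address
 * @flags: PDE flags, adjusted in place
 *
 * Translate the PDE address into the MC address space and, when further
 * translation is enabled, set the block fragment size or translate-further
 * bits depending on the directory level.
 */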
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->mc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->mc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};

static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gart_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
	adev->mc.shared_aperture_end =
		adev->mc.shared_aperture_start + (4ULL << 30) - 1;
	adev->mc.private_aperture_start =
		adev->mc.shared_aperture_end + 1;
	adev->mc.private_aperture_end =
		adev->mc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

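/**
 * gmc_v9_0_ecc_available - check whether ECC is enabled on all UMC channels
 *
 * @adev: amdgpu_device pointer
 *
 * Walk the per-channel UmcLocalCap, UMC_CONFIG and EccCtrl registers and
 * count the channels with ECC fused off, DRAM not ready or read/write ECC
 * disabled ("lost sheep").  Returns 1 if ECC is fully enabled, 0 otherwise.
 */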
static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
{
	uint32_t reg_val;
	uint32_t reg_addr;
	uint32_t field_val;
	size_t i;
	uint32_t fv2;
	size_t lost_sheep;

	DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");

	lost_sheep = 0;
	for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
		reg_addr = ecc_umclocalcap_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
					  EccDis);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "EccDis: 0x%08x, ",
			  reg_val, field_val);
		if (field_val) {
			DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
			++lost_sheep;
		}
	}

	for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
		reg_addr = ecc_umcch_umc_config_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
					  DramReady);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "DramReady: 0x%08x\n",
			  reg_val, field_val);

		if (!field_val) {
			DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
			++lost_sheep;
		}
	}

	for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
		reg_addr = ecc_umcch_eccctrl_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
					  WrEccEn);
		fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
				    RdEccEn);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "WrEccEn: 0x%08x, "
			  "RdEccEn: 0x%08x\n",
			  reg_val, field_val, fv2);

		if (!field_val) {
			DRM_DEBUG("ecc: WrEccEn is not set\n");
			++lost_sheep;
		}
		if (!fv2) {
			DRM_DEBUG("ecc: RdEccEn is not set\n");
			++lost_sheep;
		}
	}

	DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
	return lost_sheep == 0;
}

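/**
 * gmc_v9_0_late_init - late init callback
 *
 * @handle: amdgpu_device pointer
 *
 * Assign a VM invalidation engine to every ring, check ECC availability
 * on VEGA10 and enable the VM fault interrupt.
 */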
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/*
	 * The latest engine allocation on gfx9 is:
	 * Engine 0, 1: idle
	 * Engine 2, 3: firmware
	 * Engine 4~13: amdgpu ring, subject to change when ring number changes
	 * Engine 14~15: idle
	 * Engine 16: kfd tlb invalidation
	 * Engine 17: Gart flushes
	 */
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
	unsigned i;
	int r;

	for(i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
			 ring->idx, ring->name, ring->vm_inv_eng,
			 ring->funcs->vmhub);
	}

	/* Engine 16 is used for KFD and 17 for GART flushes */
	for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 16);

	if (adev->asic_type == CHIP_VEGA10) {
		r = gmc_v9_0_ecc_available(adev);
		if (r == 1) {
			DRM_INFO("ECC is active.\n");
		} else if (r == 0) {
			DRM_INFO("ECC is not present.\n");
		} else {
			DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
			return r;
		}
	}

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}

static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_mc *mc)
{
	u64 base = 0;
	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);
	amdgpu_device_vram_location(adev, &adev->mc, base);
	amdgpu_device_gart_location(adev, mc);
	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU)
		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
	else
		adev->vm_manager.vram_base_offset = 0;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;
	int r;

	adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	if (!adev->mc.vram_width) {
		/* hbm memory channel size */
		chansize = 128;

		tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
		tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
		tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
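		/* decode the DF channel-interleave setting into a channel count */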
		switch (tmp) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 0;
			break;
		case 3:
			numchan = 4;
			break;
		case 4:
			numchan = 0;
			break;
		case 5:
			numchan = 8;
			break;
		case 6:
			numchan = 0;
			break;
		case 7:
			numchan = 16;
			break;
		case 8:
			numchan = 2;
			break;
		}
		adev->mc.vram_width = numchan * chansize;
	}

	/* get_memsize() reports the VRAM size in MB; convert to bytes */
	adev->mc.mc_vram_size =
		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = adev->mc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->mc.visible_vram_size = adev->mc.aper_size;
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		default:
			adev->mc.gart_size = 256ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
			adev->mc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	mmhub_v1_0_init(adev);

	spin_lock_init(&adev->mc.invalidate_lock);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->mc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
		/* XXX Don't know how to get VRAM type yet. */
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
				&adev->mc.vm_fault);
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
				&adev->mc.vm_fault);

	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/*
	 * It needs to reserve 8M stolen memory for vega10
	 * TODO: Figure out how to avoid that...
	 */
	adev->mc.stolen_size = 8 * 1024 * 1024;

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}
	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}

/**
 * gmc_v9_0_gart_fini - gart fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (VEGA10).
 */
static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	gmc_v9_0_gart_fini(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		mmhub_v1_0_initialize_power_gating(adev);
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	/* After HDP is initialized, flush HDP.*/
	adev->nbio_funcs->hdp_flush(adev);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);
	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture*/
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return mmhub_v1_0_set_clockgating(adev, state);
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mmhub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};