/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "gfxhub_v1_1.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS			8

static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

/* Ecc related register addresses, (BASE + reg offset) */
/* Universal Memory Controller caps (may be fused). */
/* UMCCH:UmcLocalCap */
#define UMCLOCALCAPS_ADDR0	(0x00014306 + 0x00000000)
#define UMCLOCALCAPS_ADDR1	(0x00014306 + 0x00000800)
#define UMCLOCALCAPS_ADDR2	(0x00014306 + 0x00001000)
#define UMCLOCALCAPS_ADDR3	(0x00014306 + 0x00001800)
#define UMCLOCALCAPS_ADDR4	(0x00054306 + 0x00000000)
#define UMCLOCALCAPS_ADDR5	(0x00054306 + 0x00000800)
#define UMCLOCALCAPS_ADDR6	(0x00054306 + 0x00001000)
#define UMCLOCALCAPS_ADDR7	(0x00054306 + 0x00001800)
#define UMCLOCALCAPS_ADDR8	(0x00094306 + 0x00000000)
#define UMCLOCALCAPS_ADDR9	(0x00094306 + 0x00000800)
#define UMCLOCALCAPS_ADDR10	(0x00094306 + 0x00001000)
#define UMCLOCALCAPS_ADDR11	(0x00094306 + 0x00001800)
#define UMCLOCALCAPS_ADDR12	(0x000d4306 + 0x00000000)
#define UMCLOCALCAPS_ADDR13	(0x000d4306 + 0x00000800)
#define UMCLOCALCAPS_ADDR14	(0x000d4306 + 0x00001000)
#define UMCLOCALCAPS_ADDR15	(0x000d4306 + 0x00001800)

/* Universal Memory Controller Channel config. */
/* UMCCH:UMC_CONFIG */
#define UMCCH_UMC_CONFIG_ADDR0	(0x00014040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR1	(0x00014040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR2	(0x00014040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR3	(0x00014040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR4	(0x00054040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR5	(0x00054040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR6	(0x00054040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR7	(0x00054040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR8	(0x00094040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR9	(0x00094040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR10	(0x00094040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR11	(0x00094040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR12	(0x000d4040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR13	(0x000d4040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR14	(0x000d4040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR15	(0x000d4040 + 0x00001800)

/* Universal Memory Controller Channel Ecc config. */
/* UMCCH:EccCtrl */
#define UMCCH_ECCCTRL_ADDR0	(0x00014053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR1	(0x00014053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR2	(0x00014053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR3	(0x00014053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR4	(0x00054053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR5	(0x00054053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR6	(0x00054053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR7	(0x00054053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR8	(0x00094053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR9	(0x00094053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR10	(0x00094053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR11	(0x00094053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR12	(0x000d4053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR13	(0x000d4053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR14	(0x000d4053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR15	(0x000d4053 + 0x00001800)

static const uint32_t ecc_umclocalcap_addrs[] = {
	UMCLOCALCAPS_ADDR0,
	UMCLOCALCAPS_ADDR1,
	UMCLOCALCAPS_ADDR2,
	UMCLOCALCAPS_ADDR3,
	UMCLOCALCAPS_ADDR4,
	UMCLOCALCAPS_ADDR5,
	UMCLOCALCAPS_ADDR6,
	UMCLOCALCAPS_ADDR7,
	UMCLOCALCAPS_ADDR8,
	UMCLOCALCAPS_ADDR9,
	UMCLOCALCAPS_ADDR10,
	UMCLOCALCAPS_ADDR11,
	UMCLOCALCAPS_ADDR12,
	UMCLOCALCAPS_ADDR13,
	UMCLOCALCAPS_ADDR14,
	UMCLOCALCAPS_ADDR15,
};

static const uint32_t ecc_umcch_umc_config_addrs[] = {
	UMCCH_UMC_CONFIG_ADDR0,
	UMCCH_UMC_CONFIG_ADDR1,
	UMCCH_UMC_CONFIG_ADDR2,
	UMCCH_UMC_CONFIG_ADDR3,
	UMCCH_UMC_CONFIG_ADDR4,
	UMCCH_UMC_CONFIG_ADDR5,
	UMCCH_UMC_CONFIG_ADDR6,
	UMCCH_UMC_CONFIG_ADDR7,
	UMCCH_UMC_CONFIG_ADDR8,
	UMCCH_UMC_CONFIG_ADDR9,
	UMCCH_UMC_CONFIG_ADDR10,
	UMCCH_UMC_CONFIG_ADDR11,
	UMCCH_UMC_CONFIG_ADDR12,
	UMCCH_UMC_CONFIG_ADDR13,
	UMCCH_UMC_CONFIG_ADDR14,
	UMCCH_UMC_CONFIG_ADDR15,
};

static const uint32_t ecc_umcch_eccctrl_addrs[] = {
	UMCCH_ECCCTRL_ADDR0,
	UMCCH_ECCCTRL_ADDR1,
	UMCCH_ECCCTRL_ADDR2,
	UMCCH_ECCCTRL_ADDR3,
	UMCCH_ECCCTRL_ADDR4,
	UMCCH_ECCCTRL_ADDR5,
	UMCCH_ECCCTRL_ADDR6,
	UMCCH_ECCCTRL_ADDR7,
	UMCCH_ECCCTRL_ADDR8,
	UMCCH_ECCCTRL_ADDR9,
	UMCCH_ECCCTRL_ADDR10,
	UMCCH_ECCCTRL_ADDR11,
	UMCCH_ECCCTRL_ADDR12,
	UMCCH_ECCCTRL_ADDR13,
	UMCCH_ECCCTRL_ADDR14,
	UMCCH_ECCCTRL_ADDR15,
};

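/**
 * gmc_v9_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: interrupt type (unused)
 * @state: interrupt state to program
 *
 * Set or clear the protection fault interrupt enable bits in the
 * VM_CONTEXT*_CNTL registers of all 16 contexts on every VM hub.
 */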
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
	default:
		break;
	}

	return 0;
}

static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info = { 0 };

		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}

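/*
 * Build the VM_INVALIDATE_ENG*_REQ value for a legacy (flush type 0)
 * invalidation of a single VMID, flushing the L1 and L2 PTE/PDE caches.
 */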
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
{
	u32 req = 0;

	/* invalidate using legacy mode on vmid*/
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);

	return req;
}

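/*
 * Submit a reg_write_reg_wait packet on the KIQ ring and poll its fence
 * until the write/wait has completed or the retry limit is reached.
 * Used to issue the TLB flush request through the KIQ instead of
 * direct MMIO access.
 */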
static signed long  amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
						  uint32_t reg0, uint32_t reg1,
						  uint32_t ref, uint32_t mask)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	spin_lock_irqsave(&kiq->ring_lock, flags);

	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
					    ref, mask);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore for IRQ context */
	if (r < 1 && in_interrupt())
		goto failed_kiq;

	might_sleep();

	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq;

	return 0;

failed_kiq:
	pr_err("failed to invalidate tlb with kiq\n");
	return r;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

		if (adev->gfx.kiq.ring.ready &&
		    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
		    !adev->in_gpu_reset) {
			r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng,
				hub->vm_inv_eng0_ack + eng, tmp, 1 << vmid);
			if (!r)
				continue;
		}

		spin_lock(&adev->gmc.invalidate_lock);

		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

		/* Busy wait for ACK.*/
		for (j = 0; j < 100; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			cpu_relax();
		}
		if (j < 100) {
			spin_unlock(&adev->gmc.invalidate_lock);
			continue;
		}

		/* Wait for ACK with a delay.*/
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			udelay(1);
		}
		if (j < adev->usec_timeout) {
			spin_unlock(&adev->gmc.invalidate_lock);
			continue;
		}
		spin_unlock(&adev->gmc.invalidate_lock);
		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}
}

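/*
 * Emit commands on @ring that program the page directory base for @vmid
 * on the ring's VM hub and then request and wait for a TLB flush on the
 * ring's invalidation engine.
 */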
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid);
	unsigned eng = ring->vm_inv_eng;

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	return pd_addr;
}

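/* Emit a write that maps @vmid to @pasid in the IH VMID LUT of the ring's hub. */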
static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/**
 * gmc_v9_0_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v9_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
				uint32_t gpu_page_idx, uint64_t addr,
				uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VEGA 10:
	 * 63:59 reserved
	 * 58:57 mtype
	 * 56 F
	 * 55 L
	 * 54 P
	 * 53 SW
	 * 52 T
	 * 50:48 reserved
	 * 47:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 Z
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VEGA 10:
	 * 63:59 block fragment size
	 * 58:55 reserved
	 * 54 P
	 * 53:48 reserved
	 * 47:6 physical base address of PD or PTE
	 * 5:3 reserved
	 * 2 C
	 * 1 system
	 * 0 valid
	 */

	/*
	 * The following is for PTE only. GART does not have PDEs.
	*/
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}

static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
						uint32_t flags)

{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

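/*
 * Adjust a PDE for the hardware: translate the address to an MC address
 * for page tables located in VRAM and, if further translation is enabled,
 * set the block fragment size (PDB1) or the translate-further bit (PDB0).
 */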
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.set_pte_pde = gmc_v9_0_set_pte_pde,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

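/*
 * Check the UMC capability, configuration and ECC control registers of
 * every memory channel.  Returns 1 if ECC is enabled and usable on all
 * channels, 0 otherwise.
 */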
static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
{
	uint32_t reg_val;
	uint32_t reg_addr;
	uint32_t field_val;
	size_t i;
	uint32_t fv2;
	size_t lost_sheep;

	DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");

	lost_sheep = 0;
	for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
		reg_addr = ecc_umclocalcap_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
					  EccDis);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "EccDis: 0x%08x, ",
			  reg_val, field_val);
		if (field_val) {
			DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
			++lost_sheep;
		}
	}

	for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
		reg_addr = ecc_umcch_umc_config_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
					  DramReady);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "DramReady: 0x%08x\n",
			  reg_val, field_val);

		if (!field_val) {
			DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
			++lost_sheep;
		}
	}

	for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
		reg_addr = ecc_umcch_eccctrl_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
					  WrEccEn);
		fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
				    RdEccEn);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "WrEccEn: 0x%08x, "
			  "RdEccEn: 0x%08x\n",
			  reg_val, field_val, fv2);

		if (!field_val) {
			DRM_DEBUG("ecc: WrEccEn is not set\n");
			++lost_sheep;
		}
		if (!fv2) {
			DRM_DEBUG("ecc: RdEccEn is not set\n");
			++lost_sheep;
		}
	}

	DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
	return lost_sheep == 0;
}

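/*
 * Report whether the stolen (pre-OS framebuffer) VRAM reservation has to
 * be kept for the lifetime of the driver instead of being released after
 * init.  See the workaround description below.
 */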
static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{

	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to the first 8M of VRAM on S3 resume.
	 * This overwrites the GART, which by default is placed in the
	 * first 8M, and causes VM_FAULTs once the GTT is accessed.
	 * Keep the stolen memory reservation until this is resolved.
	 * Also check the related code in gmc_v9_0_get_vbios_fb_size and
	 * gmc_v9_0_late_init.
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		return true;
	case CHIP_RAVEN:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	default:
		return false;
	}
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/*
	 * The latest engine allocation on gfx9 is:
	 * Engine 0, 1: idle
	 * Engine 2, 3: firmware
	 * Engine 4~13: amdgpu ring, subject to change when ring number changes
	 * Engine 14~15: idle
	 * Engine 16: kfd tlb invalidation
	 * Engine 17: Gart flushes
	 */
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
	unsigned i;
	int r;

	if (!gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_late_init(adev);

	for(i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
			 ring->idx, ring->name, ring->vm_inv_eng,
			 ring->funcs->vmhub);
	}

	/* Engine 16 is used for KFD and 17 for GART flushes */
	for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 16);

	if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
		r = gmc_v9_0_ecc_available(adev);
		if (r == 1) {
			DRM_INFO("ECC is active.\n");
		} else if (r == 0) {
			DRM_INFO("ECC is not present.\n");
			adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
		} else {
			DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
			return r;
		}
	}

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;
	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);
	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);
	if (!amdgpu_sriov_vf(adev))
		amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int chansize, numchan;
	int r;

	if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df_funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	/* size in MB on si */
	adev->gmc.mc_vram_size =
		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

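/*
 * Work out how much VRAM the vBIOS/pre-OS console is still scanning out
 * of (from the VGA control and viewport registers) so that it can be
 * reserved until the driver takes over the displays.
 */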
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);

	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}
	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);

	if (adev->asic_type == CHIP_VEGA20) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	/* After HDP is initialized, flush HDP.*/
	adev->nbio_funcs->hdp_flush(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);
	gmc_v9_0_flush_gpu_tlb(adev, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture*/
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page table.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return mmhub_v1_0_set_clockgating(adev, state);
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mmhub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};