/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "tonga_sdma_pkt_open.h"

static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/tonga_sdma.bin");
MODULE_FIRMWARE("amdgpu/tonga_sdma1.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
MODULE_FIRMWARE("amdgpu/stoney_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris10_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin");

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};
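
/*
 * The golden register tables below are (offset, and_mask, or_value)
 * triples consumed by amdgpu_device_program_register_sequence(): each
 * register is read, the and_mask bits are cleared, or_value is ORed in
 * and the result written back (an and_mask of 0xffffffff writes
 * or_value directly).
 */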

static const u32 golden_settings_tonga_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 golden_settings_fiji_a10[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 cz_golden_settings_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA1_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 stoney_golden_settings_a11[] =
{
	mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xffffffff, 0x00000100,
};

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */
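
/*
 * Every sDMA packet begins with a header dword: bits 7:0 hold the opcode
 * and bits 15:8 the sub-opcode (see tonga_sdma_pkt_open.h).  For example
 * SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
 * SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)
 * builds the first dword of the write-linear packet used by the ring
 * and IB tests below.
 */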

static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							cz_golden_settings_a11,
							ARRAY_SIZE(cz_golden_settings_a11));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							stoney_golden_settings_a11,
							ARRAY_SIZE(stoney_golden_settings_a11));
		break;
	default:
		break;
	}
}

static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
{
	int i;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		release_firmware(adev->sdma.instance[i].fw);
		adev->sdma.instance[i].fw = NULL;
	}
}

/**
 * sdma_v3_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_FIJI:
		chip_name = "fiji";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		chip_name = "polaris12";
		break;
	case CHIP_CARRIZO:
		chip_name = "carrizo";
		break;
	case CHIP_STONEY:
		chip_name = "stoney";
		break;
	default: BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
			info->fw = adev->sdma.instance[i].fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}
out:
	if (err) {
		pr_err("sdma_v3_0: Failed to load firmware \"%s\"\n", fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}

/**
 * sdma_v3_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VI+).
 */
static uint64_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	/* XXX check if swapping is necessary on BE */
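	/* the writeback slot holds the rptr as a byte offset, so >> 2 yields dwords */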
	return ring->adev->wb.wb[ring->rptr_offs] >> 2;
}

/**
 * sdma_v3_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VI+).
 */
static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 wptr;

	if (ring->use_doorbell || ring->use_pollmem) {
		/* XXX check if swapping is necessary on BE */
		wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
	} else {
		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

		wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
	}

	return wptr;
}

/**
 * sdma_v3_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VI+).
 */
static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
		/* XXX check if swapping is necessary on BE */
		WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr) << 2);
	} else if (ring->use_pollmem) {
		u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];

		WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
	} else {
		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
	}
}

static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	int i;

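	/*
	 * Engines with ucode feature_version >= 20 support a burst NOP:
	 * a single header with COUNT(count - 1) makes the engine consume
	 * the following count - 1 dwords as payload, so only the first
	 * iteration carries the count field.
	 */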
	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 * @vmid: VM ID this IB belongs to
 * @ctx_switch: whether this IB starts a new context
 *
 * Schedule an IB in the DMA ring (VI).
 */
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib,
				   unsigned vmid, bool ctx_switch)
{
	/* IB packet must end on an 8 DW boundary */
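	/*
	 * The INDIRECT packet emitted below is 6 dwords, so pad the ring to
	 * wptr % 8 == 2; e.g. wptr % 8 == 5 needs (10 - 5) % 8 = 5 NOPs.
	 */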
	sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
}

/**
 * sdma_v3_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask = 0;

	if (ring == &ring->adev->sdma.instance[0].ring)
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
	else
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

/**
 * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: GPU address to write the fence sequence number to
 * @seq: fence sequence number
 * @flags: fence flags (e.g. AMDGPU_FENCE_FLAG_64BIT)
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (VI).
 */
static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}

/**
 * sdma_v3_0_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VI).
 */
static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl, ib_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
	}
	sdma0->ready = false;
	sdma1->ready = false;
}

/**
 * sdma_v3_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VI).
 */
static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * sdma_v3_0_ctx_switch_enable - enable/disable the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch (VI).
 */
static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl, phase_quantum = 0;
	int i;

	if (amdgpu_sdma_phase_quantum) {
		unsigned value = amdgpu_sdma_phase_quantum;
		unsigned unit = 0;

		while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
				SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
			value = (value + 1) >> 1;
			unit++;
		}
		if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
			    SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
			value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
				 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
			unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
				SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
			WARN_ONCE(1,
			"clamping sdma_phase_quantum to %uK clock cycles\n",
				  value << unit);
		}
		phase_quantum =
			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
			unit  << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
	}
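	/*
	 * The block above encodes the quantum as a value/unit pair (roughly
	 * value << unit cycles): value is halved, rounding up, and unit
	 * incremented until value fits its register field.
	 */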

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
		if (enable) {
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					AUTO_CTXSW_ENABLE, 1);
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					ATC_L1_ENABLE, 1);
			if (amdgpu_sdma_phase_quantum) {
				WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i],
				       phase_quantum);
				WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i],
				       phase_quantum);
			}
		} else {
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					AUTO_CTXSW_ENABLE, 0);
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					ATC_L1_ENABLE, 1);
		}

		WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
	}
}

/**
 * sdma_v3_0_enable - enable/disable the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VI).
 */
static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v3_0_gfx_stop(adev);
		sdma_v3_0_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
		else
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
	}
}

/**
 * sdma_v3_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl, wptr_poll_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	u32 doorbell;
	u64 wptr_gpu_addr;
	int i, j, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		amdgpu_ring_clear_ring(ring);
		wb_offset = (ring->rptr_offs * 4);

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			vi_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
		}
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
		       adev->gfx.config.gb_addr_config & 0x70);

		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
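		/* e.g. a 256KB ring is 65536 dwords, so rb_bufsz = 16 */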
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
					RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		sdma_v3_0_ring_set_wptr(ring);
		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		doorbell = RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]);

		if (ring->use_doorbell) {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL,
						 OFFSET, ring->doorbell_index);
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
		} else {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
		}
		WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell);

		/* setup the wptr shadow polling */
		wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);

		WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO + sdma_offsets[i],
		       lower_32_bits(wptr_gpu_addr));
		WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(wptr_gpu_addr));
		wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]);
		if (ring->use_pollmem)
			wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
						       SDMA0_GFX_RB_WPTR_POLL_CNTL,
						       ENABLE, 1);
		else
			wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
						       SDMA0_GFX_RB_WPTR_POLL_CNTL,
						       ENABLE, 0);
		WREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i], wptr_poll_cntl);

		/* enable DMA RB */
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->ready = true;
	}

	/* unhalt the MEs */
	sdma_v3_0_enable(adev, true);
	/* enable sdma ring preemption */
	sdma_v3_0_ctx_switch_enable(adev, true);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
	}

	return 0;
}

/**
 * sdma_v3_0_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}

/**
 * sdma_v3_0_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	sdma_v3_0_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data +
				le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
		for (j = 0; j < fw_size; j++)
			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
	}

	return 0;
}

/**
 * sdma_v3_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_start(struct amdgpu_device *adev)
{
	int r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		r = sdma_v3_0_load_microcode(adev);
		if (r)
			return r;
	}

	/* disable sdma engine before programming it */
	sdma_v3_0_ctx_switch_enable(adev, false);
	sdma_v3_0_enable(adev, false);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v3_0_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v3_0_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}

/**
 * sdma_v3_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a
 * value to memory (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_device_wb_free(adev, index);
		return r;
	}

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_device_wb_free(adev, index);

	return r;
}

/**
 * sdma_v3_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (VI).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err0;
	}

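	/* a 5-dword WRITE_LINEAR packet, NOP-padded to the 8-dword IB alignment */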
	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out\n");
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF) {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * sdma_v3_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (VI).
 */
static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	unsigned bytes = count * 8;
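	/* each PTE is 8 bytes, so the entries are moved with a plain LINEAR copy */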

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}

/**
 * sdma_v3_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (VI).
 */
static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * sdma_v3_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (VI).
 */
static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count; /* number of entries */
}

/**
 * sdma_v3_0_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 */
static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	u32 pad_count;
	int i;

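	/* pad to a multiple of 8 dwords; e.g. length_dw == 13 -> pad_count = 3 */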
	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}

/**
 * sdma_v3_0_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (VI).
 */
static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xfffffff); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}

/**
 * sdma_v3_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VM ID for the flush
 * @pasid: process address space ID
 * @pd_addr: page directory base address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VI).
 */
static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, unsigned pasid,
					 uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pasid, pd_addr);

	/* wait for flush */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

static void sdma_v3_0_ring_emit_wreg(struct amdgpu_ring *ring,
				     uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, val);
}

static int sdma_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_STONEY:
		adev->sdma.num_instances = 1;
		break;
	default:
		adev->sdma.num_instances = SDMA_MAX_INSTANCE;
		break;
	}

	sdma_v3_0_set_ring_funcs(adev);
	sdma_v3_0_set_buffer_funcs(adev);
	sdma_v3_0_set_vm_pte_funcs(adev);
	sdma_v3_0_set_irq_funcs(adev);

	return 0;
}

static int sdma_v3_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	r = sdma_v3_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		if (!amdgpu_sriov_vf(adev)) {
			ring->use_doorbell = true;
			ring->doorbell_index = (i == 0) ?
				AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
		} else {
			ring->use_pollmem = true;
		}

		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_TRAP0 :
				     AMDGPU_SDMA_IRQ_TRAP1);
		if (r)
			return r;
	}

	return r;
}

static int sdma_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	sdma_v3_0_free_microcode(adev);
	return 0;
}

static int sdma_v3_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v3_0_init_golden_registers(adev);

	r = sdma_v3_0_start(adev);
	if (r)
		return r;

	return r;
}

static int sdma_v3_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v3_0_ctx_switch_enable(adev, false);
	sdma_v3_0_enable(adev, false);

	return 0;
}

static int sdma_v3_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v3_0_hw_fini(adev);
}

static int sdma_v3_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v3_0_hw_init(adev);
}

static bool sdma_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}

static int sdma_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
				SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static bool sdma_v3_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if ((tmp & SRBM_STATUS2__SDMA_BUSY_MASK) ||
	    (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)) {
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		adev->sdma.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->sdma.srbm_soft_reset = 0;
		return false;
	}
}

static int sdma_v3_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	if (!adev->sdma.srbm_soft_reset)
		return 0;

	srbm_soft_reset = adev->sdma.srbm_soft_reset;

	if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
	    REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
		sdma_v3_0_ctx_switch_enable(adev, false);
		sdma_v3_0_enable(adev, false);
	}

	return 0;
}

static int sdma_v3_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	if (!adev->sdma.srbm_soft_reset)
		return 0;

	srbm_soft_reset = adev->sdma.srbm_soft_reset;

	if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
	    REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
		sdma_v3_0_gfx_resume(adev);
		sdma_v3_0_rlc_resume(adev);
	}

	return 0;
}

static int sdma_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp;

	if (!adev->sdma.srbm_soft_reset)
		return 0;

	srbm_soft_reset = adev->sdma.srbm_soft_reset;

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_TRAP0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_TRAP1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}
	return 0;
}

static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static void sdma_v3_0_update_sdma_medium_grain_clock_gating(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;
	int i;

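	/* the SOFT_OVERRIDE bits force the clock on; clearing them lets MGCG gate the sDMA clock */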
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
			data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (data != temp)
				WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
		}
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
			data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK;

			if (data != temp)
				WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
		}
	}
}

static void sdma_v3_0_update_sdma_medium_grain_light_sleep(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;
	int i;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
			data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;

			if (temp != data)
				WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
		}
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
			data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;

			if (temp != data)
				WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
		}
	}
}

static int sdma_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		sdma_v3_0_update_sdma_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		sdma_v3_0_update_sdma_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int sdma_v3_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static void sdma_v3_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_SDMA_MGCG */
	data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[0]);
	if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;

	/* AMD_CG_SUPPORT_SDMA_LS */
	data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[0]);
	if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
		*flags |= AMD_CG_SUPPORT_SDMA_LS;
}

static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
	.name = "sdma_v3_0",
	.early_init = sdma_v3_0_early_init,
	.late_init = NULL,
	.sw_init = sdma_v3_0_sw_init,
	.sw_fini = sdma_v3_0_sw_fini,
	.hw_init = sdma_v3_0_hw_init,
	.hw_fini = sdma_v3_0_hw_fini,
	.suspend = sdma_v3_0_suspend,
	.resume = sdma_v3_0_resume,
	.is_idle = sdma_v3_0_is_idle,
	.wait_for_idle = sdma_v3_0_wait_for_idle,
	.check_soft_reset = sdma_v3_0_check_soft_reset,
	.pre_soft_reset = sdma_v3_0_pre_soft_reset,
	.post_soft_reset = sdma_v3_0_post_soft_reset,
	.soft_reset = sdma_v3_0_soft_reset,
	.set_clockgating_state = sdma_v3_0_set_clockgating_state,
	.set_powergating_state = sdma_v3_0_set_powergating_state,
	.get_clockgating_state = sdma_v3_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = false,
	.get_rptr = sdma_v3_0_ring_get_rptr,
	.get_wptr = sdma_v3_0_ring_get_wptr,
	.set_wptr = sdma_v3_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* sdma_v3_0_ring_emit_hdp_flush */
		3 + /* hdp invalidate */
		6 + /* sdma_v3_0_ring_emit_pipeline_sync */
		12 + /* sdma_v3_0_ring_emit_vm_flush */
		10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
	.emit_ib = sdma_v3_0_ring_emit_ib,
	.emit_fence = sdma_v3_0_ring_emit_fence,
	.emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
	.test_ring = sdma_v3_0_ring_test_ring,
	.test_ib = sdma_v3_0_ring_test_ib,
	.insert_nop = sdma_v3_0_ring_insert_nop,
	.pad_ib = sdma_v3_0_ring_pad_ib,
	.emit_wreg = sdma_v3_0_ring_emit_wreg,
};

static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
	.set = sdma_v3_0_set_trap_irq_state,
	.process = sdma_v3_0_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {
	.process = sdma_v3_0_process_illegal_inst_irq,
};

static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
}

/**
 * sdma_v3_0_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (VI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = byte_count;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * sdma_v3_0_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (VI).
 */
static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count;
}

static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
	.copy_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */
1721 1722 1723
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v3_0_emit_copy_buffer,

	.fill_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v3_0_emit_fill_buffer,
};

static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
	.copy_pte_num_dw = 7,
	.copy_pte = sdma_v3_0_vm_copy_pte,

	.write_pte = sdma_v3_0_vm_write_pte,
	.set_pte_pde = sdma_v3_0_vm_set_pte_pde,
};

static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++)
			adev->vm_manager.vm_pte_rings[i] =
				&adev->sdma.instance[i].ring;

		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
	}
}

const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &sdma_v3_0_ip_funcs,
};

const struct amdgpu_ip_block_version sdma_v3_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &sdma_v3_0_ip_funcs,
};