/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"
#define FIRMWARE_SIENNA_CICHLID	"amdgpu/sienna_cichlid_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

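/**
 * amdgpu_vcn_sw_init - initialize VCN software state
 *
 * @adev: amdgpu device pointer
 *
 * Select, request and validate the VCN firmware for the current ASIC,
 * log its version, then allocate the per-instance VCPU, DPG indirect
 * SRAM and firmware shared buffer objects in VRAM.
 */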
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size, fw_shared_bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int i, r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
	mutex_init(&adev->vcn.vcn_pg_lock);
	atomic_set(&adev->vcn.total_submission_cnt, 0);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	case CHIP_ARCTURUS:
		fw_name = FIRMWARE_ARCTURUS;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_RENOIR:
		fw_name = FIRMWARE_RENOIR;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI10:
		fw_name = FIRMWARE_NAVI10;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI14:
		fw_name = FIRMWARE_NAVI14;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI12:
		fw_name = FIRMWARE_NAVI12;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_SIENNA_CICHLID:
		fw_name = FIRMWARE_SIENNA_CICHLID;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bits 20-23 hold the encode major version and are non-zero in the new
	 * naming convention. In the old naming convention this field was part of
	 * the version minor and DRM_DISABLED_FLAG; since the latest version minor
	 * is 0x5B and DRM_DISABLED_FLAG is zero there, the field has always been
	 * zero so far. These four bits therefore tell which naming convention a
	 * given firmware uses.
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
			enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
			version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
						&adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
			return r;
		}

		if (adev->vcn.indirect_sram) {
			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
					AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
					&adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
			if (r) {
				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
				return r;
			}
		}

		r = amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)),
				PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].fw_shared_bo,
				&adev->vcn.inst[i].fw_shared_gpu_addr, &adev->vcn.inst[i].fw_shared_cpu_addr);
		if (r) {
			dev_err(adev->dev, "VCN %d (%d) failed to allocate firmware shared bo\n", i, r);
			return r;
		}

		fw_shared_bo_size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
		adev->vcn.inst[i].saved_shm_bo = kvmalloc(fw_shared_bo_size, GFP_KERNEL);
	}

	return 0;
}

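/**
 * amdgpu_vcn_sw_fini - tear down VCN software state
 *
 * @adev: amdgpu device pointer
 *
 * Cancel the idle work, free the per-instance buffer objects and rings,
 * release the firmware and destroy the powergating lock.
 */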
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		kvfree(adev->vcn.inst[j].saved_shm_bo);
		amdgpu_bo_free_kernel(&adev->vcn.inst[j].fw_shared_bo,
					  &adev->vcn.inst[j].fw_shared_gpu_addr,
					  (void **)&adev->vcn.inst[j].fw_shared_cpu_addr);

		if (adev->vcn.indirect_sram) {
			amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
						  &adev->vcn.inst[j].dpg_sram_gpu_addr,
						  (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
		}
		kvfree(adev->vcn.inst[j].saved_bo);

		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
					  &adev->vcn.inst[j].gpu_addr,
					  (void **)&adev->vcn.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
	}

	release_firmware(adev->vcn.fw);
	mutex_destroy(&adev->vcn.vcn_pg_lock);

	return 0;
}

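/**
 * amdgpu_vcn_suspend - save VCN state for suspend
 *
 * @adev: amdgpu device pointer
 *
 * Copy the VCPU and firmware shared buffer contents of every active
 * instance into CPU-side backup allocations.
 */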
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return 0;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->vcn.inst[i].saved_bo)
			return -ENOMEM;

		memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);

		if (adev->vcn.inst[i].fw_shared_bo == NULL)
			return 0;

		if (!adev->vcn.inst[i].saved_shm_bo)
			return -ENOMEM;

		size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
		ptr = adev->vcn.inst[i].fw_shared_cpu_addr;

		memcpy_fromio(adev->vcn.inst[i].saved_shm_bo, ptr, size);
	}
	return 0;
}

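/**
 * amdgpu_vcn_resume - restore VCN state on resume
 *
 * @adev: amdgpu device pointer
 *
 * Copy the saved buffer contents back into VRAM; if no backup exists,
 * reload the ucode from the firmware image and zero the remainder.
 */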
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		if (adev->vcn.inst[i].saved_bo != NULL) {
			memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
			kvfree(adev->vcn.inst[i].saved_bo);
			adev->vcn.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
		}

		if (adev->vcn.inst[i].fw_shared_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
		ptr = adev->vcn.inst[i].fw_shared_cpu_addr;

		if (adev->vcn.inst[i].saved_shm_bo != NULL)
			memcpy_toio(ptr, adev->vcn.inst[i].saved_shm_bo, size);
		else
			memset_io(ptr, 0, size);
	}
	return 0;
}

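/*
 * Idle worker: power-gate VCN once no fences are pending on any decode
 * or encode ring and no submission is in flight. In DPG mode the
 * firmware is paused or unpaused first; while busy, the work simply
 * reschedules itself.
 */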
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
		}

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			struct dpg_pause_state new_state;

			if (fence[j] ||
				unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

			adev->vcn.pause_dpg_mode(adev, j, &new_state);
		}

		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
		fences += fence[j];
	}

	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
		       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

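/**
 * amdgpu_vcn_ring_begin_use - keep VCN powered up for a submission
 *
 * @ring: ring the job will be submitted to
 *
 * Bump the submission count, cancel the idle worker, ungate the block
 * and, in DPG mode, select the pause state matching the ring type.
 */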
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	atomic_inc(&adev->vcn.total_submission_cnt);
	cancel_delayed_work_sync(&adev->vcn.idle_work);

	mutex_lock(&adev->vcn.vcn_pg_lock);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
	       AMD_PG_STATE_UNGATE);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		} else {
			unsigned int fences = 0;
			unsigned int i;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);

			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
		}

		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
	}
	mutex_unlock(&adev->vcn.vcn_pg_lock);
}

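/**
 * amdgpu_vcn_ring_end_use - drop the reference taken in begin_use
 *
 * @ring: ring the job was submitted to
 *
 * Decrement the submission counters and re-arm the idle worker so the
 * block can be gated again after VCN_IDLE_TIMEOUT.
 */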
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
		ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

	atomic_dec(&ring->adev->vcn.total_submission_cnt);

	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

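/**
 * amdgpu_vcn_dec_ring_test_ring - scratch register write/read test
 *
 * @ring: decode ring to test
 *
 * Write a token to the scratch register through the ring and poll the
 * register until the token shows up or the timeout expires.
 */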
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* VCN in SRIOV does not support direct register read/write */
	if (amdgpu_sriov_vf(adev))
		return 0;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

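/*
 * Submit a decoder message buffer as a direct IB: program the message
 * address into the data0/data1 register pair, issue the command and pad
 * with NOPs. The BO is fenced against the job and released afterwards.
 */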
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

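/*
 * Build a decoder create-session message for @handle in a new VRAM
 * buffer and submit it through amdgpu_vcn_dec_send_msg().
 */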
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

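/*
 * Build the matching decoder destroy-session message for @handle and
 * submit it the same way.
 */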
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

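/**
 * amdgpu_vcn_dec_ring_test_ib - decode indirect buffer test
 *
 * @ring: decode ring to test
 * @timeout: how long to wait for the destroy-message fence
 *
 * Send a create-session message followed by a destroy-session message
 * and wait for the fence of the latter.
 */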
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

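/**
 * amdgpu_vcn_enc_ring_test_ring - encode ring liveness test
 *
 * @ring: encode ring to test
 *
 * Commit an END command and poll the read pointer until the ring has
 * consumed it.
 */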
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

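/*
 * Emit an encoder create-session IB for @handle, with the session info
 * buffer at @bo, and submit it directly to the ring.
 */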
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_bo *bo,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

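/*
 * Emit the matching encoder close-session IB for @handle and submit it
 * directly to the ring.
 */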
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_bo *bo,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

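/**
 * amdgpu_vcn_enc_ring_test_ib - encode indirect buffer test
 *
 * @ring: encode ring to test
 * @timeout: how long to wait for the close-session fence
 *
 * Allocate a scratch BO, run the create/close session pair against it
 * and wait for the final fence.
 */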
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}