/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware versions for VI */
#define FW_1_65_10	((1 << 24) | (65 << 16) | (10 << 8))
#define FW_1_87_11	((1 << 24) | (87 << 16) | (11 << 8))
#define FW_1_87_12	((1 << 24) | (87 << 16) | (12 << 8))
#define FW_1_37_15	((1 << 24) | (37 << 16) | (15 << 8))
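
/*
 * These version macros pack (major << 24) | (minor << 16) | (revision << 8),
 * mirroring the layout of the ucode_version field in the firmware header;
 * FW_1_65_10 above, for example, evaluates to 0x01410a00.
 */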

/* Polaris10/11 firmware version */
#define FW_1_66_16	((1 << 24) | (66 << 16) | (16 << 8))

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"amdgpu/bonaire_uvd.bin"
#define FIRMWARE_KABINI	"amdgpu/kabini_uvd.bin"
#define FIRMWARE_KAVERI	"amdgpu/kaveri_uvd.bin"
#define FIRMWARE_HAWAII	"amdgpu/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"amdgpu/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_uvd.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_uvd.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_uvd.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_uvd.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_uvd.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_uvd.bin"

/* These are common relative offsets for all ASICs, from uvd_7_0_offset.h */
#define UVD_GPCOM_VCPU_CMD		0x03c3
#define UVD_GPCOM_VCPU_DATA0	0x03c4
#define UVD_GPCOM_VCPU_DATA1	0x03c5
#define UVD_NO_OP				0x03ff
#define UVD_BASE_SI				0x3800

/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned reg, count;
	unsigned data0, data1;
	unsigned idx;
	unsigned ib_idx;

	/* does the IB have a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

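/**
 * amdgpu_uvd_sw_init - UVD software initialization
 *
 * @adev: amdgpu_device pointer
 *
 * Load the firmware, allocate the VCPU buffer object and set up the
 * ring, scheduler entity and handle bookkeeping for each UVD instance.
 */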
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned family_id;
	int i, j, r;

	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}

	/* Set the default UVD handles that the firmware can handle */
	adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;

	if (adev->asic_type < CHIP_VEGA20) {
		unsigned version_major, version_minor;

		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
			version_major, version_minor, family_id);

		/*
		 * Limit the number of UVD handles depending on microcode major
		 * and minor versions. Support for 40 UVD handles was added in
		 * firmware version 1.80, so all subsequent versions also
		 * provide it.
		 */
		if ((version_major > 0x01) ||
		    ((version_major == 0x01) && (version_minor >= 0x50)))
			adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

		adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
					(family_id << 8));

		if ((adev->asic_type == CHIP_POLARIS10 ||
		     adev->asic_type == CHIP_POLARIS11) &&
		    (adev->uvd.fw_version < FW_1_66_16))
			DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
				  version_major, version_minor);
	} else {
		unsigned int enc_major, enc_minor, dec_minor;

		dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
		enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
		DRM_INFO("Found UVD firmware ENC: %hu.%hu DEC: .%hu Family ID: %hu\n",
			enc_major, enc_minor, dec_minor, family_id);

		adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

		adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version);
	}

	bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
		  +  AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
					    &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
			return r;
		}

		ring = &adev->uvd.inst[j].ring;
		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
		r = drm_sched_entity_init(&adev->uvd.inst[j].entity, &rq,
					  1, NULL);
		if (r != 0) {
			DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
			return r;
		}

		for (i = 0; i < adev->uvd.max_handles; ++i) {
			atomic_set(&adev->uvd.inst[j].handles[i], 0);
			adev->uvd.inst[j].filp[i] = NULL;
		}
	}
	/* from UVD v5.0, HW addressing capacity increased to 64 bits */
	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	switch (adev->asic_type) {
	case CHIP_TONGA:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
		break;
	case CHIP_CARRIZO:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
		break;
	case CHIP_FIJI:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
		break;
	case CHIP_STONEY:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
		break;
	default:
		adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
	}

	return 0;
}

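/**
 * amdgpu_uvd_sw_fini - UVD software teardown
 *
 * @adev: amdgpu_device pointer
 *
 * Destroy the scheduler entities, free the VCPU buffer objects and
 * rings of each UVD instance and release the firmware.
 */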
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		kfree(adev->uvd.inst[j].saved_bo);

		drm_sched_entity_destroy(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);

		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
				      &adev->uvd.inst[j].gpu_addr,
				      (void **)&adev->uvd.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->uvd.inst[j].ring);

		for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	release_firmware(adev->uvd.fw);

	return 0;
}

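/**
 * amdgpu_uvd_suspend - save UVD VCPU state
 *
 * @adev: amdgpu_device pointer
 *
 * Cancel the idle work and save the contents of each VCPU buffer
 * object to system memory so it survives suspend.
 */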
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, j;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.inst[j].vcpu_bo == NULL)
			continue;

		/* only valid for physical mode */
		if (adev->asic_type < CHIP_POLARIS10) {
			for (i = 0; i < adev->uvd.max_handles; ++i)
				if (atomic_read(&adev->uvd.inst[j].handles[i]))
					break;

			if (i == adev->uvd.max_handles)
				continue;
		}

		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
		ptr = adev->uvd.inst[j].cpu_addr;

		adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
		if (!adev->uvd.inst[j].saved_bo)
			return -ENOMEM;

		memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
	}
	return 0;
}

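/**
 * amdgpu_uvd_resume - restore UVD VCPU state
 *
 * @adev: amdgpu_device pointer
 *
 * Restore each VCPU buffer object, either from the copy saved at
 * suspend time or by reloading the firmware image.
 */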
int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
		ptr = adev->uvd.inst[i].cpu_addr;

		if (adev->uvd.inst[i].saved_bo != NULL) {
			memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
			kfree(adev->uvd.inst[i].saved_bo);
			adev->uvd.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
			/* to restore uvd fence seq */
			amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
		}
	}
	return 0;
}

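/**
 * amdgpu_uvd_free_handles - free all handles owned by a file
 *
 * @adev: amdgpu_device pointer
 * @filp: DRM file the handles belong to
 *
 * Send a destroy message for every open handle created through @filp,
 * typically when the file is closed.
 */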
void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring;
	int i, j, r;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		ring = &adev->uvd.inst[j].ring;

		for (i = 0; i < adev->uvd.max_handles; ++i) {
			uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);
			if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
				struct dma_fence *fence;

				r = amdgpu_uvd_get_destroy_msg(ring, handle,
							       false, &fence);
				if (r) {
					DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
					continue;
				}

				dma_fence_wait(fence, false);
				dma_fence_put(fence);

				adev->uvd.inst[j].filp[i] = NULL;
				atomic_set(&adev->uvd.inst[j].handles[i], 0);
			}
		}
	}
}

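/**
 * amdgpu_uvd_force_into_uvd_segment - restrict BO placement
 *
 * @abo: buffer object to restrict
 *
 * Clamp all placements of @abo to the first 256MB, as required by UVD
 * variants without 64 bit addressing.
 */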
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
{
	int i;
	for (i = 0; i < abo->placement.num_placement; ++i) {
		abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

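/**
 * amdgpu_uvd_get_addr_from_ctx - assemble a 64 bit address
 *
 * @ctx: UVD parser context
 *
 * Combine the DATA0 (low) and DATA1 (high) register writes recorded in
 * the context into a single 64 bit GPU address.
 */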
static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
{
	uint32_t lo, hi;
	uint64_t addr;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	return addr;
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct ttm_operation_ctx tctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r = 0;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
			amdgpu_ttm_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @adev: amdgpu_device pointer
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
	unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned min_ctx_size = ~0;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);
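	/*
	 * Worked example: a hypothetical 1920x1088 H264 level 4.1 stream
	 * gives width_in_mb = 120, height_in_mb = 68 and fs_in_mb = 8160,
	 * so image_size = ALIGN(1920 * 1088 * 3 / 2, 1024) = 3133440 bytes
	 * and, below, num_dpb_buffer = 32768 / 8160 + 1 = 5.
	 */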

	switch (stream_type) {
	case 0: /* H264 */
		switch(level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 7: /* H264 Perf */
		switch(level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		if (!adev->uvd.use_ctx_buf) {
			/* macroblock context buffer */
			min_dpb_size +=
				width_in_mb * height_in_mb * num_dpb_buffer * 192;

			/* IT surface buffer */
			min_dpb_size += width_in_mb * height_in_mb * 32;
		} else {
			/* macroblock context buffer */
			min_ctx_size =
				width_in_mb * height_in_mb * num_dpb_buffer * 192;
		}
		break;

	case 8: /* MJPEG */
		min_dpb_size = 0;
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
					   * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	return 0;
}

/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	void *ptr;
	long r;
	int i;
	uint32_t ip_instance = ctx->parser->job->ring->me;

	if (offset & 0x3F) {
		DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
		return -EINVAL;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, calc image size (width * height) */
		amdgpu_bo_kunmap(bo);

		/* try to alloc a new handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
				DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
				adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
		return -ENOSPC;

	case 1:
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
				if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
					DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i)
			atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;

	default:
		DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
		return -EINVAL;
	}
	BUG();
	return -EINVAL;
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t start, end;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->last + 1 - mapping->start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
			    lower_32_bits(start));
	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
			    upper_32_bits(start));

	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
					  (unsigned)(end - start),
					  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are send!\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
		case mmUVD_NO_OP:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int r;

	for (ctx->idx = 0 ; ctx->idx < ib->length_dw; ) {
		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);
		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 * @ib_idx: index of the IB to parse
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
	unsigned buf_sizes[] = {
		[0x00000000]	=	2048,
		[0x00000001]	=	0xFFFFFFFF,
		[0x00000002]	=	0xFFFFFFFF,
		[0x00000003]	=	2048,
		[0x00000004]	=	0xFFFFFFFF,
	};
	struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
	int r;

	parser->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;

	/* first round only required on chips without UVD 64 bit address support */
	if (!parser->adev->uvd.address_64_bit) {
		/* first round, make sure the buffers are actually in the UVD segment */
		r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
		if (r)
			return r;
	}

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}

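/**
 * amdgpu_uvd_send_msg - submit a UVD message buffer
 *
 * @ring: UVD ring to submit on
 * @bo: buffer object containing the message
 * @direct: submit directly to the ring or go through the scheduler
 * @fence: optional fence to return
 *
 * Build a small IB pointing the VCPU at the message buffer and
 * submit it.
 */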
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint32_t data[4];
	uint64_t addr;
	long r;
	int i;
	unsigned offset_idx = 0;
	unsigned offset[3] = { UVD_BASE_SI, 0, 0 };

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);

	if (!ring->adev->uvd.address_64_bit) {
		struct ttm_operation_ctx ctx = { true, false };

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			goto err;
	}

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	if (adev->asic_type >= CHIP_VEGA10) {
		offset_idx = 1 + ring->me;
		offset[1] = adev->reg_offset[UVD_HWIP][0][1];
		offset[2] = adev->reg_offset[UVD_HWIP][1][1];
	}

	data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);
	data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0);
	data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0);
	data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = data[0];
	ib->ptr[1] = addr;
	ib->ptr[2] = data[1];
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = data[2];
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = data[3];
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	if (direct) {
		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
							true, false,
							msecs_to_jiffies(10));
		if (r == 0)
			r = -ETIMEDOUT;
		if (r < 0)
			goto err_free;

		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err_free;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
		if (r)
			goto err_free;

		r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

/*
 * Multiple fence commands without any stream commands in between can
 * crash the vcpu so just try to emit a dummy create/destroy msg to
 * avoid this.
 */
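/**
 * amdgpu_uvd_get_create_msg - generate a UVD create msg
 *
 * @ring: ring to submit the msg on
 * @handle: session handle to use for the stream
 * @fence: optional fence to return
 */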
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	/* stitch together an UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_uvd_send_msg(ring, bo, true, fence);
}

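/**
 * amdgpu_uvd_get_destroy_msg - generate a UVD destroy msg
 *
 * @ring: ring to submit the msg on
 * @handle: session handle to destroy
 * @direct: submit directly to the ring or go through the scheduler
 * @fence: optional fence to return
 */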
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	/* stitch together an UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_uvd_send_msg(ring, bo, direct, fence);
}

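/**
 * amdgpu_uvd_idle_work_handler - power down UVD when idle
 *
 * @work: delayed work item
 *
 * Gate UVD clocks and power if no fences are pending on any UVD ring,
 * otherwise reschedule the idle work.
 */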
static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);
	unsigned fences = 0, i, j;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
		for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
			fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
		}
	}

	if (fences == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
			/* shutdown the UVD block */
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
	}
}

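/**
 * amdgpu_uvd_ring_begin_use - power up UVD before use
 *
 * @ring: UVD ring about to be used
 *
 * Cancel the pending idle work and ungate UVD clocks and power if the
 * block was idle.
 */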
void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_PG_STATE_UNGATE);
		}
	}
}

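/**
 * amdgpu_uvd_ring_end_use - schedule UVD power down after use
 *
 * @ring: UVD ring that was just used
 */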
void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}

/**
 * amdgpu_uvd_ring_test_ib - test ib execution
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully execute an IB
 */
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;
	uint32_t ip_instance = ring->me;

	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r);
		goto error;
	}

	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance);
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r);
	} else {
		DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx);
		r = 0;
	}

	dma_fence_put(fence);

error:
	return r;
}

/**
 * amdgpu_uvd_used_handles - returns used UVD handles
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the number of UVD handles in use
 */
uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
{
	unsigned i;
	uint32_t used_handles = 0;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		/*
		 * Handles can be freed in any order, and not
		 * necessarily linear. So we need to count
		 * all non-zero handles.
		 */
		if (atomic_read(&adev->uvd.inst->handles[i]))
			used_handles++;
	}

	return used_handles;
}