/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_trace.h"

#define AMDGPU_IB_TEST_TIMEOUT	msecs_to_jiffies(1000)
#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT	msecs_to_jiffies(2000)

/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
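
/*
 * Illustrative lifecycle sketch, not code from this file; the pool
 * choice, the size and the error handling are example assumptions:
 *
 *	struct amdgpu_ib ib = {};
 *	struct dma_fence *f = NULL;
 *	int r;
 *
 *	r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DELAYED, &ib);
 *	if (r)
 *		return r;
 *	(fill ib.ptr with packets and set ib.length_dw)
 *	r = amdgpu_ib_schedule(ring, 1, &ib, job, &f);
 *	(wait or sync on f as needed)
 *	amdgpu_ib_free(adev, &ib, f);
 *	dma_fence_put(f);
 */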

/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: the VM the IB belongs to, may be NULL
 * @size: requested IB size
 * @pool_type: IB pool type (delayed, immediate, direct)
 * @ib: IB object returned
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, enum amdgpu_ib_pool_type pool_type,
		  struct amdgpu_ib *ib)
{
	int r;

	if (size) {
		r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
				      &ib->sa_bo, size, 256);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);

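		/* Without a VM the suballocator GPU address is used directly;
		 * with a VM the caller sets gpu_addr from the VM mapping
		 * (e.g. the CS ioctl passes the mapped VA).
		 */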
		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	}

	return 0;
}

/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 * @f: fence the IB's suballocation must wait on before being reused
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f)
{
	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}

/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are scheduled on
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @job: job the IBs belong to, may be NULL for ring tests
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.  An illustrative sketch follows the
 * function body below.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib *ib = &ibs[0];
	struct dma_fence *tmp = NULL;
	bool skip_preamble, need_ctx_switch;
	unsigned patch_offset = ~0;
	struct amdgpu_vm *vm;
	uint64_t fence_ctx;
	uint32_t status = 0, alloc_size;
	unsigned fence_flags = 0;
	bool secure;

	unsigned i;
	int r = 0;
	bool need_pipe_sync = false;

	if (num_ibs == 0)
		return -EINVAL;

	/* ring tests don't use a job */
	if (job) {
		vm = job->vm;
		fence_ctx = job->base.s_fence ?
			job->base.s_fence->scheduled.context : 0;
	} else {
		vm = NULL;
		fence_ctx = 0;
	}

	if (!ring->sched.ready) {
		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
		return -EINVAL;
	}

	if (vm && !job->vmid) {
		dev_err(adev->dev, "VM IB without ID\n");
		return -EINVAL;
	}

	if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
	    (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)) {
		dev_err(adev->dev, "secure submissions not supported on compute rings\n");
		return -EINVAL;
	}

	alloc_size = ring->funcs->emit_frame_size + num_ibs *
		ring->funcs->emit_ib_size;

	r = amdgpu_ring_alloc(ring, alloc_size);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

	need_ctx_switch = ring->current_ctx != fence_ctx;
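	/* A pipeline sync drains the GPU pipeline before the IBs execute.
	 * It is needed when the scheduler recorded an unsignaled dependency
	 * fence, on a context switch under SR-IOV, or when the VM management
	 * code requests one (summary of the condition below).
	 */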
	if (ring->funcs->emit_pipeline_sync && job &&
	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
	     (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
	     amdgpu_vm_need_pipeline_sync(ring, job))) {
		need_pipe_sync = true;

		if (tmp)
			trace_amdgpu_ib_pipe_sync(job, tmp);

		dma_fence_put(tmp);
	}

	if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
		ring->funcs->emit_mem_sync(ring);

	if (ring->funcs->insert_start)
		ring->funcs->insert_start(ring);

	if (job) {
		r = amdgpu_vm_flush(ring, job, need_pipe_sync);
		if (r) {
			amdgpu_ring_undo(ring);
			return r;
		}
	}

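	/* Reserve a conditional-execution packet that is patched later via
	 * amdgpu_ring_patch_cond_exec(); this supports mid-command-buffer
	 * preemption.
	 */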
	if (job && ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

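	/* Flush the HDP (Host Data Path) so that CPU writes are visible to
	 * the GPU before the IBs execute; assumed unnecessary on x86_64
	 * APUs, which is why they are skipped here.
	 */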
#ifdef CONFIG_X86_64
	if (!(adev->flags & AMD_IS_APU))
#endif
	{
		if (ring->funcs->emit_hdp_flush)
			amdgpu_ring_emit_hdp_flush(ring);
		else
			amdgpu_asic_flush_hdp(adev, ring);
	}

	if (need_ctx_switch)
		status |= AMDGPU_HAVE_CTX_SWITCH;

	skip_preamble = ring->current_ctx == fence_ctx;
	if (job && ring->funcs->emit_cntxcntl) {
		status |= job->preamble_status;
		status |= job->preemption_status;
		amdgpu_ring_emit_cntxcntl(ring, status);
	}

	/* Set up the initial TMZ (Trusted Memory Zone) state and send it off.
	 */
	secure = false;
	if (job && ring->funcs->emit_frame_cntl) {
		secure = ib->flags & AMDGPU_IB_FLAGS_SECURE;
		amdgpu_ring_emit_frame_cntl(ring, true, secure);
	}

	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		/* drop preamble IBs if we don't have a context switch */
		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
		    skip_preamble &&
		    !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
		    !amdgpu_mcbp &&
		    !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
			continue;

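		/* Toggle the TMZ frame control when this IB's secure flag
		 * differs from the current state: close the current frame
		 * and open a new one with the matching setting.
		 */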
		if (job && ring->funcs->emit_frame_cntl) {
			if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
				amdgpu_ring_emit_frame_cntl(ring, false, secure);
				secure = !secure;
				amdgpu_ring_emit_frame_cntl(ring, true, secure);
			}
		}

		amdgpu_ring_emit_ib(ring, job, ib, status);
		status &= ~AMDGPU_HAVE_CTX_SWITCH;
	}

	if (job && ring->funcs->emit_frame_cntl)
		amdgpu_ring_emit_frame_cntl(ring, false, secure);

#ifdef CONFIG_X86_64
	if (!(adev->flags & AMD_IS_APU))
#endif
		amdgpu_asic_invalidate_hdp(adev, ring);

	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	/* wrap the last IB with fence */
	if (job && job->uf_addr) {
		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
	}

	r = amdgpu_fence_emit(ring, f, fence_flags);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		if (job && job->vmid)
			amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
		amdgpu_ring_undo(ring);
		return r;
	}

	if (ring->funcs->insert_end)
		ring->funcs->insert_end(ring);

	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	ring->current_ctx = fence_ctx;
	if (vm && ring->funcs->emit_switch_buffer)
		amdgpu_ring_emit_switch_buffer(ring);
	amdgpu_ring_commit(ring);
	return 0;
}
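
/*
 * Illustrative sketch referenced from the amdgpu_ib_schedule() comment
 * above; example assumptions: two pre-filled gfx IBs where ibs[0] is the
 * CE IB (the CONST_IB, tagged with AMDGPU_IB_FLAG_CE) and ibs[1] is the
 * DE IB, so the CONST_IB reaches the ring first:
 *
 *	struct amdgpu_ib ibs[2];
 *	struct dma_fence *f = NULL;
 *
 *	r = amdgpu_ib_schedule(gfx_ring, 2, ibs, job, &f);
 */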

/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
	unsigned size;
	int r, i;

	if (adev->ib_pool_ready)
		return 0;

	for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
		if (i == AMDGPU_IB_POOL_DIRECT)
			size = PAGE_SIZE * 2;
		else
			size = AMDGPU_IB_POOL_SIZE;

		r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
					      size, AMDGPU_GPU_PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT);
		if (r)
			goto error;
	}
	adev->ib_pool_ready = true;

	return 0;

error:
	while (i--)
		amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
	return r;
}

/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	int i;

	if (!adev->ib_pool_ready)
		return;

	for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
		amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
	adev->ib_pool_ready = false;
}

/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	long tmo_gfx, tmo_mm;
	int r, ret = 0;
	unsigned i;

	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
	if (amdgpu_sriov_vf(adev)) {
		/* On the hypervisor side, the MM engines are not scheduled
		 * together with the CP and SDMA engines, so even in exclusive
		 * mode an MM engine could still be running on another VF. The
		 * IB test timeout for MM engines under SR-IOV therefore needs
		 * to be long; 8 seconds should be enough for the MM engines to
		 * come back to this VF.
		 */
		tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}

	if (amdgpu_sriov_runtime(adev)) {
		/* The CP and SDMA engines are scheduled together, so the
		 * timeout needs to be wide enough to cover the time spent
		 * waiting for them to come back under RUNTIME only.
		 */
		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
	} else if (adev->gmc.xgmi.hive_id) {
		tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
	}

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		long tmo;

		/* KIQ rings don't have an IB test because we never submit IBs
		 * to them and they have no interrupt support.
		 */
		if (!ring->sched.ready || !ring->funcs->test_ib)
			continue;

		/* MM engines need more time */
		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
			ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
			ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
			ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
			ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
			ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			tmo = tmo_mm;
		else
			tmo = tmo_gfx;

		r = amdgpu_ring_test_ib(ring, tmo);
		if (!r) {
			DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
				      ring->name);
			continue;
		}

		ring->sched.ready = false;
		DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
			  ring->name, r);

		if (ring == &adev->gfx.gfx_ring[0]) {
			/* oh, oh, that's really bad */
			adev->accel_working = false;
			return r;

		} else {
			ret = r;
		}
	}
	return ret;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "--------------------- DELAYED --------------------- \n");
	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
				     m);
	seq_printf(m, "-------------------- IMMEDIATE -------------------- \n");
	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
				     m);
	seq_printf(m, "--------------------- DIRECT ---------------------- \n");
	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
	{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};

#endif

int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
#else
	return 0;
#endif
}