/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */

#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"

static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	unsigned long size;
	int r;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &bo->tbo;
	/* One for TTM and one for the CS job */
	p->uf_entry.tv.num_shared = 2;

	drm_gem_object_put_unlocked(gobj);

	size = amdgpu_bo_size(bo);
	if (size != PAGE_SIZE || (data->offset + 8) > size) {
		r = -EINVAL;
		goto error_unref;
	}

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		r = -EINVAL;
		goto error_unref;
	}

	*offset = data->offset;

	return 0;

error_unref:
	amdgpu_bo_unref(&bo);
	return r;
}
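
/*
 * Illustrative note: the user fence chunk (struct drm_amdgpu_cs_chunk_fence
 * in the amdgpu UAPI) names a single-page GEM BO and a byte offset; the
 * checks above ensure the 8-byte sequence value written when the job
 * completes fits inside that page. For example, with a 4096-byte BO an
 * offset of 0x10 is accepted, while 4092 is rejected since 4092 + 8 > 4096.
 */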

static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_bo_list_in *data)
{
	int r;
	struct drm_amdgpu_bo_list_entry *info = NULL;

	r = amdgpu_bo_create_list_entry_array(data, &info);
	if (r)
		return r;

	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
				  &p->bo_list);
	if (r)
		goto error_free;

	kvfree(info);
	return 0;

error_free:
	if (info)
		kvfree(info);

	return r;
}

static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	mutex_lock(&p->ctx->lock);

	/* skip guilty context job */
	if (atomic_read(&p->ctx->guilty) == 1) {
		ret = -ECANCELED;
		goto free_chunk;
	}

	/* get chunks */
	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto free_chunk;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
			    GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				       sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			size = sizeof(struct drm_amdgpu_bo_list_in);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_entry.tv.bo)
		p->job->uf_addr = uf_offset;
	kfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kfree(chunk_array);

	return ret;
}
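
/*
 * Illustrative userspace-side sketch (not driver code) of the submission
 * layout that amdgpu_cs_parser_init() consumes; struct and field names
 * follow the amdgpu UAPI in amdgpu_drm.h, all values are placeholders:
 *
 *	struct drm_amdgpu_cs_chunk_ib ib_info = {
 *		.ip_type = AMDGPU_HW_IP_GFX,
 *		.va_start = ib_gpu_va,
 *		.ib_bytes = num_dw * 4,
 *	};
 *	struct drm_amdgpu_cs_chunk chunk = {
 *		.chunk_id = AMDGPU_CHUNK_ID_IB,
 *		.length_dw = sizeof(ib_info) / 4,
 *		.chunk_data = (uintptr_t)&ib_info,
 *	};
 *	uint64_t chunk_ptrs[] = { (uintptr_t)&chunk };
 *	union drm_amdgpu_cs cs = {
 *		.in.ctx_id = ctx_id,
 *		.in.num_chunks = 1,
 *		.in.chunks = (uintptr_t)chunk_ptrs,
 *	};
 */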

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}
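
/*
 * Worked example (illustrative only, numbers assumed): with
 * log2_max_MBps = 8, i.e. roughly 256 MB/s of measured transfer rate,
 * an accumulated allowance of 200000 us converts to
 * 200000 << 8 = 51200000 bytes (~51 MB) of buffer moves, while moving
 * 1 MiB charges 1048576 >> 8 = 4096 us back against that allowance.
 */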

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;

	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
                                      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;
			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}

static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
				 struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.resv,
		.flags = 0
	};
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_bo_in_cpu_visible_vram(bo))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
				struct amdgpu_bo *validated)
{
	uint32_t domain = validated->allowed_domains;
	struct ttm_operation_ctx ctx = { true, false };
	int r;

	if (!p->evictable)
		return false;

	for (;&p->evictable->tv.head != &p->validated;
	     p->evictable = list_prev_entry(p->evictable, tv.head)) {

		struct amdgpu_bo_list_entry *candidate = p->evictable;
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo);
		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
		bool update_bytes_moved_vis;
		uint32_t other;

		/* If we reached our current BO we can forget it */
		if (bo == validated)
			break;

		/* We can't move pinned BOs here */
		if (bo->pin_count)
			continue;

		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

		/* Check if this BO is in one of the domains we need space for */
		if (!(other & domain))
			continue;

		/* Check if we can move this BO somewhere else */
		other = bo->allowed_domains & ~domain;
		if (!other)
			continue;

		/* Good we can try to move this BO somewhere else */
		update_bytes_moved_vis =
				!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
				amdgpu_bo_in_cpu_visible_vram(bo);
		amdgpu_bo_placement_from_domain(bo, other);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		p->bytes_moved += ctx.bytes_moved;
		if (update_bytes_moved_vis)
			p->bytes_moved_vis += ctx.bytes_moved;

		if (unlikely(r))
			break;

		p->evictable = list_prev_entry(p->evictable, tv.head);
		list_move(&candidate->tv.head, &p->validated);

		return true;
	}

	return false;
}

static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_cs_parser *p = param;
	int r;

	do {
		r = amdgpu_cs_bo_validate(p, bo);
	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
	if (r)
		return r;

	if (bo->shadow)
		r = amdgpu_cs_bo_validate(p, bo->shadow);

	return r;
}

static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
			    struct list_head *validated)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
		bool binding_userptr = false;
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
		    lobj->user_invalidated && lobj->user_pages) {
			amdgpu_bo_placement_from_domain(bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				return r;

			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     lobj->user_pages);
			binding_userptr = true;
		}

		if (p->evictable == lobj)
			p->evictable = NULL;

		r = amdgpu_cs_validate(p, bo);
		if (r)
			return r;

		if (binding_userptr) {
			kvfree(lobj->user_pages);
			lobj->user_pages = NULL;
		}
	}
	return 0;
}

static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	struct amdgpu_bo *gds;
	struct amdgpu_bo *gws;
	struct amdgpu_bo *oa;
	int r;

	INIT_LIST_HEAD(&p->validated);

	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
	if (cs->in.bo_list_handle) {
		if (p->bo_list)
			return -EINVAL;

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
				       &p->bo_list);
		if (r)
			return r;
	} else if (!p->bo_list) {
		/* Create an empty bo_list when no handle is provided */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
					  &p->bo_list);
		if (r)
			return r;
	}

	/* One for TTM and one for the CS job */
	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->tv.num_shared = 2;

	amdgpu_bo_list_get_list(p->bo_list, &p->validated);
	if (p->bo_list->first_userptr != p->bo_list->num_entries)
		p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);

	/* Get userptr backing pages. If pages are updated after being registered
	 * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do
	 * amdgpu_ttm_backend_bind() to flush and invalidate new pages
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		bool userpage_invalidated = false;
		int i;

		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
					sizeof(struct page *),
					GFP_KERNEL | __GFP_ZERO);
		if (!e->user_pages) {
			DRM_ERROR("calloc failure\n");
			return -ENOMEM;
		}

		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, e->user_pages);
		if (r) {
			kvfree(e->user_pages);
			e->user_pages = NULL;
			return r;
		}

		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
				userpage_invalidated = true;
				break;
			}
		}
		e->user_invalidated = userpage_invalidated;
	}

	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
				   &duplicates, false);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
		goto out;
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;
	p->evictable = list_last_entry(&p->validated,
				       struct amdgpu_bo_list_entry,
				       tv.head);

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r)
		goto error_validate;

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r)
		goto error_validate;

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);

	gds = p->bo_list->gds_obj;
	gws = p->bo_list->gws_obj;
	oa = p->bo_list->oa_obj;

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		/* Make sure we use the exclusive slot for shared BOs */
		if (bo->prime_shared_count)
			e->tv.num_shared = 0;
		e->bo_va = amdgpu_vm_bo_find(vm, bo);
	}

	if (gds) {
		p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
		p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
	}
	if (gws) {
		p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
		p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
	}
	if (oa) {
		p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
	}

	if (!r && p->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

		r = amdgpu_ttm_alloc_gart(&uf->tbo);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r)
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
out:
	return r;
}

static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		struct reservation_object *resv = bo->tbo.resv;

		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
				     amdgpu_bo_explicit_sync(bo));

		if (r)
			return r;
	}
	return 0;
}

/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, then unreserve the buffers; otherwise just free the memory
 * used by the parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
				  bool backoff)
{
	unsigned i;

	if (error && backoff)
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);

	for (i = 0; i < parser->num_post_deps; i++) {
		drm_syncobj_put(parser->post_deps[i].syncobj);
		kfree(parser->post_deps[i].chain);
	}
	kfree(parser->post_deps);

	dma_fence_put(parser->fence);

	if (parser->ctx) {
		mutex_unlock(&parser->ctx->lock);
		amdgpu_ctx_put(parser->ctx);
	}
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	if (parser->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);

		amdgpu_bo_unref(&uf);
	}
}

static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
		unsigned i, j;

		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			struct amdgpu_cs_chunk *chunk;
			uint64_t offset, va_start;
			struct amdgpu_ib *ib;
			uint8_t *kptr;

			chunk = &p->chunks[i];
			ib = &p->job->ibs[j];
			chunk_ib = chunk->kdata;

			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
				continue;

			va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
			if (r) {
				DRM_ERROR("IB va_start is invalid\n");
				return r;
			}

			if ((va_start + chunk_ib->ib_bytes) >
			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r)
				return r;

			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
			kptr += va_start - offset;

			if (ring->funcs->parse_cs) {
				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
				amdgpu_bo_kunmap(aobj);

				r = amdgpu_ring_parse_cs(ring, p, j);
				if (r)
					return r;
			} else {
				ib->ptr = (uint32_t *)kptr;
				r = amdgpu_ring_patch_cs_in_place(ring, p, j);
				amdgpu_bo_kunmap(aobj);
				if (r)
					return r;
			}

			j++;
		}
	}

	if (!p->job->vm)
		return amdgpu_cs_sync_rings(p);


	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync,
			      fpriv->prt_va->last_pt_update, false);
	if (r)
		return r;

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		struct dma_fence *f;

		bo_va = fpriv->csa_va;
		BUG_ON(!bo_va);
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
		if (r)
			return r;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct dma_fence *f;

		/* ignore duplicates */
		bo = ttm_to_amdgpu_bo(e->tv.bo);
		if (!bo)
			continue;

		bo_va = e->bo_va;
		if (bo_va == NULL)
			continue;

		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
		if (r)
			return r;
	}

	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
	if (r)
		return r;

	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

	if (amdgpu_vm_debug) {
		/* Invalidate all BOs to test for userspace bugs */
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

			/* ignore duplicates */
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo, false);
		}
	}

	return amdgpu_cs_sync_rings(p);
}

static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int r, ce_preempt = 0, de_preempt = 0;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct drm_sched_entity *entity;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
		    (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
					ce_preempt++;
				else
					de_preempt++;
			}

			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
			if (ce_preempt > 1 || de_preempt > 1)
				return -EINVAL;
		}

		r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
					  chunk_ib->ip_instance, chunk_ib->ring,
					  &entity);
		if (r)
			return r;

		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
			parser->job->preamble_status |=
				AMDGPU_PREAMBLE_IB_PRESENT;

		if (parser->entity && parser->entity != entity)
			return -EINVAL;

		parser->entity = entity;

		ring = to_amdgpu_ring(entity->rq->sched);
		r =  amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
				   chunk_ib->ib_bytes : 0, ib);
		if (r) {
			DRM_ERROR("Failed to get ib !\n");
			return r;
		}

		ib->gpu_addr = chunk_ib->va_start;
		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;

		j++;
	}

	/* MM engine doesn't support user fences */
	ring = to_amdgpu_ring(parser->entity->rq->sched);
	if (parser->job->uf_addr && ring->funcs->no_user_fence)
		return -EINVAL;

	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
}

static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
				       struct amdgpu_cs_chunk *chunk)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_dep *deps;

	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ctx *ctx;
		struct drm_sched_entity *entity;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
					  deps[i].ip_instance,
					  deps[i].ring, &entity);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, entity,
					     deps[i].handle);

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
			struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
			struct dma_fence *old = fence;

			fence = dma_fence_get(&s_fence->scheduled);
			dma_fence_put(old);
		}

		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			amdgpu_ctx_put(ctx);
			return r;
		} else if (fence) {
			r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
					true);
			dma_fence_put(fence);
			amdgpu_ctx_put(ctx);
			if (r)
				return r;
		}
	}
	return 0;
}

static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
						 uint32_t handle, u64 point,
						 u64 flags)
{
	struct dma_fence *fence;
	int r;

	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
	if (r) {
		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
			  handle, point, r);
		return r;
	}

	r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
	dma_fence_put(fence);

	return r;
}

static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
					    struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps;
	unsigned num_deps;
	int i, r;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
							  0, 0);
		if (r)
			return r;
	}

	return 0;
}


static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
						     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
	unsigned num_deps;
	int i, r;

	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p,
							  syncobj_deps[i].handle,
							  syncobj_deps[i].point,
							  syncobj_deps[i].flags);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
					     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps;
	unsigned num_deps;
	int i;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;


	for (i = 0; i < num_deps; ++i) {
		p->post_deps[i].syncobj =
			drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_deps[i].syncobj)
			return -EINVAL;
		p->post_deps[i].chain = NULL;
		p->post_deps[i].point = 0;
		p->num_post_deps++;
	}

	return 0;
}


static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
						      struct amdgpu_cs_chunk
						      *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
	unsigned num_deps;
	int i;

	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];

		dep->chain = NULL;
		if (syncobj_deps[i].point) {
			dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
			if (!dep->chain)
				return -ENOMEM;
		}

		dep->syncobj = drm_syncobj_find(p->filp,
						syncobj_deps[i].handle);
		if (!dep->syncobj) {
			kfree(dep->chain);
			return -EINVAL;
		}
		dep->point = syncobj_deps[i].point;
		p->num_post_deps++;
	}

	return 0;
}

static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		switch (chunk->chunk_id) {
		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
			r = amdgpu_cs_process_fence_dep(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
			r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
			if (r)
				return r;
			break;
		}
	}

	return 0;
}

static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_deps; ++i) {
		if (p->post_deps[i].chain && p->post_deps[i].point) {
			drm_syncobj_add_point(p->post_deps[i].syncobj,
					      p->post_deps[i].chain,
					      p->fence, p->post_deps[i].point);
			p->post_deps[i].chain = NULL;
		} else {
			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
						  p->fence);
		}
	}
}

static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct drm_sched_entity *entity = p->entity;
	enum drm_sched_priority priority;
	struct amdgpu_ring *ring;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_job *job;
	uint64_t seq;
	int r;

	job = p->job;
	p->job = NULL;

	r = drm_sched_job_init(&job->base, entity, p->filp);
	if (r)
		goto error_unlock;

	/* No memory allocation is allowed while holding the mn lock.
	 * p->mn is held until amdgpu_cs_submit is finished and the fence is added
	 * to BOs.
	 */
	amdgpu_mn_lock(p->mn);

	/* If userptrs are invalidated after amdgpu_cs_parser_bos(), return
	 * -EAGAIN; drmIoctl() in libdrm will then restart the amdgpu_cs ioctl.
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
	}
	if (r) {
		r = -EAGAIN;
		goto error_abort;
	}

	job->owner = p->filp;
	p->fence = dma_fence_get(&job->base.s_fence->finished);

	amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
	amdgpu_cs_post_dependencies(p);

	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
	    !p->ctx->preamble_presented) {
		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
		p->ctx->preamble_presented = true;
	}

	cs->out.handle = seq;
	job->uf_sequence = seq;

	amdgpu_job_free_resources(job);

	trace_amdgpu_cs_ioctl(job);
	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
	priority = job->base.s_priority;
	drm_sched_entity_push_job(&job->base, entity);

	ring = to_amdgpu_ring(entity->rq->sched);
	amdgpu_ring_priority_get(ring, priority);

	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);

	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
	amdgpu_mn_unlock(p->mn);

	return 0;

error_abort:
	drm_sched_job_cleanup(&job->base);
	amdgpu_mn_unlock(p->mn);

error_unlock:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int i, r;

	if (!adev->accel_working)
		return -EBUSY;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser %d!\n", r);
		goto out;
	}

	r = amdgpu_cs_ib_fill(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_dependencies(adev, &parser);
	if (r) {
		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
		goto out;
	}

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS && r != -EAGAIN)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto out;
	}

	reserved_buffers = true;

	for (i = 0; i < parser.job->num_ibs; i++)
		trace_amdgpu_cs(&parser, i);

	r = amdgpu_cs_vm_handling(&parser);
	if (r)
		goto out;

	r = amdgpu_cs_submit(&parser, cs);

out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);

	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
				  wait->in.ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
				  user->ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	if (!fence)
		fence = dma_fence_get_stub();

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		return -EINVAL;
	}
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;

		if (fence->error)
			return fence->error;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;
	else
		r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
		sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}

/**
 * amdgpu_cs_find_mapping - find the BO and VM mapping for a GPU address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: resulting mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns 0 and fills in @bo and @map when the
 * mapping is found, an error code otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
		return -EINVAL;

	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
	}

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}