/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"

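/**
 * amdgpu_cs_user_fence_chunk - look up and check the user fence BO
 *
 * @p: parser context
 * @data: user fence chunk copied from user space
 * @offset: resulting byte offset of the fence value within the BO
 *
 * Looks up the GEM handle of the user fence buffer and takes a reference to
 * it for the parser. The BO must be exactly one page, the 8 byte fence value
 * must fit at the requested offset, and userptr BOs are rejected.
 */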
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	unsigned long size;
	int r;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &bo->tbo;
	p->uf_entry.tv.num_shared = 1;
	p->uf_entry.user_pages = NULL;

	drm_gem_object_put_unlocked(gobj);

	size = amdgpu_bo_size(bo);
	if (size != PAGE_SIZE || (data->offset + 8) > size) {
		r = -EINVAL;
		goto error_unref;
	}

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		r = -EINVAL;
		goto error_unref;
	}

	*offset = data->offset;

	return 0;

error_unref:
	amdgpu_bo_unref(&bo);
	return r;
}

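/**
 * amdgpu_cs_bo_handles_chunk - create the BO list from an in-chunk array
 *
 * @p: parser context
 * @data: BO list description copied from user space
 *
 * Turns an AMDGPU_CHUNK_ID_BO_HANDLES chunk into a bo_list, as an
 * alternative to passing a bo_list handle in the CS ioctl itself.
 */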
static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_bo_list_in *data)
{
	int r;
	struct drm_amdgpu_bo_list_entry *info = NULL;

	r = amdgpu_bo_create_list_entry_array(data, &info);
	if (r)
		return r;

	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
				  &p->bo_list);
	if (r)
		goto error_free;

	kvfree(info);
	return 0;

error_free:
	if (info)
		kvfree(info);

	return r;
}

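/**
 * amdgpu_cs_parser_init - copy in and validate the CS chunks
 *
 * @p: parser context
 * @cs: CS ioctl data from user space
 *
 * Copies the chunk array from user space, allocates kernel copies of all
 * chunks and dispatches the per-chunk-type handling. Also allocates the job
 * and takes the context lock, which amdgpu_cs_parser_fini() drops again.
 */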
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	/* skip guilty context job */
	if (atomic_read(&p->ctx->guilty) == 1) {
		ret = -ECANCELED;
		goto free_chunk;
	}

	mutex_lock(&p->ctx->lock);

	/* get chunks */
	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto free_chunk;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
			    GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				       sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			size = sizeof(struct drm_amdgpu_bo_list_in);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_entry.tv.bo)
		p->job->uf_addr = uf_offset;
	kfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kfree(chunk_array);

	return ret;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}
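
/* Worked example (illustrative numbers only): with log2_max_MBps == 6 the
 * budget accrues at 2^6 = 64 MB/s, i.e. 64 bytes per accumulated
 * microsecond, so the 200 ms accumulation cap used below corresponds to a
 * budget of roughly 12.8 MB of buffer moves.
 */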

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;

	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;
			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}

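/**
 * amdgpu_cs_bo_validate - validate a single BO against the move budget
 *
 * @p: parser context holding the accumulated move statistics
 * @bo: buffer object to validate
 *
 * Picks the preferred domains while the byte budgets computed by
 * amdgpu_cs_get_threshold_for_moves() still allow moves, and falls back to
 * the allowed domains (retrying on -ENOMEM) once they are depleted.
 */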
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
				 struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.resv,
		.flags = 0
	};
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_bo_in_cpu_visible_vram(bo))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
				struct amdgpu_bo *validated)
{
	uint32_t domain = validated->allowed_domains;
	struct ttm_operation_ctx ctx = { true, false };
	int r;

	if (!p->evictable)
		return false;

	for (;&p->evictable->tv.head != &p->validated;
	     p->evictable = list_prev_entry(p->evictable, tv.head)) {

		struct amdgpu_bo_list_entry *candidate = p->evictable;
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo);
		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
		bool update_bytes_moved_vis;
		uint32_t other;

		/* If we reached our current BO we can forget it */
		if (bo == validated)
			break;

		/* We can't move pinned BOs here */
		if (bo->pin_count)
			continue;

		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

		/* Check if this BO is in one of the domains we need space for */
		if (!(other & domain))
			continue;

		/* Check if we can move this BO somewhere else */
		other = bo->allowed_domains & ~domain;
		if (!other)
			continue;

		/* Good we can try to move this BO somewhere else */
		update_bytes_moved_vis =
				!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
				amdgpu_bo_in_cpu_visible_vram(bo);
		amdgpu_bo_placement_from_domain(bo, other);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		p->bytes_moved += ctx.bytes_moved;
		if (update_bytes_moved_vis)
			p->bytes_moved_vis += ctx.bytes_moved;

		if (unlikely(r))
			break;

		p->evictable = list_prev_entry(p->evictable, tv.head);
		list_move(&candidate->tv.head, &p->validated);

		return true;
	}

	return false;
}

static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_cs_parser *p = param;
	int r;

	do {
		r = amdgpu_cs_bo_validate(p, bo);
	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
	if (r)
		return r;

	if (bo->shadow)
		r = amdgpu_cs_bo_validate(p, bo->shadow);

	return r;
}

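/**
 * amdgpu_cs_list_validate - validate all BOs on a list
 *
 * @p: parser context
 * @validated: list of amdgpu_bo_list_entry to validate
 *
 * Validates every BO on the list, binding freshly acquired user pages to
 * userptr BOs first and rejecting userptr BOs that belong to a foreign
 * process.
 */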
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
			    struct list_head *validated)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
		bool binding_userptr = false;
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		/* Check if we have user pages and nobody bound the BO already */
		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
		    lobj->user_pages) {
			amdgpu_bo_placement_from_domain(bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				return r;
			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     lobj->user_pages);
			binding_userptr = true;
		}

		if (p->evictable == lobj)
			p->evictable = NULL;

		r = amdgpu_cs_validate(p, bo);
		if (r)
			return r;

		if (binding_userptr) {
			kvfree(lobj->user_pages);
			lobj->user_pages = NULL;
		}
	}
	return 0;
}

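/**
 * amdgpu_cs_parser_bos - reserve and validate all buffers of the submission
 *
 * @p: parser context
 * @cs: CS ioctl data from user space
 *
 * Builds the validation list from the bo_list, reserves all buffers,
 * (re)acquires user pages for userptr BOs, validates everything against the
 * current move budget and finally fills in the GDS/GWS/OA and user fence
 * fields of the job.
 */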
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	struct amdgpu_bo *gds;
	struct amdgpu_bo *gws;
	struct amdgpu_bo *oa;
	unsigned tries = 10;
	int r;

	INIT_LIST_HEAD(&p->validated);

	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
	if (cs->in.bo_list_handle) {
		if (p->bo_list)
			return -EINVAL;

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
				       &p->bo_list);
		if (r)
			return r;
	} else if (!p->bo_list) {
		/* Create an empty bo_list when no handle is provided */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
					  &p->bo_list);
		if (r)
			return r;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->tv.num_shared = 1;

	amdgpu_bo_list_get_list(p->bo_list, &p->validated);
	if (p->bo_list->first_userptr != p->bo_list->num_entries)
		p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);

	while (1) {
		struct list_head need_pages;

		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
					   &duplicates);
		if (unlikely(r != 0)) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
			goto error_free_pages;
		}

		INIT_LIST_HEAD(&need_pages);
		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

			if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
				 &e->user_invalidated) && e->user_pages) {

				/* We acquired a page array, but somebody
				 * invalidated it. Free it and try again
				 */
				release_pages(e->user_pages,
					      bo->tbo.ttm->num_pages);
				kvfree(e->user_pages);
				e->user_pages = NULL;
			}

			if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
			    !e->user_pages) {
				list_del(&e->tv.head);
				list_add(&e->tv.head, &need_pages);

				amdgpu_bo_unreserve(bo);
			}
		}

		if (list_empty(&need_pages))
			break;

		/* Unreserve everything again. */
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

		/* We tried too many times, just abort */
		if (!--tries) {
			r = -EDEADLK;
			DRM_ERROR("deadlock in %s\n", __func__);
			goto error_free_pages;
		}

		/* Fill the page arrays for all userptrs. */
		list_for_each_entry(e, &need_pages, tv.head) {
			struct ttm_tt *ttm = e->tv.bo->ttm;

			e->user_pages = kvmalloc_array(ttm->num_pages,
							 sizeof(struct page *),
							 GFP_KERNEL | __GFP_ZERO);
			if (!e->user_pages) {
				r = -ENOMEM;
				DRM_ERROR("kvmalloc_array failure in %s\n", __func__);
				goto error_free_pages;
			}

			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
			if (r) {
				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
				kvfree(e->user_pages);
				e->user_pages = NULL;
				goto error_free_pages;
			}
		}

		/* And try again. */
		list_splice(&need_pages, &p->validated);
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;
	p->evictable = list_last_entry(&p->validated,
				       struct amdgpu_bo_list_entry,
				       tv.head);

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
		goto error_validate;
	}

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);

	gds = p->bo_list->gds_obj;
	gws = p->bo_list->gws_obj;
	oa = p->bo_list->oa_obj;

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		/* Make sure we use the exclusive slot for shared BOs */
		if (bo->prime_shared_count)
			e->tv.num_shared = 0;
		e->bo_va = amdgpu_vm_bo_find(vm, bo);
	}

	if (gds) {
		p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
		p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
	}
	if (gws) {
		p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
		p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
	}
	if (oa) {
		p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
	}

	if (!r && p->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

		r = amdgpu_ttm_alloc_gart(&uf->tbo);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r)
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

error_free_pages:

	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		if (!e->user_pages)
			continue;

		release_pages(e->user_pages, e->tv.bo->ttm->num_pages);
		kvfree(e->user_pages);
	}

	return r;
}

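/**
 * amdgpu_cs_sync_rings - sync the job to the fences of all validated BOs
 *
 * @p: parser context
 *
 * Adds the reservation object fences of every validated BO to the job's
 * sync object, honoring explicit sync requests.
 */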
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		struct reservation_object *resv = bo->tbo.resv;

		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
				     amdgpu_bo_explicit_sync(bo));

		if (r)
			return r;
	}
	return 0;
}

/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 * @backoff:	indicator to backoff the reservation
 *
 * If error is set, then unvalidate the buffers; otherwise just free memory
 * used by the parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
				  bool backoff)
{
	unsigned i;

	if (error && backoff)
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);

	for (i = 0; i < parser->num_post_dep_syncobjs; i++)
		drm_syncobj_put(parser->post_dep_syncobjs[i]);
	kfree(parser->post_dep_syncobjs);

	dma_fence_put(parser->fence);

	if (parser->ctx) {
		mutex_unlock(&parser->ctx->lock);
		amdgpu_ctx_put(parser->ctx);
	}
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	if (parser->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);

		amdgpu_bo_unref(&uf);
	}
}

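/**
 * amdgpu_cs_vm_handling - prepare the VM for the submission
 *
 * @p: parser context
 *
 * Parses or patches the IBs in place for rings that need VM emulation
 * (UVD/VCE), updates all BO VAs and page directories and makes the job wait
 * for the relevant page table updates.
 */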
static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
		unsigned i, j;

		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			struct amdgpu_cs_chunk *chunk;
			uint64_t offset, va_start;
			struct amdgpu_ib *ib;
			uint8_t *kptr;

			chunk = &p->chunks[i];
			ib = &p->job->ibs[j];
			chunk_ib = chunk->kdata;

			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
				continue;

			va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
			if (r) {
				DRM_ERROR("IB va_start is invalid\n");
				return r;
			}

			if ((va_start + chunk_ib->ib_bytes) >
			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r)
				return r;

			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
			kptr += va_start - offset;

			if (ring->funcs->parse_cs) {
				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
				amdgpu_bo_kunmap(aobj);

				r = amdgpu_ring_parse_cs(ring, p, j);
				if (r)
					return r;
			} else {
				ib->ptr = (uint32_t *)kptr;
				r = amdgpu_ring_patch_cs_in_place(ring, p, j);
				amdgpu_bo_kunmap(aobj);
				if (r)
					return r;
			}

			j++;
		}
	}

	if (!p->job->vm)
		return amdgpu_cs_sync_rings(p);

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync,
			      fpriv->prt_va->last_pt_update, false);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		struct dma_fence *f;

		bo_va = fpriv->csa_va;
		BUG_ON(!bo_va);
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
		if (r)
			return r;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct dma_fence *f;

		/* ignore duplicates */
		bo = ttm_to_amdgpu_bo(e->tv.bo);
		if (!bo)
			continue;

		bo_va = e->bo_va;
		if (bo_va == NULL)
			continue;

		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
		if (r)
			return r;
	}

	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
	if (r)
		return r;

	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

	if (amdgpu_vm_debug) {
		/* Invalidate all BOs to test for userspace bugs */
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

			/* ignore duplicates */
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo, false);
		}
	}

	return amdgpu_cs_sync_rings(p);
}

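/**
 * amdgpu_cs_ib_fill - fill in the job's IBs from the IB chunks
 *
 * @adev: amdgpu device
 * @parser: parser context
 *
 * Resolves the scheduler entity for every AMDGPU_CHUNK_ID_IB chunk, enforces
 * the SR-IOV preemption limits, allocates the IBs and finally waits for the
 * previous fence of the context.
 */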
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int r, ce_preempt = 0, de_preempt = 0;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct drm_sched_entity *entity;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) {
			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
					ce_preempt++;
				else
					de_preempt++;
			}

			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
			if (ce_preempt > 1 || de_preempt > 1)
				return -EINVAL;
		}

		r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
					  chunk_ib->ip_instance, chunk_ib->ring,
					  &entity);
		if (r)
			return r;

		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
			parser->job->preamble_status |=
				AMDGPU_PREAMBLE_IB_PRESENT;

		if (parser->entity && parser->entity != entity)
			return -EINVAL;

		parser->entity = entity;

		ring = to_amdgpu_ring(entity->rq->sched);
		r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
				  chunk_ib->ib_bytes : 0, ib);
		if (r) {
			DRM_ERROR("Failed to get ib !\n");
			return r;
		}

		ib->gpu_addr = chunk_ib->va_start;
		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;

		j++;
	}

	/* UVD & VCE fw doesn't support user fences */
	ring = to_amdgpu_ring(parser->entity->rq->sched);
	if (parser->job->uf_addr && (
	    ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
	    ring->funcs->type == AMDGPU_RING_TYPE_VCE))
		return -EINVAL;

	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
}
1060 1061
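/**
 * amdgpu_cs_process_fence_dep - add fence dependencies to the job
 *
 * @p: parser context
 * @chunk: AMDGPU_CHUNK_ID_DEPENDENCIES chunk
 *
 * Looks up every fence named in the dependency chunk and adds it to the
 * job's sync object so the scheduler waits for it before running the job.
 */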
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
				       struct amdgpu_cs_chunk *chunk)
1062
{
1063
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1064 1065 1066
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_dep *deps;
1067

1068 1069 1070
	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);
1071

1072 1073
	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ctx *ctx;
1074
		struct drm_sched_entity *entity;
1075
		struct dma_fence *fence;
1076

1077 1078 1079
		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;
1080

1081 1082 1083
		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
					  deps[i].ip_instance,
					  deps[i].ring, &entity);
1084 1085 1086 1087
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}
1088

1089
		fence = amdgpu_ctx_get_fence(ctx, entity,
1090 1091 1092 1093 1094 1095
					     deps[i].handle);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			amdgpu_ctx_put(ctx);
			return r;
		} else if (fence) {
1096 1097
			r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
					true);
1098 1099 1100 1101 1102 1103 1104 1105
			dma_fence_put(fence);
			amdgpu_ctx_put(ctx);
			if (r)
				return r;
		}
	}
	return 0;
}

static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
						 uint32_t handle)
{
	int r;
	struct dma_fence *fence;

	r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence);
	if (r)
		return r;

	r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
	dma_fence_put(fence);

	return r;
}

static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
					    struct amdgpu_cs_chunk *chunk)
{
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_sem *deps;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
		if (r)
			return r;
	}
	return 0;
}

static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
					     struct amdgpu_cs_chunk *chunk)
{
	unsigned num_deps;
	int i;
	struct drm_amdgpu_cs_chunk_sem *deps;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	p->post_dep_syncobjs = kmalloc_array(num_deps,
					     sizeof(struct drm_syncobj *),
					     GFP_KERNEL);
	p->num_post_dep_syncobjs = 0;

	if (!p->post_dep_syncobjs)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_dep_syncobjs[i])
			return -EINVAL;
		p->num_post_dep_syncobjs++;
	}
	return 0;
}

static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
			r = amdgpu_cs_process_fence_dep(p, chunk);
			if (r)
				return r;
		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
			if (r)
				return r;
		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
			if (r)
				return r;
		}
	}

	return 0;
}

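/**
 * amdgpu_cs_post_dependencies - install the CS fence in the out-syncobjs
 *
 * @p: parser context
 *
 * Replaces the fence of every AMDGPU_CHUNK_ID_SYNCOBJ_OUT syncobj with the
 * fence of this submission.
 */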
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_dep_syncobjs; ++i)
		drm_syncobj_replace_fence(p->post_dep_syncobjs[i], 0, p->fence);
}

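/**
 * amdgpu_cs_submit - push the prepared job to the scheduler
 *
 * @p: parser context
 * @cs: CS ioctl data, used to return the sequence number
 *
 * Initializes the scheduler job, re-checks the userptr BOs under the MN
 * lock, arms the post dependencies and pushes the job to the scheduler
 * entity. After this point the job is owned by the scheduler.
 */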
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct drm_sched_entity *entity = p->entity;
	enum drm_sched_priority priority;
	struct amdgpu_ring *ring;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_job *job;
	uint64_t seq;
	int r;

	job = p->job;
	p->job = NULL;

	r = drm_sched_job_init(&job->base, entity, p->filp);
	if (r)
		goto error_unlock;

	/* No memory allocation is allowed while holding the mn lock */
	amdgpu_mn_lock(p->mn);
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
			r = -ERESTARTSYS;
			goto error_abort;
		}
	}

	job->owner = p->filp;
	p->fence = dma_fence_get(&job->base.s_fence->finished);

	amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
	amdgpu_cs_post_dependencies(p);

	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
	    !p->ctx->preamble_presented) {
		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
		p->ctx->preamble_presented = true;
	}

	cs->out.handle = seq;
	job->uf_sequence = seq;

	amdgpu_job_free_resources(job);

	trace_amdgpu_cs_ioctl(job);
	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
	priority = job->base.s_priority;
	drm_sched_entity_push_job(&job->base, entity);

	ring = to_amdgpu_ring(entity->rq->sched);
	amdgpu_ring_priority_get(ring, priority);

	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);

	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
	amdgpu_mn_unlock(p->mn);

	return 0;

error_abort:
	drm_sched_job_cleanup(&job->base);
	amdgpu_mn_unlock(p->mn);

error_unlock:
	amdgpu_job_free(job);
	return r;
}

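/**
 * amdgpu_cs_ioctl - entry point for the CS ioctl
 *
 * @dev: drm device
 * @data: CS ioctl data from userspace
 * @filp: file private
 *
 * Ties the whole submission together: parser init, IB fill, dependency
 * handling, buffer validation, VM handling and the final submit.
 */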
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int i, r;

	if (!adev->accel_working)
		return -EBUSY;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser %d!\n", r);
		goto out;
	}

	r = amdgpu_cs_ib_fill(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_dependencies(adev, &parser);
	if (r) {
		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
		goto out;
	}

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto out;
	}

	reserved_buffers = true;

	for (i = 0; i < parser.job->num_ibs; i++)
		trace_amdgpu_cs(&parser, i);

	r = amdgpu_cs_vm_handling(&parser);
	if (r)
		goto out;

	r = amdgpu_cs_submit(&parser, cs);

out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
				  wait->in.ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
				  user->ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		return -EINVAL;
	}
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		/* latch any fence error before dropping our reference */
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;
	else
		r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
		sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}

/**
 * amdgpu_cs_find_mapping - find the mapping for a VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: resulting mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns 0 and fills in @bo and @map when the
 * mapping is found, an error code otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
		return -EINVAL;

	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
	}

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}