/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"

static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	unsigned long size;
	int r;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &bo->tbo;
	/* One for TTM and one for the CS job */
	p->uf_entry.tv.num_shared = 2;
	p->uf_entry.user_pages = NULL;

	drm_gem_object_put_unlocked(gobj);

	size = amdgpu_bo_size(bo);
	if (size != PAGE_SIZE || (data->offset + 8) > size) {
		r = -EINVAL;
		goto error_unref;
	}

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		r = -EINVAL;
		goto error_unref;
	}

	*offset = data->offset;

	return 0;

error_unref:
	amdgpu_bo_unref(&bo);
	return r;
}

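/* Create the parser's BO list from the handles passed in an
 * AMDGPU_CHUNK_ID_BO_HANDLES chunk.
 */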
static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_bo_list_in *data)
{
	int r;
	struct drm_amdgpu_bo_list_entry *info = NULL;

	r = amdgpu_bo_create_list_entry_array(data, &info);
	if (r)
		return r;

	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
				  &p->bo_list);
	if (r)
		goto error_free;

	kvfree(info);
	return 0;

error_free:
	if (info)
		kvfree(info);

	return r;
}

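/* Copy the chunk array in from userspace, validate each chunk and
 * allocate the job for this submission.
 */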
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	mutex_lock(&p->ctx->lock);

	/* skip guilty context job */
	if (atomic_read(&p->ctx->guilty) == 1) {
		ret = -ECANCELED;
		goto free_chunk;
	}

	/* get chunks */
	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto free_chunk;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
			    GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				       sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			size = sizeof(struct drm_amdgpu_bo_list_in);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_entry.tv.bo)
		p->job->uf_addr = uf_offset;
	kfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kfree(chunk_array);

	return ret;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
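 *
 * For example, with log2_max_MBps = 6 (64 MB/s) an accumulated budget of
 * 250000 us corresponds to 250000 << 6 = 16000000 bytes of allowed moves.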
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;

	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
                                      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;
			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}

static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
				 struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.resv,
		.flags = 0
	};
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_bo_in_cpu_visible_vram(bo))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
				struct amdgpu_bo *validated)
{
	uint32_t domain = validated->allowed_domains;
	struct ttm_operation_ctx ctx = { true, false };
	int r;

	if (!p->evictable)
		return false;

	for (;&p->evictable->tv.head != &p->validated;
	     p->evictable = list_prev_entry(p->evictable, tv.head)) {

		struct amdgpu_bo_list_entry *candidate = p->evictable;
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo);
		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
		bool update_bytes_moved_vis;
		uint32_t other;

		/* If we reached our current BO we can forget it */
		if (bo == validated)
			break;

		/* We can't move pinned BOs here */
		if (bo->pin_count)
			continue;

		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

		/* Check if this BO is in one of the domains we need space for */
		if (!(other & domain))
			continue;

		/* Check if we can move this BO somewhere else */
		other = bo->allowed_domains & ~domain;
		if (!other)
			continue;

		/* Good we can try to move this BO somewhere else */
		update_bytes_moved_vis =
				!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
				amdgpu_bo_in_cpu_visible_vram(bo);
		amdgpu_bo_placement_from_domain(bo, other);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		p->bytes_moved += ctx.bytes_moved;
		if (update_bytes_moved_vis)
			p->bytes_moved_vis += ctx.bytes_moved;

		if (unlikely(r))
			break;

		p->evictable = list_prev_entry(p->evictable, tv.head);
		list_move(&candidate->tv.head, &p->validated);

		return true;
	}

	return false;
}

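/* Validate a single BO, evicting BOs from the current working set as long
 * as validation keeps failing with -ENOMEM.
 */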
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_cs_parser *p = param;
	int r;

	do {
		r = amdgpu_cs_bo_validate(p, bo);
	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
	if (r)
		return r;

	if (bo->shadow)
		r = amdgpu_cs_bo_validate(p, bo->shadow);

	return r;
}

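/* Validate all BOs on the given list and bind freshly acquired user pages
 * to their userptr BOs.
 */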
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
			    struct list_head *validated)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
		bool binding_userptr = false;
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		/* Check if we have user pages and nobody bound the BO already */
		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
		    lobj->user_pages) {
			amdgpu_bo_placement_from_domain(bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				return r;
			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     lobj->user_pages);
			binding_userptr = true;
		}

		if (p->evictable == lobj)
			p->evictable = NULL;

		r = amdgpu_cs_validate(p, bo);
		if (r)
			return r;

		if (binding_userptr) {
			kvfree(lobj->user_pages);
			lobj->user_pages = NULL;
		}
	}
	return 0;
}

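/* Acquire the BO list, reserve all buffers, fetch userptr pages and
 * validate everything against the current domains and move thresholds.
 */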
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	struct amdgpu_bo *gds;
	struct amdgpu_bo *gws;
	struct amdgpu_bo *oa;
	unsigned tries = 10;
	int r;

	INIT_LIST_HEAD(&p->validated);

	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
	if (cs->in.bo_list_handle) {
		if (p->bo_list)
			return -EINVAL;

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
				       &p->bo_list);
		if (r)
			return r;
	} else if (!p->bo_list) {
		/* Create an empty bo_list when no handle is provided */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
					  &p->bo_list);
		if (r)
			return r;
	}

	/* One for TTM and one for the CS job */
	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->tv.num_shared = 2;

	amdgpu_bo_list_get_list(p->bo_list, &p->validated);
	if (p->bo_list->first_userptr != p->bo_list->num_entries)
		p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);

	while (1) {
		struct list_head need_pages;

		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
					   &duplicates);
		if (unlikely(r != 0)) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
			goto error_free_pages;
		}

		INIT_LIST_HEAD(&need_pages);
		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

			if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
				 &e->user_invalidated) && e->user_pages) {

				/* We acquired a page array, but somebody
				 * invalidated it. Free it and try again
				 */
				release_pages(e->user_pages,
					      bo->tbo.ttm->num_pages);
				kvfree(e->user_pages);
				e->user_pages = NULL;
			}

			if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
			    !e->user_pages) {
				list_del(&e->tv.head);
				list_add(&e->tv.head, &need_pages);

				amdgpu_bo_unreserve(bo);
			}
		}

		if (list_empty(&need_pages))
			break;

		/* Unreserve everything again. */
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

		/* We tried too many times, just abort */
		if (!--tries) {
			r = -EDEADLK;
			DRM_ERROR("deadlock in %s\n", __func__);
			goto error_free_pages;
		}

		/* Fill the page arrays for all userptrs. */
		list_for_each_entry(e, &need_pages, tv.head) {
			struct ttm_tt *ttm = e->tv.bo->ttm;

			e->user_pages = kvmalloc_array(ttm->num_pages,
							 sizeof(struct page*),
							 GFP_KERNEL | __GFP_ZERO);
			if (!e->user_pages) {
				r = -ENOMEM;
				DRM_ERROR("calloc failure in %s\n", __func__);
				goto error_free_pages;
			}

			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
			if (r) {
				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
				kvfree(e->user_pages);
				e->user_pages = NULL;
				goto error_free_pages;
			}
		}

		/* And try again. */
		list_splice(&need_pages, &p->validated);
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;
	p->evictable = list_last_entry(&p->validated,
				       struct amdgpu_bo_list_entry,
				       tv.head);

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
		goto error_validate;
	}

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);

	gds = p->bo_list->gds_obj;
	gws = p->bo_list->gws_obj;
	oa = p->bo_list->oa_obj;

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		/* Make sure we use the exclusive slot for shared BOs */
		if (bo->prime_shared_count)
			e->tv.num_shared = 0;
		e->bo_va = amdgpu_vm_bo_find(vm, bo);
	}

	if (gds) {
		p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
		p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
	}
	if (gws) {
		p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
		p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
	}
	if (oa) {
		p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
	}

	if (!r && p->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

		r = amdgpu_ttm_alloc_gart(&uf->tbo);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r)
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

error_free_pages:

	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		if (!e->user_pages)
			continue;

		release_pages(e->user_pages, e->tv.bo->ttm->num_pages);
		kvfree(e->user_pages);
	}

	return r;
}

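/* Add the reservation object fences of all validated BOs as dependencies
 * of the job.
 */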
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		struct reservation_object *resv = bo->tbo.resv;

		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
				     amdgpu_bo_explicit_sync(bo));

		if (r)
			return r;
	}
	return 0;
}

/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 * @backoff:	indicator to backoff the reservation
 *
 * If error is set, then unvalidate buffer, otherwise just free memory
 * used by parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
				  bool backoff)
{
	unsigned i;

	if (error && backoff)
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);

	for (i = 0; i < parser->num_post_dep_syncobjs; i++)
		drm_syncobj_put(parser->post_dep_syncobjs[i]);
	kfree(parser->post_dep_syncobjs);

	dma_fence_put(parser->fence);

	if (parser->ctx) {
		mutex_unlock(&parser->ctx->lock);
		amdgpu_ctx_put(parser->ctx);
	}
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	if (parser->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);

		amdgpu_bo_unref(&uf);
	}
}

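/* Handle the VM side of a submission: parse or patch IBs for UVD/VCE VM
 * emulation and update the page tables the job depends on.
 */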
static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
		unsigned i, j;

		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			struct amdgpu_cs_chunk *chunk;
			uint64_t offset, va_start;
			struct amdgpu_ib *ib;
			uint8_t *kptr;

			chunk = &p->chunks[i];
			ib = &p->job->ibs[j];
			chunk_ib = chunk->kdata;

			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
				continue;

			va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
			if (r) {
				DRM_ERROR("IB va_start is invalid\n");
				return r;
			}

			if ((va_start + chunk_ib->ib_bytes) >
			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r) {
				return r;
			}

			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
			kptr += va_start - offset;

			if (ring->funcs->parse_cs) {
				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
				amdgpu_bo_kunmap(aobj);

				r = amdgpu_ring_parse_cs(ring, p, j);
				if (r)
					return r;
			} else {
				ib->ptr = (uint32_t *)kptr;
				r = amdgpu_ring_patch_cs_in_place(ring, p, j);
				amdgpu_bo_kunmap(aobj);
				if (r)
					return r;
			}

			j++;
		}
	}

	if (!p->job->vm)
		return amdgpu_cs_sync_rings(p);


	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync,
			      fpriv->prt_va->last_pt_update, false);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		struct dma_fence *f;

		bo_va = fpriv->csa_va;
		BUG_ON(!bo_va);
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
		if (r)
			return r;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct dma_fence *f;

		/* ignore duplicates */
		bo = ttm_to_amdgpu_bo(e->tv.bo);
		if (!bo)
			continue;

		bo_va = e->bo_va;
		if (bo_va == NULL)
			continue;

		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
		if (r)
			return r;
	}

	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
	if (r)
		return r;

	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

	if (amdgpu_vm_debug) {
		/* Invalidate all BOs to test for userspace bugs */
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

			/* ignore duplicates */
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo, false);
		}
	}

	return amdgpu_cs_sync_rings(p);
}

static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int r, ce_preempt = 0, de_preempt = 0;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct drm_sched_entity *entity;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) {
			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
					ce_preempt++;
				else
					de_preempt++;
			}

			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
			if (ce_preempt > 1 || de_preempt > 1)
				return -EINVAL;
		}

		r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
					  chunk_ib->ip_instance, chunk_ib->ring,
					  &entity);
		if (r)
			return r;

		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
			parser->job->preamble_status |=
				AMDGPU_PREAMBLE_IB_PRESENT;

		if (parser->entity && parser->entity != entity)
			return -EINVAL;

		parser->entity = entity;

		ring = to_amdgpu_ring(entity->rq->sched);
		r =  amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
				   chunk_ib->ib_bytes : 0, ib);
		if (r) {
			DRM_ERROR("Failed to get ib !\n");
			return r;
		}

		ib->gpu_addr = chunk_ib->va_start;
		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;

		j++;
	}

	/* UVD & VCE fw doesn't support user fences */
	ring = to_amdgpu_ring(parser->entity->rq->sched);
	if (parser->job->uf_addr && (
	    ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
	    ring->funcs->type == AMDGPU_RING_TYPE_VCE))
		return -EINVAL;

	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
}

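/* Turn the entries of a fence dependency chunk into dependencies of the job. */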
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
				       struct amdgpu_cs_chunk *chunk)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_dep *deps;

	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ctx *ctx;
		struct drm_sched_entity *entity;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
					  deps[i].ip_instance,
					  deps[i].ring, &entity);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, entity,
					     deps[i].handle);

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
			struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
			struct dma_fence *old = fence;

			fence = dma_fence_get(&s_fence->scheduled);
			dma_fence_put(old);
		}

		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			amdgpu_ctx_put(ctx);
			return r;
		} else if (fence) {
			r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
					true);
			dma_fence_put(fence);
			amdgpu_ctx_put(ctx);
			if (r)
				return r;
		}
	}
	return 0;
}

static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
						 uint32_t handle)
{
	int r;
	struct dma_fence *fence;
	r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence);
	if (r)
		return r;

	r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
	dma_fence_put(fence);

	return r;
}

static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
					    struct amdgpu_cs_chunk *chunk)
{
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_sem *deps;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
		if (r)
			return r;
	}
	return 0;
}

static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
					     struct amdgpu_cs_chunk *chunk)
{
	unsigned num_deps;
	int i;
	struct drm_amdgpu_cs_chunk_sem *deps;
	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	p->post_dep_syncobjs = kmalloc_array(num_deps,
					     sizeof(struct drm_syncobj *),
					     GFP_KERNEL);
	p->num_post_dep_syncobjs = 0;

	if (!p->post_dep_syncobjs)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_dep_syncobjs[i])
			return -EINVAL;
		p->num_post_dep_syncobjs++;
	}
	return 0;
}

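/* Process all dependency chunks: add fence and syncobj waits to the job and
 * collect the syncobjs to signal on completion.
 */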
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES ||
		    chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
			r = amdgpu_cs_process_fence_dep(p, chunk);
			if (r)
				return r;
		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
			if (r)
				return r;
		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
			if (r)
				return r;
		}
	}

	return 0;
}

static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_dep_syncobjs; ++i)
		drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
}

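/* Push the prepared job to the scheduler and return its sequence number to
 * userspace through the CS output.
 */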
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct drm_sched_entity *entity = p->entity;
	enum drm_sched_priority priority;
	struct amdgpu_ring *ring;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_job *job;
	uint64_t seq;

	int r;

	job = p->job;
	p->job = NULL;

	r = drm_sched_job_init(&job->base, entity, p->filp);
	if (r)
		goto error_unlock;

	/* No memory allocation is allowed while holding the mn lock */
	amdgpu_mn_lock(p->mn);
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
			r = -ERESTARTSYS;
			goto error_abort;
		}
	}

	job->owner = p->filp;
	p->fence = dma_fence_get(&job->base.s_fence->finished);

	amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
	amdgpu_cs_post_dependencies(p);

	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
	    !p->ctx->preamble_presented) {
		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
		p->ctx->preamble_presented = true;
	}

	cs->out.handle = seq;
	job->uf_sequence = seq;

	amdgpu_job_free_resources(job);

	trace_amdgpu_cs_ioctl(job);
	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
	priority = job->base.s_priority;
	drm_sched_entity_push_job(&job->base, entity);

	ring = to_amdgpu_ring(entity->rq->sched);
	amdgpu_ring_priority_get(ring, priority);

	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);

	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
	amdgpu_mn_unlock(p->mn);

	return 0;

error_abort:
	drm_sched_job_cleanup(&job->base);
	amdgpu_mn_unlock(p->mn);

error_unlock:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int i, r;

	if (!adev->accel_working)
		return -EBUSY;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser %d!\n", r);
		goto out;
	}

	r = amdgpu_cs_ib_fill(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_dependencies(adev, &parser);
	if (r) {
		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
		goto out;
	}

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto out;
	}

	reserved_buffers = true;

	for (i = 0; i < parser.job->num_ibs; i++)
		trace_amdgpu_cs(&parser, i);

	r = amdgpu_cs_vm_handling(&parser);
	if (r)
		goto out;

	r = amdgpu_cs_submit(&parser, cs);

out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
				  wait->in.ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
				  user->ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	if (!fence)
		fence = dma_fence_get_stub();

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		return -EINVAL;
	}
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;

		if (fence->error)
			return fence->error;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;
	else
		r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
		sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}

/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: resulting mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns 0 and fills in @bo and @map when the
 * mapping is found, an error code otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
		return -EINVAL;

	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
	}

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}