/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"

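/* Look up the user fence BO named in the fence chunk, take a reference to
 * it and remember the offset at which the fence value will be written.
 * The BO must be exactly one page in size and must not be a userptr BO.
 */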
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
	unsigned long size;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
	p->uf_entry.tv.shared = true;
	p->uf_entry.user_pages = NULL;

	/* The BO reference taken above keeps the object alive, so the GEM
	 * reference can be dropped right away instead of being leaked on
	 * the error paths below.
	 */
	drm_gem_object_put_unlocked(gobj);

	size = amdgpu_bo_size(p->uf_entry.robj);
	if (size != PAGE_SIZE || (data->offset + 8) > size)
		return -EINVAL;

	*offset = data->offset;

	if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
		amdgpu_bo_unref(&p->uf_entry.robj);
		return -EINVAL;
	}

	return 0;
}

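/* Create the parser's BO list from a BO_HANDLES chunk, an alternative to
 * passing a bo_list handle to the CS ioctl itself.
 */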
static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_bo_list_in *data)
{
	int r;
	struct drm_amdgpu_bo_list_entry *info = NULL;

	r = amdgpu_bo_create_list_entry_array(data, &info);
	if (r)
		return r;

	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
				  &p->bo_list);
	if (r)
		goto error_free;

	kvfree(info);
	return 0;

error_free:
	kvfree(info);

	return r;
}

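/* Initialize the parser from the CS ioctl arguments: copy the chunk array
 * and every chunk's data from userspace, count the IBs and allocate the job.
 */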
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	/* skip guilty context job */
	if (atomic_read(&p->ctx->guilty) == 1) {
		ret = -ECANCELED;
		goto free_chunk;
	}

	mutex_lock(&p->ctx->lock);

	/* get chunks */
	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto free_chunk;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				  GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			size = sizeof(struct drm_amdgpu_bo_list_in);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_entry.robj)
		p->job->uf_addr = uf_offset;
	kfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kfree(chunk_array);

	return ret;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
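	/* Example: with log2_max_MBps == 6 (64 MB/s), an accumulated budget
	 * of 200000 us converts to 200000 << 6 = 12800000 bytes (~12.8 MB).
	 */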
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;

	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;
			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}

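/* Validate the placement of a single BO, charging any migration it causes
 * against the byte budgets computed by amdgpu_cs_get_threshold_for_moves().
 * On -ENOMEM the placement is retried with all allowed domains.
 */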
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
				 struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.resv,
		.flags = 0
	};
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_bo_in_cpu_visible_vram(bo))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/* Last resort, try to evict something from the current working set */
static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
				struct amdgpu_bo *validated)
{
	uint32_t domain = validated->allowed_domains;
	struct ttm_operation_ctx ctx = { true, false };
	int r;

	if (!p->evictable)
		return false;

	for (;&p->evictable->tv.head != &p->validated;
	     p->evictable = list_prev_entry(p->evictable, tv.head)) {

		struct amdgpu_bo_list_entry *candidate = p->evictable;
		struct amdgpu_bo *bo = candidate->robj;
		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
		bool update_bytes_moved_vis;
		uint32_t other;

		/* If we reached our current BO we can forget it */
		if (candidate->robj == validated)
			break;

		/* We can't move pinned BOs here */
		if (bo->pin_count)
			continue;

		other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

		/* Check if this BO is in one of the domains we need space for */
		if (!(other & domain))
			continue;

		/* Check if we can move this BO somewhere else */
		other = bo->allowed_domains & ~domain;
		if (!other)
			continue;

		/* Good we can try to move this BO somewhere else */
		update_bytes_moved_vis =
				!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
				amdgpu_bo_in_cpu_visible_vram(bo);
		amdgpu_bo_placement_from_domain(bo, other);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		p->bytes_moved += ctx.bytes_moved;
		if (update_bytes_moved_vis)
			p->bytes_moved_vis += ctx.bytes_moved;

		if (unlikely(r))
			break;

		p->evictable = list_prev_entry(p->evictable, tv.head);
		list_move(&candidate->tv.head, &p->validated);

		return true;
	}

	return false;
}

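/* Validation callback, also used for VM page table BOs: retry validation
 * as long as evicting BOs from the current working set frees up memory,
 * and validate the shadow BO as well if there is one.
 */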
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_cs_parser *p = param;
	int r;

	do {
		r = amdgpu_cs_bo_validate(p, bo);
	} while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
	if (r)
		return r;

	if (bo->shadow)
		r = amdgpu_cs_bo_validate(p, bo->shadow);

	return r;
}

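/* Validate all BOs on the given list, binding freshly acquired user pages
 * to userptr BOs on the way and rejecting BOs that belong to another
 * process' address space.
 */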
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
			    struct list_head *validated)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = lobj->robj;
		bool binding_userptr = false;
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		/* Check if we have user pages and nobody bound the BO already */
		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
		    lobj->user_pages) {
			amdgpu_bo_placement_from_domain(bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				return r;
			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     lobj->user_pages);
			binding_userptr = true;
		}

		if (p->evictable == lobj)
			p->evictable = NULL;

		r = amdgpu_cs_validate(p, bo);
		if (r)
			return r;

		if (binding_userptr) {
			kvfree(lobj->user_pages);
			lobj->user_pages = NULL;
		}
	}
	return 0;
}

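/* Gather all BOs used by the submission, reserve them, fetch user pages
 * for userptr BOs, validate everything against the move budget and record
 * the GDS/GWS/OA and user fence addresses in the job.
 */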
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	struct amdgpu_bo *gds;
	struct amdgpu_bo *gws;
	struct amdgpu_bo *oa;
	unsigned tries = 10;
	int r;

	INIT_LIST_HEAD(&p->validated);

	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
	if (cs->in.bo_list_handle) {
		if (p->bo_list)
			return -EINVAL;

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
				       &p->bo_list);
		if (r)
			return r;
	} else if (!p->bo_list) {
		/* Create an empty bo_list when no handle is provided */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
					  &p->bo_list);
		if (r)
			return r;
	}

	amdgpu_bo_list_get_list(p->bo_list, &p->validated);
	if (p->bo_list->first_userptr != p->bo_list->num_entries)
		p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.robj && !p->uf_entry.robj->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);

	while (1) {
		struct list_head need_pages;

		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
					   &duplicates);
		if (unlikely(r != 0)) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
			goto error_free_pages;
		}

		INIT_LIST_HEAD(&need_pages);
		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = e->robj;

			if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
				 &e->user_invalidated) && e->user_pages) {

				/* We acquired a page array, but somebody
				 * invalidated it. Free it and try again
				 */
				release_pages(e->user_pages,
					      bo->tbo.ttm->num_pages);
				kvfree(e->user_pages);
				e->user_pages = NULL;
			}

			if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
			    !e->user_pages) {
				list_del(&e->tv.head);
				list_add(&e->tv.head, &need_pages);

				amdgpu_bo_unreserve(e->robj);
			}
		}

		if (list_empty(&need_pages))
			break;

		/* Unreserve everything again. */
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

		/* We tried too many times, just abort */
		if (!--tries) {
			r = -EDEADLK;
			DRM_ERROR("deadlock in %s\n", __func__);
			goto error_free_pages;
		}

		/* Fill the page arrays for all userptrs. */
		list_for_each_entry(e, &need_pages, tv.head) {
			struct ttm_tt *ttm = e->robj->tbo.ttm;

			e->user_pages = kvmalloc_array(ttm->num_pages,
						       sizeof(struct page *),
						       GFP_KERNEL | __GFP_ZERO);
			if (!e->user_pages) {
				r = -ENOMEM;
				DRM_ERROR("calloc failure in %s\n", __func__);
				goto error_free_pages;
			}

			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
			if (r) {
				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
				kvfree(e->user_pages);
				e->user_pages = NULL;
				goto error_free_pages;
			}
		}

		/* And try again. */
		list_splice(&need_pages, &p->validated);
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;
	p->evictable = list_last_entry(&p->validated,
				       struct amdgpu_bo_list_entry,
				       tv.head);

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r) {
		DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
		goto error_validate;
	}

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);

	gds = p->bo_list->gds_obj;
	gws = p->bo_list->gws_obj;
	oa = p->bo_list->oa_obj;

	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->bo_va = amdgpu_vm_bo_find(vm, e->robj);

	if (gds) {
		p->job->gds_base = amdgpu_bo_gpu_offset(gds);
		p->job->gds_size = amdgpu_bo_size(gds);
	}
	if (gws) {
		p->job->gws_base = amdgpu_bo_gpu_offset(gws);
		p->job->gws_size = amdgpu_bo_size(gws);
	}
	if (oa) {
		p->job->oa_base = amdgpu_bo_gpu_offset(oa);
		p->job->oa_size = amdgpu_bo_size(oa);
	}

	if (!r && p->uf_entry.robj) {
		struct amdgpu_bo *uf = p->uf_entry.robj;

		r = amdgpu_ttm_alloc_gart(&uf->tbo);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r)
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

error_free_pages:

	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		if (!e->user_pages)
			continue;

		release_pages(e->user_pages,
			      e->robj->tbo.ttm->num_pages);
		kvfree(e->user_pages);
	}

	return r;
}

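/* Make the job wait for every existing fence in the reservation objects of
 * the validated BOs, honoring per-BO explicit synchronization settings.
 */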
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct reservation_object *resv = e->robj->tbo.resv;
		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
				     amdgpu_bo_explicit_sync(e->robj));

		if (r)
			return r;
	}
	return 0;
}

/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 * @backoff:	indicator to backoff the reservation
 *
 * If error is set, then unvalidate buffer; otherwise just free memory
 * used by parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
				  bool backoff)
{
	unsigned i;

	if (error && backoff)
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);

	for (i = 0; i < parser->num_post_dep_syncobjs; i++)
		drm_syncobj_put(parser->post_dep_syncobjs[i]);
	kfree(parser->post_dep_syncobjs);

	dma_fence_put(parser->fence);

	if (parser->ctx) {
		mutex_unlock(&parser->ctx->lock);
		amdgpu_ctx_put(parser->ctx);
	}
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	amdgpu_bo_unref(&parser->uf_entry.robj);
}

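/* Handle the VM side of the submission: parse or patch IBs for rings that
 * require it, update the page tables of all involved BOs and collect the
 * resulting fences in the job's sync object.
 */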
static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
		unsigned i, j;

		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			struct amdgpu_cs_chunk *chunk;
			uint64_t offset, va_start;
			struct amdgpu_ib *ib;
			uint8_t *kptr;

			chunk = &p->chunks[i];
			ib = &p->job->ibs[j];
			chunk_ib = chunk->kdata;

			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
				continue;

			va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK;
			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
			if (r) {
				DRM_ERROR("IB va_start is invalid\n");
				return r;
			}

			if ((va_start + chunk_ib->ib_bytes) >
			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r)
				return r;

			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
			kptr += va_start - offset;

			if (ring->funcs->parse_cs) {
				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
				amdgpu_bo_kunmap(aobj);

				r = amdgpu_ring_parse_cs(ring, p, j);
				if (r)
					return r;
			} else {
				ib->ptr = (uint32_t *)kptr;
				r = amdgpu_ring_patch_cs_in_place(ring, p, j);
				amdgpu_bo_kunmap(aobj);
				if (r)
					return r;
			}

			j++;
		}
	}

	if (!p->job->vm)
		return amdgpu_cs_sync_rings(p);


	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync,
			      fpriv->prt_va->last_pt_update, false);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev)) {
		struct dma_fence *f;

		bo_va = fpriv->csa_va;
		BUG_ON(!bo_va);
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
		if (r)
			return r;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct dma_fence *f;
A
Alex Deucher 已提交
913

914 915 916 917
		/* ignore duplicates */
		bo = e->robj;
		if (!bo)
			continue;
A
Alex Deucher 已提交
918

919 920 921
		bo_va = e->bo_va;
		if (bo_va == NULL)
			continue;
A
Alex Deucher 已提交
922

923 924 925
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;
926

927 928 929 930
		f = bo_va->last_pt_update;
		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
		if (r)
			return r;
931 932
	}

933
	r = amdgpu_vm_handle_moved(adev, vm);
934 935 936
	if (r)
		return r;

937 938 939 940
	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		return r;

941
	r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
942 943
	if (r)
		return r;
944

	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
	if (r)
		return r;

	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

	if (amdgpu_vm_debug) {
		/* Invalidate all BOs to test for userspace bugs */
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			/* ignore duplicates */
			if (!e->robj)
				continue;

			amdgpu_vm_bo_invalidate(adev, e->robj, false);
		}
	}

	return amdgpu_cs_sync_rings(p);
}

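/* Fill the job's IB array from the IB chunks and determine the scheduler
 * entity to run on; all IBs of one submission must target the same entity.
 */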
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int r, ce_preempt = 0, de_preempt = 0;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct drm_sched_entity *entity;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) {
			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
					ce_preempt++;
				else
					de_preempt++;
			}

			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
			if (ce_preempt > 1 || de_preempt > 1)
				return -EINVAL;
		}

		r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
					  chunk_ib->ip_instance, chunk_ib->ring,
					  &entity);
1003
		if (r)
A
Alex Deucher 已提交
1004 1005
			return r;

1006 1007 1008
		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
			parser->job->preamble_status |=
				AMDGPU_PREAMBLE_IB_PRESENT;
1009

1010
		if (parser->entity && parser->entity != entity)
1011 1012
			return -EINVAL;

1013
		parser->entity = entity;
1014

1015 1016 1017
		ring = to_amdgpu_ring(entity->rq->sched);
		r =  amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
				   chunk_ib->ib_bytes : 0, ib);
1018 1019 1020
		if (r) {
			DRM_ERROR("Failed to get ib !\n");
			return r;
A
Alex Deucher 已提交
1021 1022
		}

1023
		ib->gpu_addr = chunk_ib->va_start;
1024
		ib->length_dw = chunk_ib->ib_bytes / 4;
1025
		ib->flags = chunk_ib->flags;
1026

A
Alex Deucher 已提交
1027 1028 1029
		j++;
	}

1030
	/* UVD & VCE fw doesn't support user fences */
1031
	ring = to_amdgpu_ring(parser->entity->rq->sched);
1032
	if (parser->job->uf_addr && (
1033 1034
	    ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
	    ring->funcs->type == AMDGPU_RING_TYPE_VCE))
1035
		return -EINVAL;
A
Alex Deucher 已提交
1036

1037
	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
A
Alex Deucher 已提交
1038 1039
}

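/* Add the fences named in a DEPENDENCIES chunk to the job's sync object so
 * that the scheduler waits for them before running the job.
 */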
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
				       struct amdgpu_cs_chunk *chunk)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_dep *deps;

	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ctx *ctx;
		struct drm_sched_entity *entity;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
					  deps[i].ip_instance,
					  deps[i].ring, &entity);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, entity,
					     deps[i].handle);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			amdgpu_ctx_put(ctx);
			return r;
		} else if (fence) {
			r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
					      true);
			dma_fence_put(fence);
			amdgpu_ctx_put(ctx);
			if (r)
				return r;
		}
	}
	return 0;
}


static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
						 uint32_t handle)
{
	int r;
	struct dma_fence *fence;

	r = drm_syncobj_find_fence(p->filp, handle, &fence);
	if (r)
		return r;

	r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
	dma_fence_put(fence);

	return r;
}

static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
					    struct amdgpu_cs_chunk *chunk)
{
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_sem *deps;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
		if (r)
			return r;
	}
	return 0;
}

static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
					     struct amdgpu_cs_chunk *chunk)
{
	unsigned num_deps;
	int i;
	struct drm_amdgpu_cs_chunk_sem *deps;
	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	p->post_dep_syncobjs = kmalloc_array(num_deps,
					     sizeof(struct drm_syncobj *),
					     GFP_KERNEL);
	p->num_post_dep_syncobjs = 0;

	if (!p->post_dep_syncobjs)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_dep_syncobjs[i])
			return -EINVAL;
		p->num_post_dep_syncobjs++;
	}
	return 0;
}

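/* Walk all chunks and handle every dependency-style chunk: fence
 * dependencies, syncobj waits and syncobj signal requests.
 */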
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
			r = amdgpu_cs_process_fence_dep(p, chunk);
			if (r)
				return r;
		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
			if (r)
				return r;
		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
			if (r)
				return r;
		}
	}

	return 0;
}

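/* Signal every syncobj collected from SYNCOBJ_OUT chunks with the fence of
 * the submitted job.
 */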
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_dep_syncobjs; ++i)
		drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
}

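/* Hand the parsed command stream over to the scheduler. Userptr BOs are
 * re-checked under the MMU notifier lock; if any of their pages were
 * invalidated in the meantime the submission is restarted.
 */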
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct drm_sched_entity *entity = p->entity;
	enum drm_sched_priority priority;
	struct amdgpu_ring *ring;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_job *job;
	uint64_t seq;
	int r;

	job = p->job;
	p->job = NULL;

	r = drm_sched_job_init(&job->base, entity, p->filp);
	if (r)
		goto error_unlock;

	/* No memory allocation is allowed while holding the mn lock */
	amdgpu_mn_lock(p->mn);
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = e->robj;

		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
			r = -ERESTARTSYS;
			goto error_abort;
		}
	}

	job->owner = p->filp;
	p->fence = dma_fence_get(&job->base.s_fence->finished);

	amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
	amdgpu_cs_post_dependencies(p);

	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
	    !p->ctx->preamble_presented) {
		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
		p->ctx->preamble_presented = true;
	}

	cs->out.handle = seq;
	job->uf_sequence = seq;

	amdgpu_job_free_resources(job);

	trace_amdgpu_cs_ioctl(job);
	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
	priority = job->base.s_priority;
	drm_sched_entity_push_job(&job->base, entity);

	ring = to_amdgpu_ring(entity->rq->sched);
	amdgpu_ring_priority_get(ring, priority);

	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);

	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
	amdgpu_mn_unlock(p->mn);

	return 0;

error_abort:
	dma_fence_put(&job->base.s_fence->finished);
	job->base.s_fence = NULL;
	amdgpu_mn_unlock(p->mn);

error_unlock:
	amdgpu_job_free(job);
	return r;
}

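/* Entry point of the CS ioctl: initialize the parser, fill the IBs, gather
 * and validate the BOs, resolve dependencies, do the VM handling and
 * finally submit the job.
 */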
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int i, r;

	if (!adev->accel_working)
		return -EBUSY;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		goto out;
	}

	r = amdgpu_cs_ib_fill(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto out;
	}

	reserved_buffers = true;

	r = amdgpu_cs_dependencies(adev, &parser);
	if (r) {
		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
		goto out;
	}

	for (i = 0; i < parser.job->num_ibs; i++)
		trace_amdgpu_cs(&parser, i);

	r = amdgpu_cs_vm_handling(&parser);
	if (r)
		goto out;

	r = amdgpu_cs_submit(&parser, cs);

out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
				  wait->in.ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
				  user->ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		return -EINVAL;
	}
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		/* check fence->error while we still hold our reference */
		if (r > 0 && fence->error)
			r = fence->error;

		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;
	else
		r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
		sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}

/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: resulting mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns the BO and the mapping on success, an
 * error code otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
		return -EINVAL;

	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
	}

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}