/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
static DEFINE_IDA(amdgpu_vm_pasid_ida);

/**
 * amdgpu_vm_alloc_pasid - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_vm_alloc_pasid(unsigned int bits)
{
	int pasid = -EINVAL;

	for (bits = min(bits, 31U); bits > 0; bits--) {
		pasid = ida_simple_get(&amdgpu_vm_pasid_ida,
				       1U << (bits - 1), 1U << bits,
				       GFP_KERNEL);
		if (pasid != -ENOSPC)
			break;
	}

	return pasid;
}

/**
 * amdgpu_vm_free_pasid - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_vm_free_pasid(unsigned int pasid)
{
	ida_simple_remove(&amdgpu_vm_pasid_ida, pasid);
}
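
/*
 * Illustrative usage only (not part of the driver): allocate a 16 bit wide
 * PASID for a VM and release it again when the VM is torn down.
 *
 *	int pasid = amdgpu_vm_alloc_pasid(16);
 *
 *	if (pasid < 0)
 *		return pasid;
 *	...
 *	amdgpu_vm_free_pasid(pasid);
 */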

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST

/* Local structure. Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 */
struct amdgpu_pte_update_params {
	/* amdgpu device we do this update for */
	struct amdgpu_device *adev;
	/* optional amdgpu_vm we do this update for */
	struct amdgpu_vm *vm;
	/* address where to copy page table entries from */
	uint64_t src;
	/* indirect buffer to fill with commands */
	struct amdgpu_ib *ib;
	/* Function which actually does the update */
	void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
		     uint64_t addr, unsigned count, uint32_t incr,
		     uint64_t flags);
	/* The next two are used during VM update by CPU
	 *  DMA addresses to use for mapping
	 *  Kernel pointer of PD/PT BO that needs to be updated
	 */
	dma_addr_t *pages_addr;
	void *kptr;
};
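
/*
 * Illustrative only, mirroring how callers in this file fill the structure
 * before walking the page tables (not itself part of the driver):
 *
 *	struct amdgpu_pte_update_params params;
 *
 *	memset(&params, 0, sizeof(params));
 *	params.adev = adev;
 *	params.vm = vm;
 *	params.func = amdgpu_vm_do_set_ptes;
 */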

/* Helper to disable partial resident texture feature from a fence callback */
struct amdgpu_prt_cb {
	struct amdgpu_device *adev;
	struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns the number of bits the pfn needs to be right shifted for a level.
 */
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
				      unsigned level)
{
	if (level != adev->vm_manager.num_level)
		return 9 * (adev->vm_manager.num_level - level - 1) +
			adev->vm_manager.block_size;
	else
		/* For the page tables on the leaves */
		return 0;
}
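
/*
 * Worked example (illustrative only): with adev->vm_manager.num_level == 3
 * and a 9 bit block_size the shift is 9 * (3 - level - 1) + 9, i.e. 27 for
 * the root PD (level 0), 18 for level 1, 9 for level 2 and 0 for the leaf
 * page tables (level == num_level).
 */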

/**
 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level we want to calculate entries for
 *
 * Calculate the number of entries in a page directory or page table.
 */
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
				      unsigned level)
{
	unsigned shift = amdgpu_vm_level_shift(adev, 0);

	if (level == 0)
		/* For the root directory */
		return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
	else if (level != adev->vm_manager.num_level)
		/* Everything in between */
		return 512;
	else
		/* For the page tables on the leaves */
		return AMDGPU_VM_PTE_COUNT(adev);
}

/**
 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level we want to calculate the size for
 *
 * Calculate the size of the BO for a page directory or page table in bytes.
 */
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
}

/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->robj = vm->root.base.bo;
	entry->priority = 0;
	entry->tv.bo = &entry->robj->tbo;
	entry->tv.shared = true;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}

/**
 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 */
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*validate)(void *p, struct amdgpu_bo *bo),
			      void *param)
{
	struct ttm_bo_global *glob = adev->mman.bdev.glob;
	int r;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->evicted)) {
		struct amdgpu_vm_bo_base *bo_base;
		struct amdgpu_bo *bo;

		bo_base = list_first_entry(&vm->evicted,
					   struct amdgpu_vm_bo_base,
					   vm_status);
		spin_unlock(&vm->status_lock);

		bo = bo_base->bo;
		BUG_ON(!bo);
		if (bo->parent) {
			r = validate(param, bo);
			if (r)
				return r;

			spin_lock(&glob->lru_lock);
			ttm_bo_move_to_lru_tail(&bo->tbo);
			if (bo->shadow)
				ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
			spin_unlock(&glob->lru_lock);
		}

		if (bo->tbo.type == ttm_bo_type_kernel &&
		    vm->use_cpu_for_update) {
			r = amdgpu_bo_kmap(bo, NULL);
			if (r)
				return r;
		}

		spin_lock(&vm->status_lock);
		if (bo->tbo.type != ttm_bo_type_kernel)
			list_move(&bo_base->vm_status, &vm->moved);
		else
			list_move(&bo_base->vm_status, &vm->relocated);
	}
	spin_unlock(&vm->status_lock);

	return 0;
}

/**
 * amdgpu_vm_ready - check VM is ready for updates
 *
 * @vm: VM to check
 *
 * Check if all VM PDs/PTs are ready for updates
 */
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
	bool ready;

	spin_lock(&vm->status_lock);
	ready = list_empty(&vm->evicted);
	spin_unlock(&vm->status_lock);

	return ready;
}

/**
 * amdgpu_vm_alloc_levels - allocate the PD/PT levels
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @parent: parent PT
 * @saddr: start of the address range
 * @eaddr: end of the address range
 * @level: level of this PD/PT in the hierarchy
 *
 * Make sure the page directories and page tables are allocated
 */
static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm,
				  struct amdgpu_vm_pt *parent,
				  uint64_t saddr, uint64_t eaddr,
				  unsigned level)
{
309
	unsigned shift = amdgpu_vm_level_shift(adev, level);
310 311
	unsigned pt_idx, from, to;
	int r;
312
	u64 flags;
	uint64_t init_value = 0;
314 315 316 317

	if (!parent->entries) {
		unsigned num_entries = amdgpu_vm_num_entries(adev, level);

		parent->entries = kvmalloc_array(num_entries,
						   sizeof(struct amdgpu_vm_pt),
						   GFP_KERNEL | __GFP_ZERO);
321 322 323 324 325
		if (!parent->entries)
			return -ENOMEM;
		memset(parent->entries, 0 , sizeof(struct amdgpu_vm_pt));
	}

326 327 328 329 330
	from = saddr >> shift;
	to = eaddr >> shift;
	if (from >= amdgpu_vm_num_entries(adev, level) ||
	    to >= amdgpu_vm_num_entries(adev, level))
		return -EINVAL;
331 332

	++level;
333 334
	saddr = saddr & ((1 << shift) - 1);
	eaddr = eaddr & ((1 << shift) - 1);
335

	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
			AMDGPU_GEM_CREATE_VRAM_CLEARED;
	if (vm->use_cpu_for_update)
		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	else
		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
				AMDGPU_GEM_CREATE_SHADOW);

	if (vm->pte_support_ats) {
345
		init_value = AMDGPU_PTE_DEFAULT_ATC;
		if (level != adev->vm_manager.num_level - 1)
			init_value |= AMDGPU_PDE_PTE;
348

	}

351 352
	/* walk over the address space and allocate the page tables */
	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
353
		struct reservation_object *resv = vm->root.base.bo->tbo.resv;
354 355 356
		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
		struct amdgpu_bo *pt;

357
		if (!entry->base.bo) {
358 359 360 361
			r = amdgpu_bo_create(adev,
					     amdgpu_vm_bo_size(adev, level),
					     AMDGPU_GPU_PAGE_SIZE, true,
					     AMDGPU_GEM_DOMAIN_VRAM,
362
					     flags,
					     NULL, resv, init_value, &pt);
364 365 366
			if (r)
				return r;

			if (vm->use_cpu_for_update) {
				r = amdgpu_bo_kmap(pt, NULL);
				if (r) {
					amdgpu_bo_unref(&pt);
					return r;
				}
			}

375 376 377
			/* Keep a reference to the root directory to avoid
			* freeing them up in the wrong order.
			*/
378
			pt->parent = amdgpu_bo_ref(parent->base.bo);
379

380 381 382
			entry->base.vm = vm;
			entry->base.bo = pt;
			list_add_tail(&entry->base.bo_list, &pt->va);
383 384 385
			spin_lock(&vm->status_lock);
			list_add(&entry->base.vm_status, &vm->relocated);
			spin_unlock(&vm->status_lock);
386 387 388
		}

		if (level < adev->vm_manager.num_level) {
			uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
			uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
				((1 << shift) - 1);
			r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
						   sub_eaddr, level);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * amdgpu_vm_alloc_pts - Allocate page tables.
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @saddr: Start address which needs to be allocated
 * @size: Size from start address we need.
 *
 * Make sure the page tables are allocated.
 */
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			uint64_t saddr, uint64_t size)
{
	uint64_t last_pfn;
	uint64_t eaddr;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	eaddr = saddr + size - 1;
	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
	if (last_pfn >= adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
			last_pfn, adev->vm_manager.max_pfn);
		return -EINVAL;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

434
	return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
435 436
}
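
/*
 * Illustrative call only (not taken from the driver): reserve page tables
 * for a 2 MiB range; both @saddr and @size must be multiples of
 * AMDGPU_GPU_PAGE_SIZE, otherwise -EINVAL is returned.
 *
 *	r = amdgpu_vm_alloc_pts(adev, vm, addr, 2ULL << 20);
 *	if (r)
 *		return r;
 */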

/**
 * amdgpu_vm_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if GPU reset occurred since last use of the VMID.
 */
static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
				    struct amdgpu_vm_id *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}

static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub)
{
	return !!vm->reserved_vmid[vmhub];
}

/* idr_mgr->lock must be held */
static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
					       struct amdgpu_ring *ring,
					       struct amdgpu_sync *sync,
					       struct dma_fence *fence,
					       struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct amdgpu_vm_id *id = vm->reserved_vmid[vmhub];
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct dma_fence *updates = sync->last_vm_update;
	int r = 0;
	struct dma_fence *flushed, *tmp;
472
	bool needs_flush = vm->use_cpu_for_update;

	flushed  = id->flushed_updates;
	if ((amdgpu_vm_had_gpu_reset(adev, id)) ||
	    (atomic64_read(&id->owner) != vm->client_id) ||
	    (job->vm_pd_addr != id->pd_gpu_addr) ||
	    (updates && (!flushed || updates->context != flushed->context ||
			dma_fence_is_later(updates, flushed))) ||
	    (!id->last_flush || (id->last_flush->context != fence_context &&
				 !dma_fence_is_signaled(id->last_flush)))) {
		needs_flush = true;
		/* to prevent one context starved by another context */
		id->pd_gpu_addr = 0;
		tmp = amdgpu_sync_peek_fence(&id->active, ring);
		if (tmp) {
487
			r = amdgpu_sync_fence(adev, sync, tmp, false);
			return r;
		}
	}

	/* Good we can use this VMID. Remember this submission as
	* user of the VMID.
	*/
495
	r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
	if (r)
		goto out;

	if (updates && (!flushed || updates->context != flushed->context ||
			dma_fence_is_later(updates, flushed))) {
		dma_fence_put(id->flushed_updates);
		id->flushed_updates = dma_fence_get(updates);
	}
	id->pd_gpu_addr = job->vm_pd_addr;
	atomic64_set(&id->owner, vm->client_id);
	job->vm_needs_flush = needs_flush;
	if (needs_flush) {
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
	job->vm_id = id - id_mgr->ids;
	trace_amdgpu_vm_grab_id(vm, ring, job);
out:
	return r;
}

/**
 * amdgpu_vm_grab_id - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct dma_fence *fence,
		      struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
532
	unsigned vmhub = ring->funcs->vmhub;
533
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
534
	uint64_t fence_context = adev->fence_context + ring->idx;
535
	struct dma_fence *updates = sync->last_vm_update;
536
	struct amdgpu_vm_id *id, *idle;
537
	struct dma_fence **fences;
538 539 540
	unsigned i;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) {
		r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job);
		mutex_unlock(&id_mgr->lock);
		return r;
	}
547
	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
548 549
	if (!fences) {
		mutex_unlock(&id_mgr->lock);
550
		return -ENOMEM;
551
	}
552
	/* Check if we have an idle VMID */
553
	i = 0;
554
	list_for_each_entry(idle, &id_mgr->ids_lru, list) {
555 556
		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
		if (!fences[i])
557
			break;
558
		++i;
559 560
	}

561
	/* If we can't find a idle VMID to use, wait till one becomes available */
562
	if (&idle->list == &id_mgr->ids_lru) {
563 564
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
565
		struct dma_fence_array *array;
566 567 568
		unsigned j;

		for (j = 0; j < i; ++j)
569
			dma_fence_get(fences[j]);
570

571
		array = dma_fence_array_create(i, fences, fence_context,
572 573 574
					   seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
575
				dma_fence_put(fences[j]);
			kfree(fences);
			r = -ENOMEM;
			goto error;
		}


582
		r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
583
		dma_fence_put(&array->base);
584 585 586
		if (r)
			goto error;

587
		mutex_unlock(&id_mgr->lock);
		return 0;

	}
	kfree(fences);

593
	job->vm_needs_flush = vm->use_cpu_for_update;
594
	/* Check if we can use a VMID already assigned to this VM */
595
	list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
596
		struct dma_fence *flushed;
597
		bool needs_flush = vm->use_cpu_for_update;
598 599

		/* Check all the prerequisites to using this VMID */
600
		if (amdgpu_vm_had_gpu_reset(adev, id))
601
			continue;
602 603 604 605

		if (atomic64_read(&id->owner) != vm->client_id)
			continue;

606
		if (job->vm_pd_addr != id->pd_gpu_addr)
607 608
			continue;

609 610 611 612
		if (!id->last_flush ||
		    (id->last_flush->context != fence_context &&
		     !dma_fence_is_signaled(id->last_flush)))
			needs_flush = true;
613 614

		flushed  = id->flushed_updates;
		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
			needs_flush = true;

		/* Concurrent flushes are only possible starting with Vega10 */
		if (adev->asic_type < CHIP_VEGA10 && needs_flush)
620 621
			continue;

622 623 624
		/* Good we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
625
		r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
626 627
		if (r)
			goto error;
628

629 630 631 632
		if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
			dma_fence_put(id->flushed_updates);
			id->flushed_updates = dma_fence_get(updates);
		}
633

634 635 636 637
		if (needs_flush)
			goto needs_flush;
		else
			goto no_flush_needed;
638

639
	}
640

641 642
	/* Still no ID to use? Then use the idle one found earlier */
	id = idle;
643

644
	/* Remember this submission as user of the VMID */
645
	r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
646 647
	if (r)
		goto error;
648

649
	id->pd_gpu_addr = job->vm_pd_addr;
650 651
	dma_fence_put(id->flushed_updates);
	id->flushed_updates = dma_fence_get(updates);
652
	atomic64_set(&id->owner, vm->client_id);

needs_flush:
	job->vm_needs_flush = true;
	dma_fence_put(id->last_flush);
	id->last_flush = NULL;

no_flush_needed:
	list_move_tail(&id->list, &id_mgr->ids_lru);

662
	job->vm_id = id - id_mgr->ids;
663
	trace_amdgpu_vm_grab_id(vm, ring, job);
664 665

error:
666
	mutex_unlock(&id_mgr->lock);
667
	return r;
}

static void amdgpu_vm_free_reserved_vmid(struct amdgpu_device *adev,
					  struct amdgpu_vm *vm,
					  unsigned vmhub)
{
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub]) {
		list_add(&vm->reserved_vmid[vmhub]->list,
			&id_mgr->ids_lru);
		vm->reserved_vmid[vmhub] = NULL;
681
		atomic_dec(&id_mgr->reserved_vmid_num);
	}
	mutex_unlock(&id_mgr->lock);
}

static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device *adev,
					 struct amdgpu_vm *vm,
					 unsigned vmhub)
{
	struct amdgpu_vm_id_manager *id_mgr;
	struct amdgpu_vm_id *idle;
	int r = 0;

	id_mgr = &adev->vm_manager.id_mgr[vmhub];
	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub])
		goto unlock;
	if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
	    AMDGPU_VM_MAX_RESERVED_VMID) {
		DRM_ERROR("Over limitation of reserved vmid\n");
		atomic_dec(&id_mgr->reserved_vmid_num);
		r = -EINVAL;
		goto unlock;
	}
	/* Select the first entry VMID */
	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vm_id, list);
	list_del_init(&idle->list);
	vm->reserved_vmid[vmhub] = idle;
	mutex_unlock(&id_mgr->lock);

	return 0;
unlock:
	mutex_unlock(&id_mgr->lock);
	return r;
}

/**
 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 *
 * @adev: amdgpu_device pointer
 */
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
723
{
724
	const struct amdgpu_ip_block *ip_block;
725 726 727
	bool has_compute_vm_bug;
	struct amdgpu_ring *ring;
	int i;
728

729
	has_compute_vm_bug = false;
730 731

	ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
	if (ip_block) {
		/* Compute has a VM bug for GFX version < 7.
		   Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
		if (ip_block->version->major <= 7)
			has_compute_vm_bug = true;
		else if (ip_block->version->major == 8)
			if (adev->gfx.mec_fw_version < 673)
				has_compute_vm_bug = true;
	}
741

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
			/* only compute rings */
			ring->has_compute_vm_bug = has_compute_vm_bug;
747
		else
748
			ring->has_compute_vm_bug = false;
749 750 751
	}
}

752 753
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vm_id *id;
	bool gds_switch_needed;
760
	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;

	if (job->vm_id == 0)
		return false;
	id = &id_mgr->ids[job->vm_id];
	gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);

773 774
	if (amdgpu_vm_had_gpu_reset(adev, id))
		return true;

776
	return vm_flush_needed || gds_switch_needed;
777 778
}

779 780 781
static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
{
	return (adev->mc.real_vram_size == adev->mc.visible_vram_size);
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job: related job
 * @need_pipe_sync: is pipe sync needed
 *
 * Emit a VM flush when it is necessary.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
A
Alex Deucher 已提交
794
{
795
	struct amdgpu_device *adev = ring->adev;
796 797 798
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id];
799
	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);
806
	bool vm_flush_needed = job->vm_needs_flush;
807
	unsigned patch_offset = 0;
808
	int r;
809

810 811 812 813
	if (amdgpu_vm_had_gpu_reset(adev, id)) {
		gds_switch_needed = true;
		vm_flush_needed = true;
	}
814

	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
816
		return 0;
817

818 819
	if (ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);
820

	if (need_pipe_sync)
		amdgpu_ring_emit_pipeline_sync(ring);

824
	if (ring->funcs->emit_vm_flush && vm_flush_needed) {
825
		struct dma_fence *fence;
826

827 828
		trace_amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr);
		amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
829

830 831 832
		r = amdgpu_fence_emit(ring, &fence);
		if (r)
			return r;
833

834
		mutex_lock(&id_mgr->lock);
835 836
		dma_fence_put(id->last_flush);
		id->last_flush = fence;
837
		id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
838
		mutex_unlock(&id_mgr->lock);
839
	}
840

841
	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
		id->gds_base = job->gds_base;
		id->gds_size = job->gds_size;
		id->gws_base = job->gws_base;
		id->gws_size = job->gws_size;
		id->oa_base = job->oa_base;
		id->oa_size = job->oa_size;
		amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
					    job->gds_size, job->gws_base,
					    job->gws_size, job->oa_base,
					    job->oa_size);
	}

	if (ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
	if (ring->funcs->emit_switch_buffer) {
		amdgpu_ring_emit_switch_buffer(ring);
		amdgpu_ring_emit_switch_buffer(ring);
861
	}
862
	return 0;
}

/**
 * amdgpu_vm_reset_id - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub to use
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
			unsigned vmid)
875
{
876 877
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
878

879
	atomic64_set(&id->owner, 0);
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
}

/**
 * amdgpu_vm_reset_all_ids - reset VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use
 */
void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vm_id_manager *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vm_reset_id(adev, i, j);
	}
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
914
 * Find @bo inside the requested vm.
 * Search inside the @bos vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

925 926
	list_for_each_entry(bo_va, &bo->va, base.bo_list) {
		if (bo_va->base.vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
934
 * amdgpu_vm_do_set_ptes - helper to call the right asic function
 *
936
 * @params: see amdgpu_pte_update_params definition
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
946 947 948
static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
				  uint64_t pe, uint64_t addr,
				  unsigned count, uint32_t incr,
949
				  uint64_t flags)
{
951
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);

953
	if (count < 3) {
954 955
		amdgpu_vm_write_pte(params->adev, params->ib, pe,
				    addr | flags, count, incr);

	} else {
958
		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
				      count, incr, flags);
	}
}

/**
 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
 *
 * @params: see amdgpu_pte_update_params definition
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
978
				   uint64_t flags)
979
{
980
	uint64_t src = (params->src + (addr >> 12) * 8);
981

982 983 984 985

	trace_amdgpu_vm_copy_ptes(pe, src, count);

	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
986 987
}

/**
989
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
991
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
995
 * to and return the pointer for the page table entry.
 */
997
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

1001 1002
	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];
1003

1004 1005
	/* in case cpu page size != gpu page size*/
	result |= addr & (~PAGE_MASK);

1007
	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}
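
/*
 * Worked example (illustrative only, assuming 64 KiB CPU pages and 4 KiB GPU
 * pages): for addr == 0x12345 the routine looks up pages_addr[0x1], adds the
 * in-page offset 0x2345 and then aligns the result down to 4 KiB, so the
 * returned address points at the 4 KiB GPU page inside the 64 KiB CPU page.
 */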

/**
 * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
 *
 * @params: see amdgpu_pte_update_params definition
 * @pe: kmap addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Write count number of PT/PD entries directly.
 */
static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint64_t flags)
{
	unsigned int i;
1030
	uint64_t value;
1031

1032 1033
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);

1034
	for (i = 0; i < count; i++) {
1035 1036 1037
		value = params->pages_addr ?
			amdgpu_vm_map_gart(params->pages_addr, addr) :
			addr;
1038
		amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
1039
					i, value, flags);
1040 1041 1042 1043
		addr += incr;
	}
}

1044 1045
static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			     void *owner)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
1051
	amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
	r = amdgpu_sync_wait(&sync, true);
	amdgpu_sync_free(&sync);

	return r;
}

/*
 * amdgpu_vm_update_pde - update a single level in the hierarchy
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @parent: parent directory
 * @entry: entry to update
 *
 * Makes sure the requested entry in @parent is up to date.
 * Returns 0 for success, error for failure.
 */
static int amdgpu_vm_update_pde(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				struct amdgpu_vm_pt *parent,
				struct amdgpu_vm_pt *entry)
{
1073 1074
	struct amdgpu_pte_update_params params;
	struct amdgpu_bo *bo = entry->base.bo;
1075
	struct amdgpu_bo *shadow;
1076 1077
	struct amdgpu_ring *ring = NULL;
	uint64_t pd_addr, shadow_addr = 0;
1078
	struct amdgpu_job *job;
1079
	struct dma_fence *fence = NULL;
1080 1081
	unsigned ndw = 0;
	uint64_t pde, pt;

	int r;

1085 1086
	if (!parent->entries)
		return 0;
1087

1088 1089
	memset(&params, 0, sizeof(params));
	params.adev = adev;
1090
	shadow = parent->base.bo->shadow;

1092
	if (vm->use_cpu_for_update) {
1093
		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
1094
		r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
1095
		if (unlikely(r))
1096
			return r;
1097

1098 1099 1100 1101
		params.func = amdgpu_vm_cpu_set_ptes;
	} else {
		ring = container_of(vm->entity.sched, struct amdgpu_ring,
				    sched);

1103
		/* should be sufficient for two commands plus padding, etc. */
1104
		ndw = 64;
1105

1106
		pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
1107
		if (shadow)
1108
			shadow_addr = amdgpu_bo_gpu_offset(shadow);
1109
		else
1110 1111 1112
			shadow_addr = 0;

		r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1113 1114 1115
		if (r)
			return r;

1116 1117 1118
		params.ib = &job->ibs[0];
		params.func = amdgpu_vm_do_set_ptes;
	}
1119

1120 1121 1122
	spin_lock(&vm->status_lock);
	list_del_init(&entry->base.vm_status);
	spin_unlock(&vm->status_lock);

1124 1125 1126
	pt = amdgpu_bo_gpu_offset(bo);
	pt = amdgpu_gart_get_vm_pde(adev, pt);
	/* Don't update huge pages here */
1127
	if (entry->huge) {
1128 1129 1130 1131
		if (!vm->use_cpu_for_update)
			amdgpu_job_free(job);
		return 0;
	}

1133 1134 1135
	if (shadow) {
		pde = shadow_addr + (entry - parent->entries) * 8;
		params.func(&params, pde, pt, 1, 0, AMDGPU_PTE_VALID);
1136
	}

1138 1139 1140
	pde = pd_addr + (entry - parent->entries) * 8;
	params.func(&params, pde, pt, 1, 0, AMDGPU_PTE_VALID);

	if (!vm->use_cpu_for_update) {
		if (params.ib->length_dw == 0) {
			amdgpu_job_free(job);
		} else {
			amdgpu_ring_pad_ib(ring, params.ib);
1146 1147
			amdgpu_sync_resv(adev, &job->sync,
					 parent->base.bo->tbo.resv,
1148
					 AMDGPU_FENCE_OWNER_VM, false);
1149 1150 1151
			if (shadow)
				amdgpu_sync_resv(adev, &job->sync,
						 shadow->tbo.resv,
1152
						 AMDGPU_FENCE_OWNER_VM, false);

			WARN_ON(params.ib->length_dw > ndw);
			r = amdgpu_job_submit(job, ring, &vm->entity,
					AMDGPU_FENCE_OWNER_VM, &fence);
			if (r)
				goto error_free;
1159

1160
			amdgpu_bo_fence(parent->base.bo, fence, true);
1161 1162
			dma_fence_put(vm->last_update);
			vm->last_update = fence;
1163
		}
1164
	}

	return 0;

error_free:
1169
	amdgpu_job_free(job);
1170
	return r;
}

/*
 * amdgpu_vm_invalidate_level - mark all PD levels as invalid
 *
 * @parent: parent PD
 *
 * Mark all PD level as invalid after an error.
 */
1180 1181 1182 1183
static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
				       struct amdgpu_vm *vm,
				       struct amdgpu_vm_pt *parent,
				       unsigned level)
1184
{
1185
	unsigned pt_idx, num_entries;

	/*
	 * Recurse into the subdirectories. This recursion is harmless because
	 * we only have a maximum of 5 layers.
	 */
1191 1192
	num_entries = amdgpu_vm_num_entries(adev, level);
	for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
1193 1194
		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];

1195
		if (!entry->base.bo)
1196 1197
			continue;

1198
		spin_lock(&vm->status_lock);
1199 1200
		if (list_empty(&entry->base.vm_status))
			list_add(&entry->base.vm_status, &vm->relocated);
1201
		spin_unlock(&vm->status_lock);
1202
		amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
1203 1204 1205
	}
}

/*
 * amdgpu_vm_update_directories - make sure that all directories are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Makes sure all directories are up to date.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm)
{
1218
	int r = 0;
1219

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->relocated)) {
		struct amdgpu_vm_bo_base *bo_base;
		struct amdgpu_bo *bo;

		bo_base = list_first_entry(&vm->relocated,
					   struct amdgpu_vm_bo_base,
					   vm_status);
		spin_unlock(&vm->status_lock);

		bo = bo_base->bo->parent;
		if (bo) {
			struct amdgpu_vm_bo_base *parent;
1233
			struct amdgpu_vm_pt *pt, *entry;

			parent = list_first_entry(&bo->va,
						  struct amdgpu_vm_bo_base,
						  bo_list);
			pt = container_of(parent, struct amdgpu_vm_pt, base);
1239 1240
			entry = container_of(bo_base, struct amdgpu_vm_pt,
					     base);
1241

1242
			r = amdgpu_vm_update_pde(adev, vm, pt, entry);
1243
			if (r) {
1244 1245
				amdgpu_vm_invalidate_level(adev, vm,
							   &vm->root, 0);
				return r;
			}
			spin_lock(&vm->status_lock);
		} else {
			spin_lock(&vm->status_lock);
			list_del_init(&bo_base->vm_status);
		}
	}
	spin_unlock(&vm->status_lock);
1255

	if (vm->use_cpu_for_update) {
		/* Flush HDP */
		mb();
		amdgpu_gart_flush_gpu_tlb(adev, 0);
	}

1262
	return r;
1263 1264
}

/**
 * amdgpu_vm_get_entry - find the entry for an address
 *
 * @p: see amdgpu_pte_update_params definition
 * @addr: virtual address in question
 * @entry: resulting entry or NULL
 * @parent: parent entry
 *
 * Find the vm_pt entry and its parent for the given address.
 */
void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
			 struct amdgpu_vm_pt **entry,
			 struct amdgpu_vm_pt **parent)
1278
{
1279
	unsigned level = 0;
1280

1281 1282 1283
	*parent = NULL;
	*entry = &p->vm->root;
	while ((*entry)->entries) {
1284
		unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
1285

1286
		*parent = *entry;
1287 1288
		*entry = &(*entry)->entries[addr >> shift];
		addr &= (1ULL << shift) - 1;
1289 1290
	}

1291
	if (level != p->adev->vm_manager.num_level)
		*entry = NULL;
}
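
/*
 * Illustrative walk (not part of the driver): with a four level hierarchy
 * and 9 bit levels, each iteration strips amdgpu_vm_level_shift() bits from
 * @addr while descending root PD -> PD1 -> PD2 -> PT; if the walk stops
 * before reaching the leaf level, *entry is set to NULL.
 */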

/**
 * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
 *
 * @p: see amdgpu_pte_update_params definition
 * @entry: vm_pt entry to check
 * @parent: parent entry
 * @nptes: number of PTEs updated with this operation
 * @dst: destination address where the PTEs should point to
 * @flags: access flags for the PTEs
 *
 * Check if we can update the PD with a huge page.
 */
static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
					struct amdgpu_vm_pt *entry,
					struct amdgpu_vm_pt *parent,
					unsigned nptes, uint64_t dst,
					uint64_t flags)
{
	bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes);
	uint64_t pd_addr, pde;

	/* In the case of a mixed PT the PDE must point to it*/
	if (p->adev->asic_type < CHIP_VEGA10 ||
	    nptes != AMDGPU_VM_PTE_COUNT(p->adev) ||
1319
	    p->src ||
1320 1321
	    !(flags & AMDGPU_PTE_VALID)) {

1322
		dst = amdgpu_bo_gpu_offset(entry->base.bo);
1323 1324 1325
		dst = amdgpu_gart_get_vm_pde(p->adev, dst);
		flags = AMDGPU_PTE_VALID;
	} else {
1326
		/* Set the huge page flag to stop scanning at this PDE */
1327 1328 1329
		flags |= AMDGPU_PDE_PTE;
	}

1330
	if (!entry->huge && !(flags & AMDGPU_PDE_PTE))
1331
		return;
1332

1333
	entry->huge = !!(flags & AMDGPU_PDE_PTE);
1334 1335

	if (use_cpu_update) {
		/* In case a huge page is replaced with a system
		 * memory mapping, p->pages_addr != NULL and
		 * amdgpu_vm_cpu_set_ptes would try to translate dst
		 * through amdgpu_vm_map_gart. But dst is already a
		 * GPU address (of the page table). Disable
		 * amdgpu_vm_map_gart temporarily.
		 */
		dma_addr_t *tmp;

		tmp = p->pages_addr;
		p->pages_addr = NULL;

1348
		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
1349 1350
		pde = pd_addr + (entry - parent->entries) * 8;
		amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
1351 1352

		p->pages_addr = tmp;
1353
	} else {
1354 1355
		if (parent->base.bo->shadow) {
			pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
1356 1357 1358
			pde = pd_addr + (entry - parent->entries) * 8;
			amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
		}
1359
		pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
1360 1361 1362
		pde = pd_addr + (entry - parent->entries) * 8;
		amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
	}
1363 1364
}

/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
1368
 * @params: see amdgpu_pte_update_params definition
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
1372
 * @dst: destination address to map to, the next dst inside the function
 * @flags: mapping flags
 *
1375
 * Update the page tables in the range @start - @end.
1376
 * Returns 0 for success, -EINVAL for failure.
 */
1378
static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1379
				  uint64_t start, uint64_t end,
1380
				  uint64_t dst, uint64_t flags)
{
1382 1383
	struct amdgpu_device *adev = params->adev;
	const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
1384

1385
	uint64_t addr, pe_start;
1386
	struct amdgpu_bo *pt;
1387
	unsigned nptes;
1388
	bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes);

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; addr += nptes,
	     dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
		struct amdgpu_vm_pt *entry, *parent;

		amdgpu_vm_get_entry(params, addr, &entry, &parent);
		if (!entry)
			return -ENOENT;
1398

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
1402
			nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);

1404 1405
		amdgpu_vm_handle_huge_pages(params, entry, parent,
					    nptes, dst, flags);
1406
		/* We don't need to update PTEs for huge pages */
1407
		if (entry->huge)
1408 1409
			continue;

1410
		pt = entry->base.bo;
1411
		if (use_cpu_update) {
1412
			pe_start = (unsigned long)amdgpu_bo_kptr(pt);
		} else {
			if (pt->shadow) {
				pe_start = amdgpu_bo_gpu_offset(pt->shadow);
				pe_start += (addr & mask) * 8;
				params->func(params, pe_start, dst, nptes,
					     AMDGPU_GPU_PAGE_SIZE, flags);
			}
1420
			pe_start = amdgpu_bo_gpu_offset(pt);
1421
		}

1423 1424 1425
		pe_start += (addr & mask) * 8;
		params->func(params, pe_start, dst, nptes,
			     AMDGPU_GPU_PAGE_SIZE, flags);
	}

1428
	return 0;
}

/*
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @params: see amdgpu_pte_update_params definition
 * @vm: requested vm
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @dst: addr those PTEs should point to
 * @flags: hw mapping flags
1440
 * Returns 0 for success, -EINVAL for failure.
1441
 */
1442
static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params	*params,
1443
				uint64_t start, uint64_t end,
1444
				uint64_t dst, uint64_t flags)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */
1464 1465
	unsigned max_frag = params->adev->vm_manager.fragment_size;
	int r;
1466 1467

	/* system pages are not contiguous */
1468
	if (params->src || !(flags & AMDGPU_PTE_VALID))
1469
		return amdgpu_vm_update_ptes(params, start, end, dst, flags);
1470

	while (start != end) {
		uint64_t frag_flags, frag_end;
		unsigned frag;

		/* This intentionally wraps around if no bit is set */
		frag = min((unsigned)ffs(start) - 1,
			   (unsigned)fls64(end - start) - 1);
		if (frag >= max_frag) {
			frag_flags = AMDGPU_PTE_FRAG(max_frag);
			frag_end = end & ~((1ULL << max_frag) - 1);
		} else {
			frag_flags = AMDGPU_PTE_FRAG(frag);
			frag_end = start + (1 << frag);
		}

		r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
					  flags | frag_flags);
1488 1489
		if (r)
			return r;
1490

1491 1492
		dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
		start = frag_end;
1493
	}
1494 1495

	return 0;
}
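
/*
 * Worked example (illustrative only, fragment_size == 9): for a mapping of
 * GPU pages [0x200, 0x1000) the loop computes
 * frag = min(ffs(0x200) - 1, fls64(0xe00) - 1) = min(9, 11) = 9 >= max_frag,
 * so frag_end is the end rounded down to a 512 page boundary (0x1000 here)
 * and the whole range is written with AMDGPU_PTE_FRAG(9) in a single pass.
 */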

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
1502
 * @exclusive: fence we need to sync to
1503
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
1505 1506 1507
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
1511
 * Fill in the page table entries between @start and @last.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1515
				       struct dma_fence *exclusive,
1516
				       dma_addr_t *pages_addr,
				       struct amdgpu_vm *vm,
1518
				       uint64_t start, uint64_t last,
1519
				       uint64_t flags, uint64_t addr,
1520
				       struct dma_fence **fence)
{
1522
	struct amdgpu_ring *ring;
1523
	void *owner = AMDGPU_FENCE_OWNER_VM;
	unsigned nptes, ncmds, ndw;
1525
	struct amdgpu_job *job;
1526
	struct amdgpu_pte_update_params params;
1527
	struct dma_fence *f = NULL;
	int r;

1530 1531
	memset(&params, 0, sizeof(params));
	params.adev = adev;
1532
	params.vm = vm;
1533

1534 1535 1536 1537
	/* sync to everything on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;

	if (vm->use_cpu_for_update) {
		/* params.src is used as flag to indicate system Memory */
		if (pages_addr)
			params.src = ~0;

		/* Wait for PT BOs to be free. PTs share the same resv. object
		 * as the root PD BO
		 */
1546
		r = amdgpu_vm_wait_pd(adev, vm, owner);
		if (unlikely(r))
			return r;

		params.func = amdgpu_vm_cpu_set_ptes;
		params.pages_addr = pages_addr;
		return amdgpu_vm_frag_ptes(&params, start, last + 1,
					   addr, flags);
	}

1556
	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
1557

1558
	nptes = last - start + 1;

	/*
	 * reserve space for two commands every (1 << BLOCK_SIZE)
	 * entries or 2k dwords (whatever is smaller)
	 *
	 * The second command is for the shadow pagetables.
	 */
	ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;

	/* padding, etc. */
	ndw = 64;

1571 1572 1573
	/* one PDE write for each huge page */
	ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;

1574
	if (pages_addr) {
1575
		/* copy commands needed */
1576
		ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;

1578
		/* and also PTEs */
		ndw += nptes * 2;

1581 1582
		params.func = amdgpu_vm_do_copy_ptes;

	} else {
		/* set page commands needed */
1585
		ndw += ncmds * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;

1587
		/* extra commands for begin/end fragments */
1588 1589
		ndw += 2 * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw
				* adev->vm_manager.fragment_size;
1590 1591

		params.func = amdgpu_vm_do_set_ptes;
	}

1594 1595
	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
		return r;
1597

1598
	params.ib = &job->ibs[0];

1600
	if (pages_addr) {
		uint64_t *pte;
		unsigned i;

		/* Put the PTEs at the end of the IB. */
		i = ndw - nptes * 2;
		pte = (uint64_t *)&(job->ibs->ptr[i]);
		params.src = job->ibs->gpu_addr + i * 4;

		for (i = 0; i < nptes; ++i) {
			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
						    AMDGPU_GPU_PAGE_SIZE);
			pte[i] |= flags;
		}
1614
		addr = 0;
1615 1616
	}

1617
	r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
1618 1619 1620
	if (r)
		goto error_free;

1621
	r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1622
			     owner, false);
1623 1624
	if (r)
		goto error_free;

1626
	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
1627 1628 1629
	if (r)
		goto error_free;

1630 1631 1632
	r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
	if (r)
		goto error_free;

1634 1635
	amdgpu_ring_pad_ib(ring, params.ib);
	WARN_ON(params.ib->length_dw > ndw);
1636 1637
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &f);
1638 1639
	if (r)
		goto error_free;

1641
	amdgpu_bo_fence(vm->root.base.bo, f, true);
1642 1643
	dma_fence_put(*fence);
	*fence = f;
	return 0;

error_free:
1647
	amdgpu_job_free(job);
1648
	amdgpu_vm_invalidate_level(adev, vm, &vm->root, 0);
1649
	return r;
}

1652 1653 1654 1655
/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
1656
 * @exclusive: fence we need to sync to
1657
 * @pages_addr: DMA addresses to use for mapping
1658 1659
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
1660
 * @flags: HW flags for the mapping
1661
 * @nodes: array of drm_mm_nodes with the MC addresses
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into a SDMA IB.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1669
				      struct dma_fence *exclusive,
1670
				      dma_addr_t *pages_addr,
1671 1672
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo_va_mapping *mapping,
1673
				      uint64_t flags,
1674
				      struct drm_mm_node *nodes,
1675
				      struct dma_fence **fence)
1676
{
1677
	unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1678
	uint64_t pfn, start = mapping->start;
	int r;

	/* normally bo_va->flags only contains READABLE and WRITEABLE bits,
	 * but just in case we filter the flags here first
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

1689 1690 1691
	flags &= ~AMDGPU_PTE_EXECUTABLE;
	flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

1692 1693 1694
	flags &= ~AMDGPU_PTE_MTYPE_MASK;
	flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);

	if ((mapping->flags & AMDGPU_PTE_PRT) &&
	    (adev->asic_type >= CHIP_VEGA10)) {
		flags |= AMDGPU_PTE_PRT;
		flags &= ~AMDGPU_PTE_VALID;
	}

1701 1702
	trace_amdgpu_vm_bo_update(mapping);

	pfn = mapping->offset >> PAGE_SHIFT;
	if (nodes) {
		while (pfn >= nodes->size) {
			pfn -= nodes->size;
			++nodes;
		}
1709
	}
1710

1711
	do {
1712
		dma_addr_t *dma_addr = NULL;
1713 1714
		uint64_t max_entries;
		uint64_t addr, last;
1715

		if (nodes) {
			addr = nodes->start << PAGE_SHIFT;
			max_entries = (nodes->size - pfn) *
				(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
		} else {
			addr = 0;
			max_entries = S64_MAX;
		}
1724

1725
		if (pages_addr) {
1726 1727
			uint64_t count;

1728
			max_entries = min(max_entries, 16ull * 1024ull);
			for (count = 1; count < max_entries; ++count) {
				uint64_t idx = pfn + count;

				if (pages_addr[idx] !=
				    (pages_addr[idx - 1] + PAGE_SIZE))
					break;
			}

			if (count < min_linear_pages) {
				addr = pfn << PAGE_SHIFT;
				dma_addr = pages_addr;
			} else {
				addr = pages_addr[pfn];
				max_entries = count;
			}

1745 1746
		} else if (flags & AMDGPU_PTE_VALID) {
			addr += adev->vm_manager.vram_base_offset;
1747
			addr += pfn << PAGE_SHIFT;
1748 1749
		}

1750
		last = min((uint64_t)mapping->last, start + max_entries - 1);
1751
		r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
1752 1753 1754 1755 1756
						start, last, flags, addr,
						fence);
		if (r)
			return r;

1757 1758 1759 1760 1761
		pfn += last - start + 1;
		if (nodes && nodes->size == pfn) {
			pfn = 0;
			++nodes;
		}
1762
		start = last + 1;
1763

1764
	} while (unlikely(start != mapping->last + 1));
1765 1766 1767 1768

	return 0;
}
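/*
 * Example for the loop above (illustrative numbers only): with a fragment
 * size of 9 the driver prefers runs of at least 512 contiguous system pages.
 * For a GTT mapping whose dma_address array starts with a 2 MB physically
 * contiguous run, the first amdgpu_vm_bo_update_mapping() call covers that
 * run with a single linear address (max_entries = count), while shorter runs
 * fall back to per page translation through the dma_addr array.
 */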

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @clear: if true clear the entries
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear)
{
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo_va_mapping *mapping;
	dma_addr_t *pages_addr = NULL;
	struct ttm_mem_reg *mem;
	struct drm_mm_node *nodes;
	struct dma_fence *exclusive, **last_update;
	uint64_t flags;
	int r;

	if (clear || !bo_va->base.bo) {
		mem = NULL;
		nodes = NULL;
		exclusive = NULL;
	} else {
		struct ttm_dma_tt *ttm;

		mem = &bo_va->base.bo->tbo.mem;
		nodes = mem->mm_node;
		if (mem->mem_type == TTM_PL_TT) {
			ttm = container_of(bo_va->base.bo->tbo.ttm,
					   struct ttm_dma_tt, ttm);
			pages_addr = ttm->dma_address;
		}
		exclusive = reservation_object_get_excl(bo->tbo.resv);
	}

	if (bo)
		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
	else
		flags = 0x0;

	if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
		last_update = &vm->last_update;
	else
		last_update = &bo_va->last_pt_update;

	if (!clear && bo_va->base.moved) {
		bo_va->base.moved = false;
		list_splice_init(&bo_va->valids, &bo_va->invalids);

	} else if (bo_va->cleared != clear) {
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	}

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
					       mapping, flags, nodes,
					       last_update);
		if (r)
			return r;
	}

	if (vm->use_cpu_for_update) {
		/* Flush HDP */
		mb();
		amdgpu_gart_flush_gpu_tlb(adev, 0);
	}

	spin_lock(&vm->status_lock);
	list_del_init(&bo_va->base.vm_status);
	spin_unlock(&vm->status_lock);

	list_splice_init(&bo_va->invalids, &bo_va->valids);
	bo_va->cleared = clear;

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
	}

	return 0;
}
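/*
 * Illustrative call pattern only (the real callers live outside this file):
 * with the VM page tables reserved, a caller updates one mapping with
 *
 *	r = amdgpu_vm_bo_update(adev, bo_va, false);
 *
 * amdgpu_vm_handle_moved() below passes clear == true for BOs that do not
 * share the VM's reservation object, so their stale entries are cleared
 * instead of being pointed at the BO's new location.
 */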

/**
 * amdgpu_vm_update_prt_state - update the global PRT state
 */
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
	unsigned long flags;
	bool enable;

	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
	adev->gart.gart_funcs->set_prt(adev, enable);
	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
}

/**
 * amdgpu_vm_prt_get - add a PRT user
 */
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
	if (!adev->gart.gart_funcs->set_prt)
		return;

	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
		amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_put - drop a PRT user
 */
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
		amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_cb - callback for updating the PRT status
 */
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);

	amdgpu_vm_prt_put(cb->adev);
	kfree(cb);
}

/**
 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
 */
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
				 struct dma_fence *fence)
{
	struct amdgpu_prt_cb *cb;

	if (!adev->gart.gart_funcs->set_prt)
		return;

	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		if (fence)
			dma_fence_wait(fence, false);

		amdgpu_vm_prt_put(adev);
	} else {
		cb->adev = adev;
		if (!fence || dma_fence_add_callback(fence, &cb->cb,
						     amdgpu_vm_prt_cb))
			amdgpu_vm_prt_cb(fence, &cb->cb);
	}
}
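/*
 * Note on the PRT user count: amdgpu_vm_bo_insert_map() below takes a
 * reference with amdgpu_vm_prt_get() for every mapping that has
 * AMDGPU_PTE_PRT set, and amdgpu_vm_free_mapping() drops it again through
 * amdgpu_vm_add_prt_cb() once the unmap fence has signaled, so the global
 * PRT state is only disabled after the last PRT mapping is really gone
 * from the page tables.
 */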

/**
 * amdgpu_vm_free_mapping - free a mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @mapping: mapping to be freed
 * @fence: fence of the unmap operation
 *
 * Free a mapping and make sure we decrease the PRT usage count if applicable.
 */
static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo_va_mapping *mapping,
				   struct dma_fence *fence)
{
	if (mapping->flags & AMDGPU_PTE_PRT)
		amdgpu_vm_add_prt_cb(adev, fence);
	kfree(mapping);
}

/**
 * amdgpu_vm_prt_fini - finish all prt mappings
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Register a cleanup callback to disable PRT support after VM dies.
 */
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct reservation_object *resv = vm->root.base.bo->tbo.resv;
	struct dma_fence *excl, **shared;
	unsigned i, shared_count;
	int r;

	r = reservation_object_get_fences_rcu(resv, &excl,
					      &shared_count, &shared);
	if (r) {
		/* Not enough memory to grab the fence list, as last resort
		 * block for all the fences to complete.
		 */
		reservation_object_wait_timeout_rcu(resv, true, false,
						    MAX_SCHEDULE_TIMEOUT);
		return;
	}

	/* Add a callback for each fence in the reservation object */
	amdgpu_vm_prt_get(adev);
	amdgpu_vm_add_prt_cb(adev, excl);

	for (i = 0; i < shared_count; ++i) {
		amdgpu_vm_prt_get(adev);
		amdgpu_vm_add_prt_cb(adev, shared[i]);
	}

	kfree(shared);
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @fence: optional resulting fence (unchanged if no work needed to be done
 * or if an error occurred)
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct dma_fence *f = NULL;
	int r;
	uint64_t init_pte_value = 0;

	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);

		if (vm->pte_support_ats)
			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;

		r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
						mapping->start, mapping->last,
						init_pte_value, 0, &f);
		amdgpu_vm_free_mapping(adev, vm, mapping, f);
		if (r) {
			dma_fence_put(f);
			return r;
		}
	}

	if (fence && f) {
		dma_fence_put(*fence);
		*fence = f;
	} else {
		dma_fence_put(f);
	}
	return 0;

}

/**
 * amdgpu_vm_handle_moved - handle moved BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Make sure all BOs which are moved are updated in the PTs.
 * Returns 0 for success.
 *
 * PTs have to be reserved!
 */
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm)
{
	bool clear;
	int r = 0;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->moved)) {
		struct amdgpu_bo_va *bo_va;

		bo_va = list_first_entry(&vm->moved,
			struct amdgpu_bo_va, base.vm_status);
		spin_unlock(&vm->status_lock);

		/* Per VM BOs never need to be cleared in the page tables */
		clear = bo_va->base.bo->tbo.resv != vm->root.base.bo->tbo.resv;

		r = amdgpu_vm_bo_update(adev, bo_va, clear);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	return r;
}

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm.
 * Add @bo to the list of bos associated with the vm
 * Returns newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->base.vm = vm;
	bo_va->base.bo = bo;
	INIT_LIST_HEAD(&bo_va->base.bo_list);
	INIT_LIST_HEAD(&bo_va->base.vm_status);

	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);

	if (bo)
		list_add_tail(&bo_va->base.bo_list, &bo->va);

	return bo_va;
}

/**
 * amdgpu_vm_bo_insert_map - insert a new mapping
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @mapping: the mapping to insert
 *
 * Insert a new mapping into all structures.
 */
static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va,
				    struct amdgpu_bo_va_mapping *mapping)
{
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo *bo = bo_va->base.bo;

	mapping->bo_va = bo_va;
	list_add(&mapping->list, &bo_va->invalids);
	amdgpu_vm_it_insert(mapping, &vm->va);

	if (mapping->flags & AMDGPU_PTE_PRT)
		amdgpu_vm_prt_get(adev);

	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
		spin_lock(&vm->status_lock);
		if (list_empty(&bo_va->base.vm_status))
			list_add(&bo_va->base.vm_status, &vm->moved);
		spin_unlock(&vm->status_lock);
	}
	trace_amdgpu_vm_bo_map(bo_va, mapping);
}

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	uint64_t eaddr;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fit at this offset */
	eaddr = saddr + size - 1;
	if (saddr >= eaddr ||
	    (bo && offset + size > amdgpu_bo_size(bo)))
		return -EINVAL;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	if (tmp) {
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
			tmp->start, tmp->last + 1);
		return -EINVAL;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);

	return 0;
}
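/*
 * Example (illustrative values only): mapping the first MiB of a BO at GPU
 * virtual address 0x100000 with read/write access would look like
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, 0x100000,
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *
 * saddr, offset and size all have to be multiples of AMDGPU_GPU_PAGE_SIZE,
 * otherwise the parameter check above returns -EINVAL.
 */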

/**
 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM. Replace existing
 * mappings as we do so.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t saddr, uint64_t offset,
			     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo = bo_va->base.bo;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fit at this offset */
	eaddr = saddr + size - 1;
	if (saddr >= eaddr ||
	    (bo && offset + size > amdgpu_bo_size(bo)))
		return -EINVAL;

	/* Allocate all the needed memory */
	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
	if (r) {
		kfree(mapping);
		return r;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);

	return 0;
}

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->base.vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids)
			return -ENOENT;
	}

	list_del(&mapping->list);
	amdgpu_vm_it_remove(mapping, &vm->va);
	mapping->bo_va = NULL;
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid)
		list_add(&mapping->list, &vm->freed);
	else
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);

	return 0;
}

/**
 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
 *
 * @adev: amdgpu_device pointer
 * @vm: VM structure to use
 * @saddr: start of the range
 * @size: size of the range
 *
 * Remove all mappings in a range, split them as appropriate.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size)
{
	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
	LIST_HEAD(removed);
	uint64_t eaddr;

	eaddr = saddr + size - 1;
	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	/* Allocate all the needed memory */
	before = kzalloc(sizeof(*before), GFP_KERNEL);
	if (!before)
		return -ENOMEM;
	INIT_LIST_HEAD(&before->list);

	after = kzalloc(sizeof(*after), GFP_KERNEL);
	if (!after) {
		kfree(before);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&after->list);

	/* Now gather all removed mappings */
	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	while (tmp) {
		/* Remember mapping split at the start */
		if (tmp->start < saddr) {
			before->start = tmp->start;
			before->last = saddr - 1;
			before->offset = tmp->offset;
			before->flags = tmp->flags;
			list_add(&before->list, &tmp->list);
		}

		/* Remember mapping split at the end */
		if (tmp->last > eaddr) {
			after->start = eaddr + 1;
			after->last = tmp->last;
			after->offset = tmp->offset;
			after->offset += after->start - tmp->start;
			after->flags = tmp->flags;
			list_add(&after->list, &tmp->list);
		}

		list_del(&tmp->list);
		list_add(&tmp->list, &removed);

		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
	}

	/* And free them up */
	list_for_each_entry_safe(tmp, next, &removed, list) {
		amdgpu_vm_it_remove(tmp, &vm->va);
		list_del(&tmp->list);

		if (tmp->start < saddr)
			tmp->start = saddr;
		if (tmp->last > eaddr)
			tmp->last = eaddr;

		tmp->bo_va = NULL;
		list_add(&tmp->list, &vm->freed);
		trace_amdgpu_vm_bo_unmap(NULL, tmp);
	}

	/* Insert partial mapping before the range */
	if (!list_empty(&before->list)) {
		amdgpu_vm_it_insert(before, &vm->va);
		if (before->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(before);
	}

	/* Insert partial mapping after the range */
	if (!list_empty(&after->list)) {
		amdgpu_vm_it_insert(after, &vm->va);
		if (after->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(after);
	}

	return 0;
}
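/*
 * Worked example for the splitting above (illustrative addresses only):
 * clearing 0x200000...0x2fffff out of an existing mapping that covers
 * 0x100000...0x3fffff leaves a "before" mapping for 0x100000...0x1fffff and
 * an "after" mapping for 0x300000...0x3fffff, while the original mapping is
 * moved to the freed list so its page table entries get invalidated by
 * amdgpu_vm_clear_freed().
 */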

/**
 * amdgpu_vm_bo_lookup_mapping - find mapping by address
 *
 * @vm: the requested VM
 * @addr: the address to look up
 *
 * Find a mapping by its address.
 */
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr)
{
	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
}

/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->base.vm;

	list_del(&bo_va->base.bo_list);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->base.vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		mapping->bo_va = NULL;
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);
	}

	dma_fence_put(bo_va->last_pt_update);
	kfree(bo_va);
}

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 * @evicted: is the BO evicted
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted)
{
	struct amdgpu_vm_bo_base *bo_base;

	list_for_each_entry(bo_base, &bo->va, bo_list) {
		struct amdgpu_vm *vm = bo_base->vm;

		bo_base->moved = true;
		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
			spin_lock(&bo_base->vm->status_lock);
			if (bo->tbo.type == ttm_bo_type_kernel)
				list_move(&bo_base->vm_status, &vm->evicted);
			else
				list_move_tail(&bo_base->vm_status,
					       &vm->evicted);
			spin_unlock(&bo_base->vm->status_lock);
			continue;
		}

		if (bo->tbo.type == ttm_bo_type_kernel) {
			spin_lock(&bo_base->vm->status_lock);
			if (list_empty(&bo_base->vm_status))
				list_add(&bo_base->vm_status, &vm->relocated);
			spin_unlock(&bo_base->vm->status_lock);
			continue;
		}

		spin_lock(&bo_base->vm->status_lock);
		if (list_empty(&bo_base->vm_status))
			list_add(&bo_base->vm_status, &vm->moved);
		spin_unlock(&bo_base->vm->status_lock);
	}
}

static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
	/* Total bits covered by PD + PTs */
	unsigned bits = ilog2(vm_size) + 18;

	/* Make sure the PD is 4K in size up to 8GB address space.
	   Above that split equal between PD and PTs */
	if (vm_size <= 8)
		return (bits - 9);
	else
		return ((bits + 3) / 2);
}
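/*
 * Example: for a 64 GB address space the code above computes
 * bits = ilog2(64) + 18 = 24, and since 64 > 8 the block size becomes
 * (24 + 3) / 2 = 13, i.e. 2^13 4 KB pages (32 MB) per page table block.
 */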

/**
 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
 *
 * @adev: amdgpu_device pointer
 * @vm_size: the default vm size if it's set auto
 * @fragment_size_default: default fragment size
 * @max_level: max VMPT level
 * @max_bits: max address space size in bits
 */
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits)
{
	uint64_t tmp;

	/* adjust vm size first */
	if (amdgpu_vm_size != -1) {
		unsigned max_size = 1 << (max_bits - 30);

		vm_size = amdgpu_vm_size;
		if (vm_size > max_size) {
			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
				 amdgpu_vm_size, max_size);
			vm_size = max_size;
		}
	}

	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;

	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
	if (amdgpu_vm_block_size != -1)
		tmp >>= amdgpu_vm_block_size - 9;
	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);

	/* block size depends on vm size and hw setup */
	if (amdgpu_vm_block_size != -1)
		adev->vm_manager.block_size =
			min((unsigned)amdgpu_vm_block_size, max_bits
			    - AMDGPU_GPU_PAGE_SHIFT
			    - 9 * adev->vm_manager.num_level);
	else if (adev->vm_manager.num_level > 1)
		adev->vm_manager.block_size = 9;
	else
		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);

	if (amdgpu_vm_fragment_size == -1)
		adev->vm_manager.fragment_size = fragment_size_default;
	else
		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;

	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
		 vm_size, adev->vm_manager.num_level + 1,
		 adev->vm_manager.block_size,
		 adev->vm_manager.fragment_size);
}
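/*
 * Example (illustrative only): with vm_size = 64 GB the code above sets
 * max_pfn = 64 << 18 = 16777216, i.e. the number of 4 KB GPU pages in a
 * 64 GB address space (1 GB = 2^18 pages of AMDGPU_GPU_PAGE_SIZE).
 */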

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @vm_context: Indicates if it is a GFX or Compute context
 * @pasid: Process address space identifier
 *
 * Init @vm fields.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, unsigned int pasid)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
		AMDGPU_VM_PTE_COUNT(adev) * 8);
	unsigned ring_instance;
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	int r, i;
	u64 flags;
	uint64_t init_pde_value = 0;

	vm->va = RB_ROOT_CACHED;
	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		vm->reserved_vmid[i] = NULL;
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->evicted);
	INIT_LIST_HEAD(&vm->relocated);
	INIT_LIST_HEAD(&vm->moved);
	INIT_LIST_HEAD(&vm->freed);

	/* create scheduler entity for page table updates */

	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
	ring_instance %= adev->vm_manager.vm_pte_num_rings;
	ring = adev->vm_manager.vm_pte_rings[ring_instance];
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
	r = drm_sched_entity_init(&ring->sched, &vm->entity,
				  rq, amdgpu_sched_jobs, NULL);
	if (r)
		return r;

	vm->pte_support_ats = false;

	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
						AMDGPU_VM_USE_CPU_FOR_COMPUTE);

		if (adev->asic_type == CHIP_RAVEN) {
			vm->pte_support_ats = true;
			init_pde_value = AMDGPU_PTE_DEFAULT_ATC
					| AMDGPU_PDE_PTE;

		}
	} else
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
						AMDGPU_VM_USE_CPU_FOR_GFX);
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
		  "CPU update of VM recommended only for large BAR system\n");
	vm->last_update = NULL;

	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
			AMDGPU_GEM_CREATE_VRAM_CLEARED;
	if (vm->use_cpu_for_update)
		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	else
		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
				AMDGPU_GEM_CREATE_SHADOW);

	r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     flags,
			     NULL, NULL, init_pde_value, &vm->root.base.bo);
	if (r)
		goto error_free_sched_entity;

	vm->root.base.vm = vm;
	list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
	INIT_LIST_HEAD(&vm->root.base.vm_status);

	if (vm->use_cpu_for_update) {
		r = amdgpu_bo_reserve(vm->root.base.bo, false);
		if (r)
			goto error_free_root;

		r = amdgpu_bo_kmap(vm->root.base.bo, NULL);
		amdgpu_bo_unreserve(vm->root.base.bo);
		if (r)
			goto error_free_root;
	}

	if (pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
			      GFP_ATOMIC);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
		if (r < 0)
			goto error_free_root;

		vm->pasid = pasid;
	}

	INIT_KFIFO(vm->faults);
	vm->fault_credit = 16;

	return 0;

error_free_root:
	amdgpu_bo_unref(&vm->root.base.bo->shadow);
	amdgpu_bo_unref(&vm->root.base.bo);
	vm->root.base.bo = NULL;

error_free_sched_entity:
	drm_sched_entity_fini(&ring->sched, &vm->entity);

	return r;
}
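/*
 * Illustrative usage only (the real caller lives in the driver's file open
 * path): a graphics context VM would typically be created with
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
 *
 * where pasid may be 0 if no PASID could be allocated.
 */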

/**
 * amdgpu_vm_free_levels - free PD/PT levels
 *
 * @adev: amdgpu device structure
 * @parent: PD/PT starting level to free
 * @level: level of parent structure
 *
 * Free the page directory or page table level and all sub levels.
 */
static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
				  struct amdgpu_vm_pt *parent,
				  unsigned level)
{
	unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);

	if (parent->base.bo) {
		list_del(&parent->base.bo_list);
		list_del(&parent->base.vm_status);
		amdgpu_bo_unref(&parent->base.bo->shadow);
		amdgpu_bo_unref(&parent->base.bo);
	}

	if (parent->entries)
		for (i = 0; i < num_entries; i++)
			amdgpu_vm_free_levels(adev, &parent->entries[i],
					      level + 1);

	kvfree(parent->entries);
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all bos from the vm bo list
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
	struct amdgpu_bo *root;
	u64 fault;
	int i, r;

	/* Clear pending page faults from IH when the VM is destroyed */
	while (kfifo_get(&vm->faults, &fault))
		amdgpu_ih_clear_fault(adev, fault);

	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}

	drm_sched_entity_fini(vm->entity.sched, &vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp,
					     &vm->va.rb_root, rb) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
			amdgpu_vm_prt_fini(adev, vm);
			prt_fini_needed = false;
		}

		list_del(&mapping->list);
		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
	}

	root = amdgpu_bo_ref(vm->root.base.bo);
	r = amdgpu_bo_reserve(root, true);
	if (r) {
		dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
	} else {
		amdgpu_vm_free_levels(adev, &vm->root, 0);
		amdgpu_bo_unreserve(root);
	}
	amdgpu_bo_unref(&root);
	dma_fence_put(vm->last_update);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		amdgpu_vm_free_reserved_vmid(adev, vm, i);
}

/**
 * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
 *
 * @adev: amdgpu_device pointer
 * @pasid: PASID do identify the VM
 *
 * This function is expected to be called in interrupt context. Returns
 * true if there was fault credit, false otherwise
 */
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
				  unsigned int pasid)
{
	struct amdgpu_vm *vm;

	spin_lock(&adev->vm_manager.pasid_lock);
	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	spin_unlock(&adev->vm_manager.pasid_lock);
	if (!vm)
		/* VM not found, can't track fault credit */
		return true;

	/* No lock needed. only accessed by IRQ handler */
	if (!vm->fault_credit)
		/* Too many faults in this VM */
		return false;

	vm->fault_credit--;
	return true;
}

/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vm_id_manager *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		atomic_set(&id_mgr->reserved_vmid_num, 0);

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vm_reset_id(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}

	adev->vm_manager.fence_context =
		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
	atomic64_set(&adev->vm_manager.client_counter, 0);
	spin_lock_init(&adev->vm_manager.prt_lock);
	atomic_set(&adev->vm_manager.num_prt_users, 0);

	/* If not overridden by the user, by default, only in large BAR systems
	 * Compute VM tables will be updated by CPU
	 */
#ifdef CONFIG_X86_64
	if (amdgpu_vm_update_mode == -1) {
		if (amdgpu_vm_is_large_bar(adev))
			adev->vm_manager.vm_update_mode =
				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
		else
			adev->vm_manager.vm_update_mode = 0;
	} else
		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
#else
	adev->vm_manager.vm_update_mode = 0;
#endif

	idr_init(&adev->vm_manager.pasid_idr);
	spin_lock_init(&adev->vm_manager.pasid_lock);
}

/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
	idr_destroy(&adev->vm_manager.pasid_idr);

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vm_id_manager *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VM; ++j) {
			struct amdgpu_vm_id *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->flushed_updates);
			dma_fence_put(id->last_flush);
		}
	}
}

int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	union drm_amdgpu_vm *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	int r;

	switch (args->in.op) {
	case AMDGPU_VM_OP_RESERVE_VMID:
		/* currently, we only have a requirement to reserve a vmid from the gfxhub */
		r = amdgpu_vm_alloc_reserved_vmid(adev, &fpriv->vm,
						  AMDGPU_GFXHUB);
		if (r)
			return r;
		break;
	case AMDGPU_VM_OP_UNRESERVE_VMID:
		amdgpu_vm_free_reserved_vmid(adev, &fpriv->vm, AMDGPU_GFXHUB);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}