/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"

/**
 * DOC: GPUVM
 *
 * GPUVM is similar to the legacy GART on older asics, however
 * rather than there being a single global GART table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * VRAM pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
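
/*
 * Illustrative sketch, assuming a four-level page table configuration with
 * a 9-bit block size (values chosen for illustration only): a GPU virtual
 * address is decoded roughly like this before the walk through the
 * per-VMID tree starts at that VM's root page directory:
 *
 *	uint64_t pfn  = va >> AMDGPU_GPU_PAGE_SHIFT;	// 4 KiB GPU pages
 *	unsigned pdb2 = (pfn >> 27) & 0x1ff;		// root PD index
 *	unsigned pdb1 = (pfn >> 18) & 0x1ff;
 *	unsigned pdb0 = (pfn >>  9) & 0x1ff;
 *	unsigned pte  =  pfn        & 0x1ff;		// leaf PT index
 */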

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST

/**
 * struct amdgpu_pte_update_params - Local structure
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 *
 */
struct amdgpu_pte_update_params {

	/**
	 * @adev: amdgpu device we do this update for
	 */
	struct amdgpu_device *adev;

	/**
	 * @vm: optional amdgpu_vm we do this update for
	 */
	struct amdgpu_vm *vm;

	/**
	 * @src: address where to copy page table entries from
	 */
	uint64_t src;

	/**
	 * @ib: indirect buffer to fill with commands
	 */
	struct amdgpu_ib *ib;

	/**
	 * @func: Function which actually does the update
	 */
	void (*func)(struct amdgpu_pte_update_params *params,
		     struct amdgpu_bo *bo, uint64_t pe,
		     uint64_t addr, unsigned count, uint32_t incr,
		     uint64_t flags);

	/**
	 * @pages_addr:
	 *
	 * DMA addresses to use for mapping, used during VM update by CPU
	 */
	dma_addr_t *pages_addr;

	/**
	 * @kptr:
	 *
	 * Kernel pointer of PD/PT BO that needs to be updated,
	 * used during VM update by CPU
	 */
	void *kptr;
};

/**
 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 */
struct amdgpu_prt_cb {

	/**
	 * @adev: amdgpu device
	 */
	struct amdgpu_device *adev;

	/**
	 * @cb: callback
	 */
	struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of bits the pfn needs to be right shifted for a level.
 */
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
				      unsigned level)
{
	unsigned shift = 0xff;

	switch (level) {
	case AMDGPU_VM_PDB2:
	case AMDGPU_VM_PDB1:
	case AMDGPU_VM_PDB0:
		shift = 9 * (AMDGPU_VM_PDB0 - level) +
			adev->vm_manager.block_size;
		break;
	case AMDGPU_VM_PTB:
		shift = 0;
		break;
	default:
		dev_err(adev->dev, "the level%d isn't supported.\n", level);
	}

	return shift;
}

/**
 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of entries in a page directory or page table.
 */
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
				      unsigned level)
{
	unsigned shift = amdgpu_vm_level_shift(adev,
					       adev->vm_manager.root_level);

	if (level == adev->vm_manager.root_level)
		/* For the root directory */
		return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
	else if (level != AMDGPU_VM_PTB)
		/* Everything in between */
		return 512;
	else
		/* For the page tables on the leaves */
		return AMDGPU_VM_PTE_COUNT(adev);
}

/**
 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The size of the BO for a page directory or page table in bytes.
 */
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
}
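
/*
 * Worked example (illustrative numbers, assuming a 48 bit address space,
 * a four-level configuration and a 9-bit block size): the root level
 * shift is 9 + 9 + 9 = 27, so
 *
 *	root entries     = round_up(2^36, 1 << 27) >> 27 = 512
 *	interior entries = 512
 *	leaf PTEs        = AMDGPU_VM_PTE_COUNT = 512
 *
 * and every PD/PT BO is 512 * 8 = 4096 bytes, i.e. one GPU page.
 */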

/**
 * amdgpu_vm_bo_evicted - vm_bo is evicted
 *
 * @vm_bo: vm_bo which is evicted
 *
 * State for PDs/PTs and per VM BOs which are not at the location they should
 * be.
 */
static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
{
	struct amdgpu_vm *vm = vm_bo->vm;
	struct amdgpu_bo *bo = vm_bo->bo;

	vm_bo->moved = true;
	if (bo->tbo.type == ttm_bo_type_kernel)
		list_move(&vm_bo->vm_status, &vm->evicted);
	else
		list_move_tail(&vm_bo->vm_status, &vm->evicted);
}

/**
 * amdgpu_vm_bo_relocated - vm_bo is relocated
 *
 * @vm_bo: vm_bo which is relocated
 *
 * State for PDs/PTs which need to update their parent PD.
 */
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
	list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
}

/**
 * amdgpu_vm_bo_moved - vm_bo is moved
 *
 * @vm_bo: vm_bo which is moved
 *
 * State for per VM BOs which are moved, but that change is not yet reflected
 * in the page tables.
 */
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
}

/**
 * amdgpu_vm_bo_idle - vm_bo is idle
 *
 * @vm_bo: vm_bo which is now idle
 *
 * State for PDs/PTs and per VM BOs which have gone through the state machine
 * and are now idle.
 */
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
	vm_bo->moved = false;
}

/**
 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
 *
 * @vm_bo: vm_bo which is now invalidated
 *
 * State for normal BOs which are invalidated and whose change is not yet
 * reflected in the PTs.
 */
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->invalidated_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
	spin_unlock(&vm_bo->vm->invalidated_lock);
}

/**
 * amdgpu_vm_bo_done - vm_bo is done
 *
 * @vm_bo: vm_bo which is now done
 *
 * State for normal BOs which are invalidated and that change has been updated
 * in the PTs.
 */
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->invalidated_lock);
	list_del_init(&vm_bo->vm_status);
	spin_unlock(&vm_bo->vm->invalidated_lock);
}
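
/*
 * Rough summary of the state machine implemented by the helpers above
 * (an overview only, as far as this file shows, not a complete list of
 * transitions):
 *
 *	evicted     -> relocated (PDs/PTs) or moved (per-VM BOs)
 *	               once amdgpu_vm_validate_pt_bos() revalidated them
 *	relocated   -> idle after the parent PD has been updated
 *	moved       -> idle or evicted after the per-VM BO was handled
 *	invalidated -> done after the mappings have been updated
 *	idle        -> evicted/moved/invalidated when TTM moves the BO again
 */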

/**
 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
 *
 * @base: base structure for tracking BO usage in a VM
 * @vm: vm to which bo is to be added
 * @bo: amdgpu buffer object
 *
 * Initialize a bo_va_base structure and add it to the appropriate lists
 *
 */
static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo *bo)
{
	base->vm = vm;
	base->bo = bo;
	INIT_LIST_HEAD(&base->bo_list);
	INIT_LIST_HEAD(&base->vm_status);

	if (!bo)
		return;
	list_add_tail(&base->bo_list, &bo->va);

	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
		return;

	vm->bulk_moveable = false;
	if (bo->tbo.type == ttm_bo_type_kernel)
		amdgpu_vm_bo_relocated(base);
	else
		amdgpu_vm_bo_idle(base);

	if (bo->preferred_domains &
	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
		return;

	/*
	 * we checked all the prerequisites, but it looks like this per vm bo
	 * is currently evicted. add the bo to the evicted list to make sure it
	 * is validated on next vm use to avoid fault.
	 */
	amdgpu_vm_bo_evicted(base);
}

/**
 * amdgpu_vm_pt_parent - get the parent page directory
 *
 * @pt: child page table
 *
 * Helper to get the parent entry for the child page table. NULL if we are at
 * the root page directory.
 */
static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
{
	struct amdgpu_bo *parent = pt->base.bo->parent;

	if (!parent)
		return NULL;

	return list_first_entry(&parent->va, struct amdgpu_vm_pt, base.bo_list);
}

/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->robj = vm->root.base.bo;
	entry->priority = 0;
	entry->tv.bo = &entry->robj->tbo;
	entry->tv.shared = true;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}

/**
 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 *
 * Move all BOs to the end of LRU and remember their positions to put them
 * together.
 */
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm)
{
	struct ttm_bo_global *glob = adev->mman.bdev.glob;
	struct amdgpu_vm_bo_base *bo_base;

	if (vm->bulk_moveable) {
		spin_lock(&glob->lru_lock);
		ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
		spin_unlock(&glob->lru_lock);
		return;
	}

	memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo_base, &vm->idle, vm_status) {
		struct amdgpu_bo *bo = bo_base->bo;

		if (!bo->parent)
			continue;

		ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
		if (bo->shadow)
			ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
						&vm->lru_bulk_move);
	}
	spin_unlock(&glob->lru_lock);

	vm->bulk_moveable = true;
}

/**
 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 *
 * Returns:
 * Validation result.
 */
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*validate)(void *p, struct amdgpu_bo *bo),
			      void *param)
{
	struct amdgpu_vm_bo_base *bo_base, *tmp;
	int r = 0;

	vm->bulk_moveable &= list_empty(&vm->evicted);

	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
		struct amdgpu_bo *bo = bo_base->bo;

		r = validate(param, bo);
		if (r)
			break;

		if (bo->tbo.type != ttm_bo_type_kernel) {
			amdgpu_vm_bo_moved(bo_base);
		} else {
			if (vm->use_cpu_for_update)
				r = amdgpu_bo_kmap(bo, NULL);
			else
				r = amdgpu_ttm_alloc_gart(&bo->tbo);
			if (r)
				break;
			if (bo->shadow) {
				r = amdgpu_ttm_alloc_gart(&bo->shadow->tbo);
				if (r)
					break;
			}
			amdgpu_vm_bo_relocated(bo_base);
		}
	}

	return r;
}
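
/*
 * Usage sketch (illustrative only; the callback below is a made-up
 * example, not a helper from this file): command submission style code
 * revalidates all evicted PDs/PTs by passing a validation callback.
 *
 *	static int example_validate(void *param, struct amdgpu_bo *bo)
 *	{
 *		struct ttm_operation_ctx ctx = { true, false };
 *
 *		return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 *	}
 *
 *	r = amdgpu_vm_validate_pt_bos(adev, vm, example_validate, NULL);
 */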

/**
 * amdgpu_vm_ready - check VM is ready for updates
 *
 * @vm: VM to check
 *
 * Check if all VM PDs/PTs are ready for updates
 *
 * Returns:
 * True if eviction list is empty.
 */
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
	return list_empty(&vm->evicted);
}

/**
 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to clear BO from
 * @bo: BO to clear
 * @level: level this BO is at
 * @pte_support_ats: indicate ATS support from PTE
 *
 * Root PD needs to be reserved when calling this.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm, struct amdgpu_bo *bo,
			      unsigned level, bool pte_support_ats)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct dma_fence *fence = NULL;
	unsigned entries, ats_entries;
	struct amdgpu_ring *ring;
	struct amdgpu_job *job;
	uint64_t addr;
	int r;

	entries = amdgpu_bo_size(bo) / 8;

	if (pte_support_ats) {
		if (level == adev->vm_manager.root_level) {
			ats_entries = amdgpu_vm_level_shift(adev, level);
			ats_entries += AMDGPU_GPU_PAGE_SHIFT;
			ats_entries = AMDGPU_GMC_HOLE_START >> ats_entries;
			ats_entries = min(ats_entries, entries);
			entries -= ats_entries;
		} else {
			ats_entries = entries;
			entries = 0;
		}
	} else {
		ats_entries = 0;
	}

	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		goto error;

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r)
		return r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto error;

	addr = amdgpu_bo_gpu_offset(bo);
	if (ats_entries) {
		uint64_t ats_value;

		ats_value = AMDGPU_PTE_DEFAULT_ATC;
		if (level != AMDGPU_VM_PTB)
			ats_value |= AMDGPU_PDE_PTE;

		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
				      ats_entries, 0, ats_value);
		addr += ats_entries * 8;
	}

	if (entries)
		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
				      entries, 0, 0);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);

	WARN_ON(job->ibs[0].length_dw > 64);
	r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
			     AMDGPU_FENCE_OWNER_UNDEFINED, false);
	if (r)
		goto error_free;

	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
			      &fence);
	if (r)
		goto error_free;

	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

	if (bo->shadow)
		return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
					  level, pte_support_ats);

	return 0;

error_free:
	amdgpu_job_free(job);

error:
	return r;
}
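
/*
 * Worked example for the ATS split above (illustrative, assuming a
 * four-level configuration, a 9-bit block size and
 * AMDGPU_GMC_HOLE_START == 1ULL << 47): for the root PD each entry
 * covers 27 + 12 address bits, so
 *
 *	ats_entries = (1ULL << 47) >> (27 + 12) = 256
 *
 * i.e. the 256 root PD entries below the hole are initialized with the
 * ATS default value and the remaining entries are cleared to zero.
 */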

/**
 * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
 *
 * @adev: amdgpu_device pointer
 * @vm: requesting vm
 * @level: VMPT level the BO is allocated for
 * @bp: resulting BO allocation parameters
 */
static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			       int level, struct amdgpu_bo_param *bp)
{
	memset(bp, 0, sizeof(*bp));

	bp->size = amdgpu_vm_bo_size(adev, level);
	bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
	bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
	if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 &&
	    adev->flags & AMD_IS_APU)
		bp->domain |= AMDGPU_GEM_DOMAIN_GTT;
	bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
	bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	if (vm->use_cpu_for_update)
		bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	else if (!vm->root.base.bo || vm->root.base.bo->shadow)
		bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
	bp->type = ttm_bo_type_kernel;
	if (vm->root.base.bo)
		bp->resv = vm->root.base.bo->tbo.resv;
}

/**
 * amdgpu_vm_alloc_levels - allocate the PD/PT levels
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @parent: parent PT
 * @saddr: start of the address range
 * @eaddr: end of the address range
 * @level: VMPT level
 * @ats: indicate ATS support from PTE
 *
 * Make sure the page directories and page tables are allocated
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm,
				  struct amdgpu_vm_pt *parent,
				  uint64_t saddr, uint64_t eaddr,
				  unsigned level, bool ats)
{
	unsigned shift = amdgpu_vm_level_shift(adev, level);
	struct amdgpu_bo_param bp;
	unsigned pt_idx, from, to;
	int r;

	if (!parent->entries) {
		unsigned num_entries = amdgpu_vm_num_entries(adev, level);

		parent->entries = kvmalloc_array(num_entries,
						   sizeof(struct amdgpu_vm_pt),
						   GFP_KERNEL | __GFP_ZERO);
		if (!parent->entries)
			return -ENOMEM;
	}

	from = saddr >> shift;
	to = eaddr >> shift;
	if (from >= amdgpu_vm_num_entries(adev, level) ||
	    to >= amdgpu_vm_num_entries(adev, level))
		return -EINVAL;

	++level;
	saddr = saddr & ((1 << shift) - 1);
	eaddr = eaddr & ((1 << shift) - 1);

	amdgpu_vm_bo_param(adev, vm, level, &bp);

	/* walk over the address space and allocate the page tables */
	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
		struct amdgpu_bo *pt;

		if (!entry->base.bo) {
			r = amdgpu_bo_create(adev, &bp, &pt);
			if (r)
				return r;

			r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
			if (r) {
				amdgpu_bo_unref(&pt->shadow);
				amdgpu_bo_unref(&pt);
				return r;
			}

			if (vm->use_cpu_for_update) {
				r = amdgpu_bo_kmap(pt, NULL);
				if (r) {
					amdgpu_bo_unref(&pt->shadow);
					amdgpu_bo_unref(&pt);
					return r;
				}
			}

			/* Keep a reference to the root directory to avoid
			 * freeing them up in the wrong order.
			 */
			pt->parent = amdgpu_bo_ref(parent->base.bo);

			amdgpu_vm_bo_base_init(&entry->base, vm, pt);
		}

		if (level < AMDGPU_VM_PTB) {
			uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
			uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
				((1 << shift) - 1);
			r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
						   sub_eaddr, level, ats);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * amdgpu_vm_alloc_pts - Allocate page tables.
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @saddr: Start address which needs to be allocated
 * @size: Size from start address we need.
 *
 * Make sure the page tables are allocated.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			uint64_t saddr, uint64_t size)
{
	uint64_t eaddr;
	bool ats = false;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	eaddr = saddr + size - 1;

	if (vm->pte_support_ats)
		ats = saddr < AMDGPU_GMC_HOLE_START;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	if (eaddr >= adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
			eaddr, adev->vm_manager.max_pfn);
		return -EINVAL;
	}

	return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
				      adev->vm_manager.root_level, ats);
}
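
/*
 * Usage sketch (illustrative values, not taken from a real caller): the
 * page tables covering a range must exist before it can be mapped.
 *
 *	uint64_t saddr = 0x100000000ULL;	// page aligned GPU VA
 *	uint64_t size  = 2ULL << 20;		// 2 MiB
 *	int r;
 *
 *	r = amdgpu_vm_alloc_pts(adev, vm, saddr, size);
 *	if (r)
 *		return r;
 */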

762 763 764 765 766 767
/**
 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 *
 * @adev: amdgpu_device pointer
 */
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
768
{
769
	const struct amdgpu_ip_block *ip_block;
770 771 772
	bool has_compute_vm_bug;
	struct amdgpu_ring *ring;
	int i;
773

774
	has_compute_vm_bug = false;
775

776
	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
777 778 779 780 781 782 783 784 785
	if (ip_block) {
		/* Compute has a VM bug for GFX version < 7.
		   Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
		if (ip_block->version->major <= 7)
			has_compute_vm_bug = true;
		else if (ip_block->version->major == 8)
			if (adev->gfx.mec_fw_version < 673)
				has_compute_vm_bug = true;
	}
786

787 788 789 790 791
	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
			/* only compute rings */
			ring->has_compute_vm_bug = has_compute_vm_bug;
792
		else
793
			ring->has_compute_vm_bug = false;
794 795 796
	}
}

797 798 799 800 801 802 803 804 805
/**
 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
 *
 * @ring: ring on which the job will be submitted
 * @job: job to submit
 *
 * Returns:
 * True if sync is needed.
 */
806 807
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job)
A
Alex Xie 已提交
808
{
809 810
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
811 812
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id;
813
	bool gds_switch_needed;
814
	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
815

816
	if (job->vmid == 0)
817
		return false;
818
	id = &id_mgr->ids[job->vmid];
819 820 821 822 823 824 825
	gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);
A
Alex Xie 已提交
826

827
	if (amdgpu_vmid_had_gpu_reset(adev, id))
828
		return true;
A
Alex Xie 已提交
829

830
	return vm_flush_needed || gds_switch_needed;
831 832
}

A
Alex Deucher 已提交
833 834 835 836
/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
837
 * @job:  related job
838
 * @need_pipe_sync: is pipe sync needed
A
Alex Deucher 已提交
839
 *
840
 * Emit a VM flush when it is necessary.
841 842 843
 *
 * Returns:
 * 0 on success, errno otherwise.
A
Alex Deucher 已提交
844
 */
M
Monk Liu 已提交
845
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
A
Alex Deucher 已提交
846
{
847
	struct amdgpu_device *adev = ring->adev;
848
	unsigned vmhub = ring->funcs->vmhub;
849
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
850
	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
851
	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
852 853 854 855 856 857
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);
858
	bool vm_flush_needed = job->vm_needs_flush;
859 860 861 862
	bool pasid_mapping_needed = id->pasid != job->pasid ||
		!id->pasid_mapping ||
		!dma_fence_is_signaled(id->pasid_mapping);
	struct dma_fence *fence = NULL;
863
	unsigned patch_offset = 0;
864
	int r;
865

866
	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
867 868
		gds_switch_needed = true;
		vm_flush_needed = true;
869
		pasid_mapping_needed = true;
870
	}
871

872 873 874 875 876
	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
	vm_flush_needed &= !!ring->funcs->emit_vm_flush;
	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
		ring->funcs->emit_wreg;

M
Monk Liu 已提交
877
	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
878
		return 0;
879

880 881
	if (ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);
882

M
Monk Liu 已提交
883 884 885
	if (need_pipe_sync)
		amdgpu_ring_emit_pipeline_sync(ring);

886
	if (vm_flush_needed) {
887
		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
888
		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
889 890 891 892
	}

	if (pasid_mapping_needed)
		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
893

894
	if (vm_flush_needed || pasid_mapping_needed) {
895
		r = amdgpu_fence_emit(ring, &fence, 0);
896 897
		if (r)
			return r;
898
	}
899

900
	if (vm_flush_needed) {
901
		mutex_lock(&id_mgr->lock);
902
		dma_fence_put(id->last_flush);
903 904 905
		id->last_flush = dma_fence_get(fence);
		id->current_gpu_reset_count =
			atomic_read(&adev->gpu_reset_counter);
906
		mutex_unlock(&id_mgr->lock);
907
	}
908

909 910 911 912 913 914 915
	if (pasid_mapping_needed) {
		id->pasid = job->pasid;
		dma_fence_put(id->pasid_mapping);
		id->pasid_mapping = dma_fence_get(fence);
	}
	dma_fence_put(fence);

916
	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
917 918 919 920 921 922
		id->gds_base = job->gds_base;
		id->gds_size = job->gds_size;
		id->gws_base = job->gws_base;
		id->gws_size = job->gws_size;
		id->oa_base = job->oa_base;
		id->oa_size = job->oa_size;
923
		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
924 925 926 927 928 929 930 931 932 933 934 935
					    job->gds_size, job->gws_base,
					    job->gws_size, job->oa_base,
					    job->oa_size);
	}

	if (ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
	if (ring->funcs->emit_switch_buffer) {
		amdgpu_ring_emit_switch_buffer(ring);
		amdgpu_ring_emit_switch_buffer(ring);
936
	}
937
	return 0;
938 939
}

A
Alex Deucher 已提交
940 941 942 943 944 945
/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
946
 * Find @bo inside the requested vm.
A
Alex Deucher 已提交
947 948 949 950
 * Search inside the @bos vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
951 952 953
 *
 * Returns:
 * Found bo_va or NULL.
A
Alex Deucher 已提交
954 955 956 957 958 959
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

960 961
	list_for_each_entry(bo_va, &bo->va, base.bo_list) {
		if (bo_va->base.vm == vm) {
A
Alex Deucher 已提交
962 963 964 965 966 967 968
			return bo_va;
		}
	}
	return NULL;
}

/**
969
 * amdgpu_vm_do_set_ptes - helper to call the right asic function
A
Alex Deucher 已提交
970
 *
971
 * @params: see amdgpu_pte_update_params definition
972
 * @bo: PD/PT to update
A
Alex Deucher 已提交
973 974 975 976 977 978 979 980 981
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
982
static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
983
				  struct amdgpu_bo *bo,
984 985
				  uint64_t pe, uint64_t addr,
				  unsigned count, uint32_t incr,
986
				  uint64_t flags)
A
Alex Deucher 已提交
987
{
988
	pe += amdgpu_bo_gpu_offset(bo);
989
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
A
Alex Deucher 已提交
990

991
	if (count < 3) {
992 993
		amdgpu_vm_write_pte(params->adev, params->ib, pe,
				    addr | flags, count, incr);
A
Alex Deucher 已提交
994 995

	} else {
996
		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
A
Alex Deucher 已提交
997 998 999 1000
				      count, incr, flags);
	}
}

1001 1002 1003 1004
/**
 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
 *
 * @params: see amdgpu_pte_update_params definition
1005
 * @bo: PD/PT to update
1006 1007 1008 1009 1010 1011 1012 1013 1014
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
1015
				   struct amdgpu_bo *bo,
1016 1017
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
1018
				   uint64_t flags)
1019
{
1020
	uint64_t src = (params->src + (addr >> 12) * 8);
1021

1022
	pe += amdgpu_bo_gpu_offset(bo);
1023 1024 1025
	trace_amdgpu_vm_copy_ptes(pe, src, count);

	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
1026 1027
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to.
 *
 * Returns:
 * The pointer for the page table entry.
 */
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size*/
	result |= addr & (~PAGE_MASK);

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}
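
/*
 * Illustrative example (made-up numbers): with 64 KiB CPU pages and the
 * GPU page granular address addr = 0x12345000, the CPU page index is
 * 0x1234 and the offset inside that CPU page is 0x5000, so the result is
 * (pages_addr[0x1234] | 0x5000) with the low 12 bits cleared, i.e. the
 * DMA address of the 4 KiB GPU page inside the larger CPU page.
 */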

/**
 * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
 *
 * @params: see amdgpu_pte_update_params definition
1059
 * @bo: PD/PT to update
1060 1061 1062 1063 1064 1065 1066 1067 1068
 * @pe: kmap addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Write count number of PT/PD entries directly.
 */
static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
1069
				   struct amdgpu_bo *bo,
1070 1071 1072 1073 1074
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint64_t flags)
{
	unsigned int i;
1075
	uint64_t value;
1076

1077 1078
	pe += (unsigned long)amdgpu_bo_kptr(bo);

1079 1080
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);

1081
	for (i = 0; i < count; i++) {
1082 1083 1084
		value = params->pages_addr ?
			amdgpu_vm_map_gart(params->pages_addr, addr) :
			addr;
1085 1086
		amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
				       i, value, flags);
1087 1088 1089 1090
		addr += incr;
	}
}

1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101

/**
 * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
 *
 * @adev: amdgpu_device pointer
 * @vm: related vm
 * @owner: fence owner
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
1102 1103
static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			     void *owner)
1104 1105 1106 1107 1108
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
1109
	amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
1110 1111 1112 1113 1114 1115
	r = amdgpu_sync_wait(&sync, true);
	amdgpu_sync_free(&sync);

	return r;
}

1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131
/**
 * amdgpu_vm_update_func - helper to call update function
 *
 * Calls the update function for both the given BO as well as its shadow.
 */
static void amdgpu_vm_update_func(struct amdgpu_pte_update_params *params,
				  struct amdgpu_bo *bo,
				  uint64_t pe, uint64_t addr,
				  unsigned count, uint32_t incr,
				  uint64_t flags)
{
	if (bo->shadow)
		params->func(params, bo->shadow, pe, addr, count, incr, flags);
	params->func(params, bo, pe, addr, count, incr, flags);
}

1132
/*
1133
 * amdgpu_vm_update_pde - update a single level in the hierarchy
1134
 *
1135
 * @param: parameters for the update
1136
 * @vm: requested vm
1137
 * @parent: parent directory
1138
 * @entry: entry to update
1139
 *
1140
 * Makes sure the requested entry in parent is up to date.
1141
 */
1142 1143 1144 1145
static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
				 struct amdgpu_vm *vm,
				 struct amdgpu_vm_pt *parent,
				 struct amdgpu_vm_pt *entry)
A
Alex Deucher 已提交
1146
{
1147
	struct amdgpu_bo *bo = parent->base.bo, *pbo;
1148 1149
	uint64_t pde, pt, flags;
	unsigned level;
C
Chunming Zhou 已提交
1150

1151 1152 1153
	/* Don't update huge pages here */
	if (entry->huge)
		return;
A
Alex Deucher 已提交
1154

1155
	for (level = 0, pbo = bo->parent; pbo; ++level)
1156 1157
		pbo = pbo->parent;

1158
	level += params->adev->vm_manager.root_level;
1159
	amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
1160
	pde = (entry - parent->entries) * 8;
1161
	amdgpu_vm_update_func(params, bo, pde, pt, 1, 0, flags);
A
Alex Deucher 已提交
1162 1163
}

1164 1165 1166
/*
 * amdgpu_vm_invalidate_level - mark all PD levels as invalid
 *
1167 1168
 * @adev: amdgpu_device pointer
 * @vm: related vm
1169
 * @parent: parent PD
1170
 * @level: VMPT level
1171 1172 1173
 *
 * Mark all PD level as invalid after an error.
 */
1174 1175 1176 1177
static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
				       struct amdgpu_vm *vm,
				       struct amdgpu_vm_pt *parent,
				       unsigned level)
1178
{
1179
	unsigned pt_idx, num_entries;
1180 1181 1182 1183 1184

	/*
	 * Recurse into the subdirectories. This recursion is harmless because
	 * we only have a maximum of 5 layers.
	 */
1185 1186
	num_entries = amdgpu_vm_num_entries(adev, level);
	for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
1187 1188
		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];

1189
		if (!entry->base.bo)
1190 1191
			continue;

1192
		if (!entry->base.moved)
1193
			amdgpu_vm_bo_relocated(&entry->base);
1194
		amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
1195 1196 1197
	}
}

1198 1199 1200 1201 1202 1203 1204
/*
 * amdgpu_vm_update_directories - make sure that all directories are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Makes sure all directories are up to date.
1205 1206 1207
 *
 * Returns:
 * 0 for success, error for failure.
1208 1209 1210 1211
 */
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm)
{
1212 1213 1214
	struct amdgpu_pte_update_params params;
	struct amdgpu_job *job;
	unsigned ndw = 0;
1215
	int r = 0;
1216

1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239
	if (list_empty(&vm->relocated))
		return 0;

restart:
	memset(&params, 0, sizeof(params));
	params.adev = adev;

	if (vm->use_cpu_for_update) {
		r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
		if (unlikely(r))
			return r;

		params.func = amdgpu_vm_cpu_set_ptes;
	} else {
		ndw = 512 * 8;
		r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
		if (r)
			return r;

		params.ib = &job->ibs[0];
		params.func = amdgpu_vm_do_set_ptes;
	}

1240
	while (!list_empty(&vm->relocated)) {
1241
		struct amdgpu_vm_pt *pt, *entry;
1242

1243 1244 1245
		entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
					 base.vm_status);
		amdgpu_vm_bo_idle(&entry->base);
1246

1247 1248
		pt = amdgpu_vm_pt_parent(entry);
		if (!pt)
1249 1250 1251 1252 1253 1254 1255
			continue;

		amdgpu_vm_update_pde(&params, vm, pt, entry);

		if (!vm->use_cpu_for_update &&
		    (ndw - params.ib->length_dw) < 32)
			break;
1256
	}
1257

1258 1259 1260
	if (vm->use_cpu_for_update) {
		/* Flush HDP */
		mb();
1261
		amdgpu_asic_flush_hdp(adev, NULL);
1262 1263 1264 1265 1266 1267 1268
	} else if (params.ib->length_dw == 0) {
		amdgpu_job_free(job);
	} else {
		struct amdgpu_bo *root = vm->root.base.bo;
		struct amdgpu_ring *ring;
		struct dma_fence *fence;

1269
		ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
1270 1271 1272 1273 1274 1275
				    sched);

		amdgpu_ring_pad_ib(ring, params.ib);
		amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
				 AMDGPU_FENCE_OWNER_VM, false);
		WARN_ON(params.ib->length_dw > ndw);
1276 1277
		r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
				      &fence);
1278 1279 1280 1281 1282 1283
		if (r)
			goto error;

		amdgpu_bo_fence(root, fence, true);
		dma_fence_put(vm->last_update);
		vm->last_update = fence;
1284 1285
	}

1286 1287 1288 1289 1290 1291
	if (!list_empty(&vm->relocated))
		goto restart;

	return 0;

error:
1292 1293
	amdgpu_vm_invalidate_level(adev, vm, &vm->root,
				   adev->vm_manager.root_level);
1294
	amdgpu_job_free(job);
1295
	return r;
1296 1297
}

1298
/**
1299
 * amdgpu_vm_get_entry - find the entry for an address
1300 1301 1302
 *
 * @p: see amdgpu_pte_update_params definition
 * @addr: virtual address in question
1303 1304
 * @entry: resulting entry or NULL
 * @parent: parent entry
1305
 *
1306
 * Find the vm_pt entry and its parent for the given address.
1307
 */
1308 1309 1310
void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
			 struct amdgpu_vm_pt **entry,
			 struct amdgpu_vm_pt **parent)
1311
{
1312
	unsigned level = p->adev->vm_manager.root_level;
1313

1314 1315 1316
	*parent = NULL;
	*entry = &p->vm->root;
	while ((*entry)->entries) {
1317
		unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
1318

1319
		*parent = *entry;
1320 1321
		*entry = &(*entry)->entries[addr >> shift];
		addr &= (1ULL << shift) - 1;
1322 1323
	}

1324
	if (level != AMDGPU_VM_PTB)
1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339
		*entry = NULL;
}

/**
 * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
 *
 * @p: see amdgpu_pte_update_params definition
 * @entry: vm_pt entry to check
 * @parent: parent entry
 * @nptes: number of PTEs updated with this operation
 * @dst: destination address where the PTEs should point to
 * @flags: access flags for the PTEs
 *
 * Check if we can update the PD with a huge page.
 */
static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
					struct amdgpu_vm_pt *entry,
					struct amdgpu_vm_pt *parent,
					unsigned nptes, uint64_t dst,
					uint64_t flags)
{
	uint64_t pde;

	/* In the case of a mixed PT the PDE must point to it*/
	if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
	    nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
		/* Set the huge page flag to stop scanning at this PDE */
		flags |= AMDGPU_PDE_PTE;
	}

	if (!(flags & AMDGPU_PDE_PTE)) {
		if (entry->huge) {
			/* Add the entry to the relocated list to update it. */
			entry->huge = false;
			amdgpu_vm_bo_relocated(&entry->base);
		}
		return;
	}

	entry->huge = true;
	amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);

	pde = (entry - parent->entries) * 8;
	amdgpu_vm_update_func(p, parent->base.bo, pde, dst, 1, 0, flags);
}
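
/*
 * Illustrative note (assuming 512 PTEs per page table and 4 KiB GPU
 * pages): when a single update covers a whole page table, i.e.
 * nptes == 512 for a 2 MiB aligned range backed by linear memory, the
 * PDE itself gets AMDGPU_PDE_PTE set and maps the 2 MiB region directly,
 * so the leaf page table does not need to be touched at all.
 */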

/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
1374
 * @params: see amdgpu_pte_update_params definition
A
Alex Deucher 已提交
1375 1376
 * @start: start of GPU address range
 * @end: end of GPU address range
1377
 * @dst: destination address to map to, the next dst inside the function
A
Alex Deucher 已提交
1378 1379
 * @flags: mapping flags
 *
1380
 * Update the page tables in the range @start - @end.
1381 1382 1383
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
A
Alex Deucher 已提交
1384
 */
1385
static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1386
				  uint64_t start, uint64_t end,
1387
				  uint64_t dst, uint64_t flags)
A
Alex Deucher 已提交
1388
{
1389 1390
	struct amdgpu_device *adev = params->adev;
	const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
1391

1392
	uint64_t addr, pe_start;
1393
	struct amdgpu_bo *pt;
1394
	unsigned nptes;
A
Alex Deucher 已提交
1395 1396

	/* walk over the address space and update the page tables */
1397 1398 1399 1400 1401 1402 1403
	for (addr = start; addr < end; addr += nptes,
	     dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
		struct amdgpu_vm_pt *entry, *parent;

		amdgpu_vm_get_entry(params, addr, &entry, &parent);
		if (!entry)
			return -ENOENT;
1404

A
Alex Deucher 已提交
1405 1406 1407
		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
1408
			nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
A
Alex Deucher 已提交
1409

1410 1411
		amdgpu_vm_handle_huge_pages(params, entry, parent,
					    nptes, dst, flags);
1412
		/* We don't need to update PTEs for huge pages */
1413
		if (entry->huge)
1414 1415
			continue;

1416
		pt = entry->base.bo;
1417
		pe_start = (addr & mask) * 8;
1418 1419 1420
		amdgpu_vm_update_func(params, pt, pe_start, dst, nptes,
				      AMDGPU_GPU_PAGE_SIZE, flags);

A
Alex Deucher 已提交
1421 1422
	}

1423
	return 0;
1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434
}

/*
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @params: see amdgpu_pte_update_params definition
 * @vm: requested vm
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @dst: addr those PTEs should point to
 * @flags: hw mapping flags
1435 1436 1437
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
1438
 */
1439
static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params	*params,
1440
				uint64_t start, uint64_t end,
1441
				uint64_t dst, uint64_t flags)
1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */
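	/*
	 * Worked example (illustrative, assuming a maximum fragment size
	 * of 9): AMDGPU_PTE_FRAG(9) advertises 1 << (12 + 9) = 2 MiB
	 * granularity, so a 2 MiB aligned, physically contiguous mapping
	 * can be cached by the TLB as a single entry, while smaller or
	 * misaligned pieces at the start and end of a range fall back to
	 * correspondingly smaller fragments.
	 */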
	unsigned max_frag = params->adev->vm_manager.fragment_size;
	int r;
1463 1464

	/* system pages are not contiguous */
1465
	if (params->src || !(flags & AMDGPU_PTE_VALID))
1466
		return amdgpu_vm_update_ptes(params, start, end, dst, flags);
1467

1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484
	while (start != end) {
		uint64_t frag_flags, frag_end;
		unsigned frag;

		/* This intentionally wraps around if no bit is set */
		frag = min((unsigned)ffs(start) - 1,
			   (unsigned)fls64(end - start) - 1);
		if (frag >= max_frag) {
			frag_flags = AMDGPU_PTE_FRAG(max_frag);
			frag_end = end & ~((1ULL << max_frag) - 1);
		} else {
			frag_flags = AMDGPU_PTE_FRAG(frag);
			frag_end = start + (1 << frag);
		}

		r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
					  flags | frag_flags);
1485 1486
		if (r)
			return r;
1487

1488 1489
		dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
		start = frag_end;
1490
	}
1491 1492

	return 0;
A
Alex Deucher 已提交
1493 1494 1495 1496 1497 1498
}

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
1499
 * @exclusive: fence we need to sync to
1500
 * @pages_addr: DMA addresses to use for mapping
A
Alex Deucher 已提交
1501
 * @vm: requested vm
1502 1503 1504
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
A
Alex Deucher 已提交
1505 1506 1507
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
1508
 * Fill in the page table entries between @start and @last.
1509 1510 1511
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
A
Alex Deucher 已提交
1512 1513
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1514
				       struct dma_fence *exclusive,
1515
				       dma_addr_t *pages_addr,
A
Alex Deucher 已提交
1516
				       struct amdgpu_vm *vm,
1517
				       uint64_t start, uint64_t last,
1518
				       uint64_t flags, uint64_t addr,
1519
				       struct dma_fence **fence)
A
Alex Deucher 已提交
1520
{
1521
	struct amdgpu_ring *ring;
1522
	void *owner = AMDGPU_FENCE_OWNER_VM;
A
Alex Deucher 已提交
1523
	unsigned nptes, ncmds, ndw;
1524
	struct amdgpu_job *job;
1525
	struct amdgpu_pte_update_params params;
1526
	struct dma_fence *f = NULL;
A
Alex Deucher 已提交
1527 1528
	int r;

1529 1530
	memset(&params, 0, sizeof(params));
	params.adev = adev;
1531
	params.vm = vm;
1532

1533 1534 1535 1536
	/* sync to everything on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;

1537 1538 1539 1540 1541 1542 1543 1544
	if (vm->use_cpu_for_update) {
		/* params.src is used as flag to indicate system Memory */
		if (pages_addr)
			params.src = ~0;

		/* Wait for PT BOs to be free. PTs share the same resv. object
		 * as the root PD BO
		 */
1545
		r = amdgpu_vm_wait_pd(adev, vm, owner);
1546 1547 1548 1549 1550 1551 1552 1553 1554
		if (unlikely(r))
			return r;

		params.func = amdgpu_vm_cpu_set_ptes;
		params.pages_addr = pages_addr;
		return amdgpu_vm_frag_ptes(&params, start, last + 1,
					   addr, flags);
	}

1555
	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
1556

1557
	nptes = last - start + 1;
A
Alex Deucher 已提交
1558 1559

	/*
1560
	 * reserve space for two commands every (1 << BLOCK_SIZE)
A
Alex Deucher 已提交
1561
	 *  entries or 2k dwords (whatever is smaller)
1562 1563
         *
         * The second command is for the shadow pagetables.
A
Alex Deucher 已提交
1564
	 */
1565 1566 1567 1568
	if (vm->root.base.bo->shadow)
		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
	else
		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
A
Alex Deucher 已提交
1569 1570 1571 1572

	/* padding, etc. */
	ndw = 64;

1573
	if (pages_addr) {
1574
		/* copy commands needed */
1575
		ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
A
Alex Deucher 已提交
1576

1577
		/* and also PTEs */
A
Alex Deucher 已提交
1578 1579
		ndw += nptes * 2;

1580 1581
		params.func = amdgpu_vm_do_copy_ptes;

A
Alex Deucher 已提交
1582 1583
	} else {
		/* set page commands needed */
1584
		ndw += ncmds * 10;
A
Alex Deucher 已提交
1585

1586
		/* extra commands for begin/end fragments */
1587 1588 1589 1590
		if (vm->root.base.bo->shadow)
		        ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
		else
		        ndw += 2 * 10 * adev->vm_manager.fragment_size;
1591 1592

		params.func = amdgpu_vm_do_set_ptes;
A
Alex Deucher 已提交
1593 1594
	}

1595 1596
	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
A
Alex Deucher 已提交
1597
		return r;
1598

1599
	params.ib = &job->ibs[0];
C
Chunming Zhou 已提交
1600

1601
	if (pages_addr) {
1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614
		uint64_t *pte;
		unsigned i;

		/* Put the PTEs at the end of the IB. */
		i = ndw - nptes * 2;
		pte= (uint64_t *)&(job->ibs->ptr[i]);
		params.src = job->ibs->gpu_addr + i * 4;

		for (i = 0; i < nptes; ++i) {
			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
						    AMDGPU_GPU_PAGE_SIZE);
			pte[i] |= flags;
		}
1615
		addr = 0;
1616 1617
	}

1618
	r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
1619 1620 1621
	if (r)
		goto error_free;

1622
	r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1623
			     owner, false);
1624 1625
	if (r)
		goto error_free;
A
Alex Deucher 已提交
1626

1627
	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
1628 1629 1630
	if (r)
		goto error_free;

1631 1632 1633
	r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
	if (r)
		goto error_free;
A
Alex Deucher 已提交
1634

1635 1636
	amdgpu_ring_pad_ib(ring, params.ib);
	WARN_ON(params.ib->length_dw > ndw);
1637
	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
1638 1639
	if (r)
		goto error_free;
A
Alex Deucher 已提交
1640

1641
	amdgpu_bo_fence(vm->root.base.bo, f, true);
1642 1643
	dma_fence_put(*fence);
	*fence = f;
A
Alex Deucher 已提交
1644
	return 0;
C
Chunming Zhou 已提交
1645 1646

error_free:
1647
	amdgpu_job_free(job);
1648
	return r;
A
Alex Deucher 已提交
1649 1650
}

1651 1652 1653 1654
/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
1655
 * @exclusive: fence we need to sync to
1656
 * @pages_addr: DMA addresses to use for mapping
1657 1658
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
1659
 * @flags: HW flags for the mapping
1660
 * @nodes: array of drm_mm_nodes with the MC addresses
1661 1662 1663 1664
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into a SDMA IB.
1665 1666 1667
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
1668 1669
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1670
				      struct dma_fence *exclusive,
1671
				      dma_addr_t *pages_addr,
1672 1673
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo_va_mapping *mapping,
1674
				      uint64_t flags,
1675
				      struct drm_mm_node *nodes,
1676
				      struct dma_fence **fence)
1677
{
1678
	unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1679
	uint64_t pfn, start = mapping->start;
1680 1681 1682 1683 1684 1685 1686 1687 1688 1689
	int r;

	/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
	 * but in case of something, we filter the flags in first place
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

1690 1691 1692
	flags &= ~AMDGPU_PTE_EXECUTABLE;
	flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

1693 1694 1695
	flags &= ~AMDGPU_PTE_MTYPE_MASK;
	flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);

1696 1697 1698 1699 1700 1701
	if ((mapping->flags & AMDGPU_PTE_PRT) &&
	    (adev->asic_type >= CHIP_VEGA10)) {
		flags |= AMDGPU_PTE_PRT;
		flags &= ~AMDGPU_PTE_VALID;
	}

1702 1703
	trace_amdgpu_vm_bo_update(mapping);

1704 1705 1706 1707 1708 1709
	pfn = mapping->offset >> PAGE_SHIFT;
	if (nodes) {
		while (pfn >= nodes->size) {
			pfn -= nodes->size;
			++nodes;
		}
1710
	}
1711

1712
	do {
1713
		dma_addr_t *dma_addr = NULL;
1714 1715
		uint64_t max_entries;
		uint64_t addr, last;
1716

1717 1718 1719
		if (nodes) {
			addr = nodes->start << PAGE_SHIFT;
			max_entries = (nodes->size - pfn) *
1720
				AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1721 1722 1723 1724
		} else {
			addr = 0;
			max_entries = S64_MAX;
		}
1725

1726
		if (pages_addr) {
1727 1728
			uint64_t count;

1729
			max_entries = min(max_entries, 16ull * 1024ull);
1730
			for (count = 1;
1731
			     count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1732
			     ++count) {
1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744
				uint64_t idx = pfn + count;

				if (pages_addr[idx] !=
				    (pages_addr[idx - 1] + PAGE_SIZE))
					break;
			}

			if (count < min_linear_pages) {
				addr = pfn << PAGE_SHIFT;
				dma_addr = pages_addr;
			} else {
				addr = pages_addr[pfn];
1745
				max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1746 1747
			}

1748 1749
		} else if (flags & AMDGPU_PTE_VALID) {
			addr += adev->vm_manager.vram_base_offset;
1750
			addr += pfn << PAGE_SHIFT;
1751 1752
		}

1753
		last = min((uint64_t)mapping->last, start + max_entries - 1);
1754
		r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
1755 1756 1757 1758 1759
						start, last, flags, addr,
						fence);
		if (r)
			return r;

1760
		pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1761 1762 1763 1764
		if (nodes && nodes->size == pfn) {
			pfn = 0;
			++nodes;
		}
1765
		start = last + 1;
1766

1767
	} while (unlikely(start != mapping->last + 1));
1768 1769 1770 1771

	return 0;
}
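
/*
 * Illustrative note (summary under the assumptions visible above): for
 * scattered system memory the loop walks pages_addr and batches runs of
 * physically contiguous CPU pages; chunks are additionally capped (to
 * 16k entries per iteration in the non-contiguous case) so that each
 * amdgpu_vm_bo_update_mapping() call fits into a single SDMA IB.
 */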

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
1777
 * @clear: if true clear the entries
A
Alex Deucher 已提交
1778 1779
 *
 * Fill in the page table entries for @bo_va.
1780 1781 1782
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
A
Alex Deucher 已提交
1783 1784 1785
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear)
{
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo_va_mapping *mapping;
	dma_addr_t *pages_addr = NULL;
	struct ttm_mem_reg *mem;
	struct drm_mm_node *nodes;
	struct dma_fence *exclusive, **last_update;
	uint64_t flags;
	int r;

	if (clear || !bo) {
		mem = NULL;
		nodes = NULL;
		exclusive = NULL;
	} else {
		struct ttm_dma_tt *ttm;

		mem = &bo->tbo.mem;
		nodes = mem->mm_node;
		if (mem->mem_type == TTM_PL_TT) {
			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
			pages_addr = ttm->dma_address;
		}
		exclusive = reservation_object_get_excl(bo->tbo.resv);
	}

	if (bo)
		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
	else
		flags = 0x0;

	if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
		last_update = &vm->last_update;
	else
		last_update = &bo_va->last_pt_update;

	if (!clear && bo_va->base.moved) {
		bo_va->base.moved = false;
		list_splice_init(&bo_va->valids, &bo_va->invalids);

	} else if (bo_va->cleared != clear) {
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	}

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
					       mapping, flags, nodes,
					       last_update);
		if (r)
			return r;
	}

	if (vm->use_cpu_for_update) {
		/* Flush HDP */
		mb();
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	/* If the BO is not in its preferred location add it back to
	 * the evicted list so that it gets validated again on the
	 * next command submission.
	 */
	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
			amdgpu_vm_bo_evicted(&bo_va->base);
		else
			amdgpu_vm_bo_idle(&bo_va->base);
	} else {
		amdgpu_vm_bo_done(&bo_va->base);
	}

	list_splice_init(&bo_va->invalids, &bo_va->valids);
	bo_va->cleared = clear;

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
	}

	return 0;
}
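
/*
 * Usage sketch (illustrative, not part of the driver): after a BO has been
 * validated for a submission, a caller typically refreshes its mappings and
 * only asks for the entries to be cleared when the BO is not reservable:
 *
 *	r = amdgpu_vm_bo_update(adev, bo_va, false);
 *	if (r)
 *		return r;
 */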

/**
 * amdgpu_vm_update_prt_state - update the global PRT state
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
	unsigned long flags;
	bool enable;

	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
	adev->gmc.gmc_funcs->set_prt(adev, enable);
	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
}

/**
 * amdgpu_vm_prt_get - add a PRT user
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
	if (!adev->gmc.gmc_funcs->set_prt)
		return;

	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
		amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_put - drop a PRT user
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
		amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_cb - callback for updating the PRT status
 *
 * @fence: fence for the callback
 * @_cb: the callback structure
 */
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);

	amdgpu_vm_prt_put(cb->adev);
	kfree(cb);
}

/**
 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
 *
 * @adev: amdgpu_device pointer
 * @fence: fence for the callback
 */
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
				 struct dma_fence *fence)
{
	struct amdgpu_prt_cb *cb;

	if (!adev->gmc.gmc_funcs->set_prt)
		return;

	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		if (fence)
			dma_fence_wait(fence, false);

		amdgpu_vm_prt_put(adev);
	} else {
		cb->adev = adev;
		if (!fence || dma_fence_add_callback(fence, &cb->cb,
						     amdgpu_vm_prt_cb))
			amdgpu_vm_prt_cb(fence, &cb->cb);
	}
}

/**
 * amdgpu_vm_free_mapping - free a mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @mapping: mapping to be freed
 * @fence: fence of the unmap operation
 *
 * Free a mapping and make sure we decrease the PRT usage count if applicable.
 */
static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo_va_mapping *mapping,
				   struct dma_fence *fence)
{
	if (mapping->flags & AMDGPU_PTE_PRT)
		amdgpu_vm_add_prt_cb(adev, fence);
	kfree(mapping);
}

/**
 * amdgpu_vm_prt_fini - finish all prt mappings
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Register a cleanup callback to disable PRT support after VM dies.
 */
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct reservation_object *resv = vm->root.base.bo->tbo.resv;
	struct dma_fence *excl, **shared;
	unsigned i, shared_count;
	int r;

	r = reservation_object_get_fences_rcu(resv, &excl,
					      &shared_count, &shared);
	if (r) {
		/* Not enough memory to grab the fence list, as last resort
		 * block for all the fences to complete.
		 */
		reservation_object_wait_timeout_rcu(resv, true, false,
						    MAX_SCHEDULE_TIMEOUT);
		return;
	}

	/* Add a callback for each fence in the reservation object */
	amdgpu_vm_prt_get(adev);
	amdgpu_vm_add_prt_cb(adev, excl);

	for (i = 0; i < shared_count; ++i) {
		amdgpu_vm_prt_get(adev);
		amdgpu_vm_add_prt_cb(adev, shared[i]);
	}

	kfree(shared);
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @fence: optional resulting fence (unchanged if no work needed to be done
 * or if an error occurred)
 *
 * Make sure all freed BOs are cleared in the PT.
 * PTs have to be reserved and mutex must be locked!
 *
 * Returns:
 * 0 for success.
 *
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence)
{
	struct amdgpu_bo_va_mapping *mapping;
	uint64_t init_pte_value = 0;
	struct dma_fence *f = NULL;
	int r;

	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);

		if (vm->pte_support_ats &&
		    mapping->start < AMDGPU_GMC_HOLE_START)
			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;

		r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
						mapping->start, mapping->last,
						init_pte_value, 0, &f);
		amdgpu_vm_free_mapping(adev, vm, mapping, f);
		if (r) {
			dma_fence_put(f);
			return r;
		}
	}

	if (fence && f) {
		dma_fence_put(*fence);
		*fence = f;
	} else {
		dma_fence_put(f);
	}

	return 0;

}
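
/*
 * Usage sketch (illustrative, not part of the driver): callers hold the
 * page table reservation and usually hand the resulting fence to their own
 * synchronization, roughly like
 *
 *	struct dma_fence *fence = NULL;
 *
 *	r = amdgpu_vm_clear_freed(adev, vm, &fence);
 *	if (!r && fence)
 *		dma_fence_put(fence);
 */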

/**
 * amdgpu_vm_handle_moved - handle moved BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Make sure all BOs which are moved are updated in the PTs.
 *
 * Returns:
 * 0 for success.
 *
 * PTs have to be reserved!
 */
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va *bo_va, *tmp;
	struct reservation_object *resv;
	bool clear;
	int r;

	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
		/* Per VM BOs never need to be cleared in the page tables */
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;
	}

	spin_lock(&vm->invalidated_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
					 base.vm_status);
		resv = bo_va->base.bo->tbo.resv;
		spin_unlock(&vm->invalidated_lock);

		/* Try to reserve the BO to avoid clearing its ptes */
		if (!amdgpu_vm_debug && reservation_object_trylock(resv))
			clear = false;
		/* Somebody else is using the BO right now */
		else
			clear = true;

		r = amdgpu_vm_bo_update(adev, bo_va, clear);
		if (r)
			return r;

		if (!clear)
			reservation_object_unlock(resv);
		spin_lock(&vm->invalidated_lock);
	}
	spin_unlock(&vm->invalidated_lock);

	return 0;
}
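
/*
 * Usage sketch (illustrative, not part of the driver): during command
 * submission the expected ordering is roughly
 *
 *	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 *	if (!r)
 *		r = amdgpu_vm_handle_moved(adev, vm);
 *
 * with the root PD and the per-VM BOs reserved by the caller.
 */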

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm.
 * Add @bo to the list of bos associated with the vm
 *
 * Returns:
 * Newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);

	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);

	return bo_va;
}
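
/*
 * Usage sketch (illustrative, not part of the driver): with @bo reserved,
 * a typical lifecycle pairs this with amdgpu_vm_bo_rmv():
 *
 *	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	if (!bo_va)
 *		return -ENOMEM;
 *	...
 *	amdgpu_vm_bo_rmv(adev, bo_va);
 */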


/**
 * amdgpu_vm_bo_insert_map - insert a new mapping
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @mapping: the mapping to insert
 *
 * Insert a new mapping into all structures.
 */
static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va,
				    struct amdgpu_bo_va_mapping *mapping)
{
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo *bo = bo_va->base.bo;

	mapping->bo_va = bo_va;
	list_add(&mapping->list, &bo_va->invalids);
	amdgpu_vm_it_insert(mapping, &vm->va);

	if (mapping->flags & AMDGPU_PTE_PRT)
		amdgpu_vm_prt_get(adev);

	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
	    !bo_va->base.moved) {
		list_move(&bo_va->base.vm_status, &vm->moved);
	}
	trace_amdgpu_vm_bo_map(bo_va, mapping);
}

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: BO size in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	uint64_t eaddr;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fit at this offset */
	eaddr = saddr + size - 1;
	if (saddr >= eaddr ||
	    (bo && offset + size > amdgpu_bo_size(bo)))
		return -EINVAL;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	if (tmp) {
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
			tmp->start, tmp->last + 1);
		return -EINVAL;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);

	return 0;
}
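
/*
 * Usage sketch (illustrative, not part of the driver): with the BO
 * reserved, mapping it read/write at a GPU virtual address looks like
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, gpu_addr, 0, amdgpu_bo_size(bo),
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *
 * where gpu_addr and the size must be AMDGPU_GPU_PAGE_SIZE aligned.
 */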

/**
 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: BO size in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM. Replace existing
 * mappings as we do so.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t saddr, uint64_t offset,
			     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo = bo_va->base.bo;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fit at this offset */
	eaddr = saddr + size - 1;
	if (saddr >= eaddr ||
	    (bo && offset + size > amdgpu_bo_size(bo)))
		return -EINVAL;

	/* Allocate all the needed memory */
	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
	if (r) {
		kfree(mapping);
		return r;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);

	return 0;
}

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->base.vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids)
			return -ENOENT;
	}

	list_del(&mapping->list);
	amdgpu_vm_it_remove(mapping, &vm->va);
	mapping->bo_va = NULL;
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid)
		list_add(&mapping->list, &vm->freed);
	else
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);

	return 0;
}
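
/*
 * Usage sketch (illustrative, not part of the driver): tearing down a
 * single mapping created earlier by amdgpu_vm_bo_map() at gpu_addr:
 *
 *	r = amdgpu_vm_bo_unmap(adev, bo_va, gpu_addr);
 *
 * A still valid mapping is moved to vm->freed and is typically only
 * cleared in the page tables by a later amdgpu_vm_clear_freed() call.
 */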

/**
 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
 *
 * @adev: amdgpu_device pointer
 * @vm: VM structure to use
 * @saddr: start of the range
 * @size: size of the range
 *
 * Remove all mappings in a range, split them as appropriate.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size)
{
	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
	LIST_HEAD(removed);
	uint64_t eaddr;

	eaddr = saddr + size - 1;
	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	/* Allocate all the needed memory */
	before = kzalloc(sizeof(*before), GFP_KERNEL);
	if (!before)
		return -ENOMEM;
	INIT_LIST_HEAD(&before->list);

	after = kzalloc(sizeof(*after), GFP_KERNEL);
	if (!after) {
		kfree(before);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&after->list);

	/* Now gather all removed mappings */
	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	while (tmp) {
		/* Remember mapping split at the start */
		if (tmp->start < saddr) {
			before->start = tmp->start;
			before->last = saddr - 1;
			before->offset = tmp->offset;
			before->flags = tmp->flags;
			before->bo_va = tmp->bo_va;
			list_add(&before->list, &tmp->bo_va->invalids);
		}

		/* Remember mapping split at the end */
		if (tmp->last > eaddr) {
			after->start = eaddr + 1;
			after->last = tmp->last;
			after->offset = tmp->offset;
			after->offset += after->start - tmp->start;
			after->flags = tmp->flags;
			after->bo_va = tmp->bo_va;
			list_add(&after->list, &tmp->bo_va->invalids);
		}

		list_del(&tmp->list);
		list_add(&tmp->list, &removed);

		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
	}

	/* And free them up */
	list_for_each_entry_safe(tmp, next, &removed, list) {
		amdgpu_vm_it_remove(tmp, &vm->va);
		list_del(&tmp->list);

		if (tmp->start < saddr)
			tmp->start = saddr;
		if (tmp->last > eaddr)
			tmp->last = eaddr;

		tmp->bo_va = NULL;
		list_add(&tmp->list, &vm->freed);
		trace_amdgpu_vm_bo_unmap(NULL, tmp);
	}

	/* Insert partial mapping before the range */
	if (!list_empty(&before->list)) {
		amdgpu_vm_it_insert(before, &vm->va);
		if (before->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(before);
	}

	/* Insert partial mapping after the range */
	if (!list_empty(&after->list)) {
		amdgpu_vm_it_insert(after, &vm->va);
		if (after->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(after);
	}

	return 0;
}
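
/*
 * Usage sketch (illustrative, not part of the driver): unmapping an
 * arbitrary 2 MB window regardless of which BOs are mapped there:
 *
 *	r = amdgpu_vm_bo_clear_mappings(adev, vm, va_start, 2ULL << 20);
 *
 * Mappings that only partially overlap the range are split and the
 * remainders stay valid.
 */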

/**
 * amdgpu_vm_bo_lookup_mapping - find mapping by address
 *
 * @vm: the requested VM
 * @addr: the address
 *
 * Find a mapping by its address.
 *
 * Returns:
 * The amdgpu_bo_va_mapping matching for addr or NULL
 *
 */
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr)
{
	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
}

/**
 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
 *
 * @vm: the requested vm
 * @ticket: CS ticket
 *
 * Trace all mappings of BOs reserved during a command submission.
 */
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
{
	struct amdgpu_bo_va_mapping *mapping;

	if (!trace_amdgpu_vm_bo_cs_enabled())
		return;

	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
		if (mapping->bo_va && mapping->bo_va->base.bo) {
			struct amdgpu_bo *bo;

			bo = mapping->bo_va->base.bo;
			if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
				continue;
		}

		trace_amdgpu_vm_bo_cs(mapping);
	}
}

/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;

	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv)
		vm->bulk_moveable = false;

	list_del(&bo_va->base.bo_list);

	spin_lock(&vm->invalidated_lock);
	list_del(&bo_va->base.vm_status);
	spin_unlock(&vm->invalidated_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		mapping->bo_va = NULL;
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);
	}

	dma_fence_put(bo_va->last_pt_update);
	kfree(bo_va);
}

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 * @evicted: is the BO evicted
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted)
{
	struct amdgpu_vm_bo_base *bo_base;

	/* shadow bo doesn't have bo base, its validation needs its parent */
	if (bo->parent && bo->parent->shadow == bo)
		bo = bo->parent;

	list_for_each_entry(bo_base, &bo->va, bo_list) {
		struct amdgpu_vm *vm = bo_base->vm;

		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
			amdgpu_vm_bo_evicted(bo_base);
			continue;
		}

		if (bo_base->moved)
			continue;
		bo_base->moved = true;

		if (bo->tbo.type == ttm_bo_type_kernel)
			amdgpu_vm_bo_relocated(bo_base);
		else if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
			amdgpu_vm_bo_moved(bo_base);
		else
			amdgpu_vm_bo_invalidated(bo_base);
	}
}

/**
 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
 *
 * @vm_size: VM size
 *
 * Returns:
 * VM page table as power of two
 */
static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
	/* Total bits covered by PD + PTs */
	unsigned bits = ilog2(vm_size) + 18;

	/* Make sure the PD is 4K in size up to 8GB address space.
	   Above that split equal between PD and PTs */
	if (vm_size <= 8)
		return (bits - 9);
	else
		return ((bits + 3) / 2);
}
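
/*
 * Worked example (illustrative): for a 64 GB VM, bits = ilog2(64) + 18 = 24,
 * so the result is (24 + 3) / 2 = 13; for an 8 GB VM the PD stays 4K and the
 * result is ilog2(8) + 18 - 9 = 12.
 */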

/**
 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
 *
 * @adev: amdgpu_device pointer
 * @min_vm_size: the minimum vm size in GB if it's set auto
 * @fragment_size_default: Default PTE fragment size
 * @max_level: max VMPT level
 * @max_bits: max address space size in bits
 *
 */
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits)
{
	unsigned int max_size = 1 << (max_bits - 30);
	unsigned int vm_size;
	uint64_t tmp;

	/* adjust vm size first */
	if (amdgpu_vm_size != -1) {
		vm_size = amdgpu_vm_size;
		if (vm_size > max_size) {
			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
				 amdgpu_vm_size, max_size);
			vm_size = max_size;
		}
	} else {
		struct sysinfo si;
		unsigned int phys_ram_gb;

		/* Optimal VM size depends on the amount of physical
		 * RAM available. Underlying requirements and
		 * assumptions:
		 *
		 *  - Need to map system memory and VRAM from all GPUs
		 *     - VRAM from other GPUs not known here
		 *     - Assume VRAM <= system memory
		 *  - On GFX8 and older, VM space can be segmented for
		 *    different MTYPEs
		 *  - Need to allow room for fragmentation, guard pages etc.
		 *
		 * This adds up to a rough guess of system memory x3.
		 * Round up to power of two to maximize the available
		 * VM size with the given page table size.
		 */
		si_meminfo(&si);
		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
			       (1 << 30) - 1) >> 30;
		vm_size = roundup_pow_of_two(
			min(max(phys_ram_gb * 3, min_vm_size), max_size));
	}

	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;

	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
	if (amdgpu_vm_block_size != -1)
		tmp >>= amdgpu_vm_block_size - 9;
	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
	switch (adev->vm_manager.num_level) {
	case 3:
		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
		break;
	case 2:
		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
		break;
	case 1:
		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
		break;
	default:
		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
	}
	/* block size depends on vm size and hw setup*/
	if (amdgpu_vm_block_size != -1)
		adev->vm_manager.block_size =
			min((unsigned)amdgpu_vm_block_size, max_bits
			    - AMDGPU_GPU_PAGE_SHIFT
			    - 9 * adev->vm_manager.num_level);
	else if (adev->vm_manager.num_level > 1)
		adev->vm_manager.block_size = 9;
	else
		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);

	if (amdgpu_vm_fragment_size == -1)
		adev->vm_manager.fragment_size = fragment_size_default;
	else
		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;

	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
		 vm_size, adev->vm_manager.num_level + 1,
		 adev->vm_manager.block_size,
		 adev->vm_manager.fragment_size);
}
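
/*
 * Usage sketch (illustrative, hypothetical values): a GMC IP block would
 * typically call this from its sw_init, e.g.
 *
 *	amdgpu_vm_adjust_size(adev, 64, 9, 3, 48);
 *
 * asking for at least a 64 GB VM, 2 MB (9 bit) fragments, a maximum VMPT
 * level of 3 and a 48 bit address space.
 */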

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @vm_context: Indicates if it is a GFX or Compute context
 * @pasid: Process address space identifier
 *
 * Init @vm fields.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, unsigned int pasid)
{
	struct amdgpu_bo_param bp;
	struct amdgpu_bo *root;
	int r, i;

	vm->va = RB_ROOT_CACHED;
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		vm->reserved_vmid[i] = NULL;
	INIT_LIST_HEAD(&vm->evicted);
	INIT_LIST_HEAD(&vm->relocated);
	INIT_LIST_HEAD(&vm->moved);
	INIT_LIST_HEAD(&vm->idle);
	INIT_LIST_HEAD(&vm->invalidated);
	spin_lock_init(&vm->invalidated_lock);
	INIT_LIST_HEAD(&vm->freed);

	/* create scheduler entity for page table updates */
	r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
				  adev->vm_manager.vm_pte_num_rqs, NULL);
	if (r)
		return r;

	vm->pte_support_ats = false;

	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
						AMDGPU_VM_USE_CPU_FOR_COMPUTE);

		if (adev->asic_type == CHIP_RAVEN)
			vm->pte_support_ats = true;
	} else {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
						AMDGPU_VM_USE_CPU_FOR_GFX);
	}
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
		  "CPU update of VM recommended only for large BAR system\n");
	vm->last_update = NULL;

	amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
		bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
	r = amdgpu_bo_create(adev, &bp, &root);
	if (r)
		goto error_free_sched_entity;

	r = amdgpu_bo_reserve(root, true);
	if (r)
		goto error_free_root;

	r = amdgpu_vm_clear_bo(adev, vm, root,
			       adev->vm_manager.root_level,
			       vm->pte_support_ats);
	if (r)
		goto error_unreserve;

	amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
	amdgpu_bo_unreserve(vm->root.base.bo);

	if (pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
			      GFP_ATOMIC);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
		if (r < 0)
			goto error_free_root;

		vm->pasid = pasid;
	}

	INIT_KFIFO(vm->faults);
	vm->fault_credit = 16;

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(vm->root.base.bo);

error_free_root:
	amdgpu_bo_unref(&vm->root.base.bo->shadow);
	amdgpu_bo_unref(&vm->root.base.bo);
	vm->root.base.bo = NULL;

error_free_sched_entity:
	drm_sched_entity_destroy(&vm->entity);

	return r;
}
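
/*
 * Usage sketch (illustrative, not part of the driver): a GFX VM is created
 * for a new file descriptor and torn down again on release:
 *
 *	r = amdgpu_vm_init(adev, vm, AMDGPU_VM_CONTEXT_GFX, pasid);
 *	...
 *	amdgpu_vm_fini(adev, vm);
 */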

/**
 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @pasid: PASID to use for the compute VM
 *
 * This only works on GFX VMs that don't have any BOs added and no
 * page tables allocated yet.
 *
 * Changes the following VM parameters:
 * - use_cpu_for_update
 * - pte_support_ats
 * - pasid (old PASID is released, because compute manages its own PASIDs)
 *
 * Reinitializes the page directory to reflect the changed ATS
 * setting.
 *
 * Returns:
 * 0 for success, -errno for errors.
 */
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid)
{
	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
	int r;

	r = amdgpu_bo_reserve(vm->root.base.bo, true);
	if (r)
		return r;

	/* Sanity checks */
	if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
		r = -EINVAL;
		goto unreserve_bo;
	}

	if (pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
			      GFP_ATOMIC);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);

		if (r == -ENOSPC)
			goto unreserve_bo;
		r = 0;
	}

	/* Check if PD needs to be reinitialized and do it before
	 * changing any other state, in case it fails.
	 */
	if (pte_support_ats != vm->pte_support_ats) {
		r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
			       adev->vm_manager.root_level,
			       pte_support_ats);
		if (r)
			goto free_idr;
	}

	/* Update VM state */
	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
	vm->pte_support_ats = pte_support_ats;
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
		  "CPU update of VM recommended only for large BAR system\n");

	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);

		/* Free the original amdgpu allocated pasid
		 * Will be replaced with kfd allocated pasid
		 */
		amdgpu_pasid_free(vm->pasid);
		vm->pasid = 0;
	}

	/* Free the shadow bo for compute VM */
	amdgpu_bo_unref(&vm->root.base.bo->shadow);

	if (pasid)
		vm->pasid = pasid;

	goto unreserve_bo;

free_idr:
	if (pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}
unreserve_bo:
	amdgpu_bo_unreserve(vm->root.base.bo);
	return r;
}

/**
 * amdgpu_vm_release_compute - release a compute vm
 * @adev: amdgpu_device pointer
 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
 *
 * This is a correspondant of amdgpu_vm_make_compute. It decouples compute
 * pasid from vm. Compute should stop use of vm after this call.
 */
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}
	vm->pasid = 0;
}

/**
 * amdgpu_vm_free_levels - free PD/PT levels
 *
 * @adev: amdgpu device structure
 * @parent: PD/PT starting level to free
 * @level: level of parent structure
 *
 * Free the page directory or page table level and all sub levels.
 */
static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
				  struct amdgpu_vm_pt *parent,
				  unsigned level)
{
	unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);

	if (parent->base.bo) {
		list_del(&parent->base.bo_list);
		list_del(&parent->base.vm_status);
		amdgpu_bo_unref(&parent->base.bo->shadow);
		amdgpu_bo_unref(&parent->base.bo);
	}

	if (parent->entries)
		for (i = 0; i < num_entries; i++)
			amdgpu_vm_free_levels(adev, &parent->entries[i],
					      level + 1);

	kvfree(parent->entries);
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all bos from the vm bo list
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
	struct amdgpu_bo *root;
	u64 fault;
	int i, r;

	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);

	/* Clear pending page faults from IH when the VM is destroyed */
	while (kfifo_get(&vm->faults, &fault))
		amdgpu_ih_clear_fault(adev, fault);

	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}

	drm_sched_entity_destroy(&vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp,
					     &vm->va.rb_root, rb) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
			amdgpu_vm_prt_fini(adev, vm);
			prt_fini_needed = false;
		}

		list_del(&mapping->list);
		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
	}

	root = amdgpu_bo_ref(vm->root.base.bo);
	r = amdgpu_bo_reserve(root, true);
	if (r) {
		dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
	} else {
		amdgpu_vm_free_levels(adev, &vm->root,
				      adev->vm_manager.root_level);
		amdgpu_bo_unreserve(root);
	}
	amdgpu_bo_unref(&root);
	dma_fence_put(vm->last_update);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		amdgpu_vmid_free_reserved(adev, vm, i);
}

/**
 * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
 *
 * @adev: amdgpu_device pointer
 * @pasid: PASID to identify the VM
 *
 * This function is expected to be called in interrupt context.
 *
 * Returns:
 * True if there was fault credit, false otherwise
 */
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
				  unsigned int pasid)
{
	struct amdgpu_vm *vm;

	spin_lock(&adev->vm_manager.pasid_lock);
	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (!vm) {
		/* VM not found, can't track fault credit */
		spin_unlock(&adev->vm_manager.pasid_lock);
		return true;
	}

	/* No lock needed. only accessed by IRQ handler */
	if (!vm->fault_credit) {
		/* Too many faults in this VM */
		spin_unlock(&adev->vm_manager.pasid_lock);
		return false;
	}

	vm->fault_credit--;
	spin_unlock(&adev->vm_manager.pasid_lock);
	return true;
}

/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i;

	amdgpu_vmid_mgr_init(adev);

	adev->vm_manager.fence_context =
		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	spin_lock_init(&adev->vm_manager.prt_lock);
	atomic_set(&adev->vm_manager.num_prt_users, 0);

	/* If not overridden by the user, by default, only in large BAR systems
	 * Compute VM tables will be updated by CPU
	 */
#ifdef CONFIG_X86_64
	if (amdgpu_vm_update_mode == -1) {
		if (amdgpu_gmc_vram_full_visible(&adev->gmc))
			adev->vm_manager.vm_update_mode =
				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
		else
			adev->vm_manager.vm_update_mode = 0;
	} else
		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
#else
	adev->vm_manager.vm_update_mode = 0;
#endif

	idr_init(&adev->vm_manager.pasid_idr);
	spin_lock_init(&adev->vm_manager.pasid_lock);
}

/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
3132 3133 3134
	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
	idr_destroy(&adev->vm_manager.pasid_idr);

3135
	amdgpu_vmid_mgr_fini(adev);
3136
}
C
3138 3139 3140 3141 3142 3143 3144 3145 3146 3147
/**
 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
 *
 * @dev: drm device pointer
 * @data: drm_amdgpu_vm
 * @filp: drm file pointer
 *
 * Returns:
 * 0 for success, -errno for errors.
 */
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	union drm_amdgpu_vm *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	int r;

	switch (args->in.op) {
	case AMDGPU_VM_OP_RESERVE_VMID:
		/* currently we only need to reserve a VMID from the gfxhub */
		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
		if (r)
			return r;
		break;
	case AMDGPU_VM_OP_UNRESERVE_VMID:
		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_vm_get_task_info - Extracts task info for a PASID.
 *
 * @adev: amdgpu_device pointer
 * @pasid: PASID identifier for VM
 * @task_info: task_info to fill.
 */
void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
			 struct amdgpu_task_info *task_info)
{
	struct amdgpu_vm *vm;

	spin_lock(&adev->vm_manager.pasid_lock);

	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (vm)
		*task_info = vm->task_info;

	spin_unlock(&adev->vm_manager.pasid_lock);
}

/**
 * amdgpu_vm_set_task_info - Sets VMs task info.
 *
 * @vm: vm for which to set the info
 */
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
{
	if (!vm->task_info.pid) {
		vm->task_info.pid = current->pid;
		get_task_comm(vm->task_info.task_name, current);

		if (current->group_leader->mm == current->mm) {
			vm->task_info.tgid = current->group_leader->pid;
			get_task_comm(vm->task_info.process_name, current->group_leader);
		}
	}
}