/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy GART on older asics, however
 * rather than there being a single global GART table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * VRAM pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
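
/*
 * Illustration of the resulting walk (not taken from this file; it assumes
 * the common Vega-style four-level layout with the default 9-bit block size
 * and 4KB GPU pages):
 *
 *   va[47:39] -> PDB2 entry -> va[38:30] -> PDB1 entry ->
 *   va[29:21] -> PDB0 entry -> va[20:12] -> PTB entry -> 4KB page,
 *   with va[11:0] as the offset inside the page.
 */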

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST

/* Local structure. Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 */
struct amdgpu_pte_update_params {
	/* amdgpu device we do this update for */
	struct amdgpu_device *adev;
	/* optional amdgpu_vm we do this update for */
	struct amdgpu_vm *vm;
	/* address where to copy page table entries from */
	uint64_t src;
	/* indirect buffer to fill with commands */
	struct amdgpu_ib *ib;
	/* Function which actually does the update */
	void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
		     uint64_t addr, unsigned count, uint32_t incr,
		     uint64_t flags);
	/* The next two are used during VM update by CPU
	 *  DMA addresses to use for mapping
	 *  Kernel pointer of PD/PT BO that needs to be updated
	 */
	dma_addr_t *pages_addr;
	void *kptr;
};

/* Helper to disable partial resident texture feature from a fence callback */
struct amdgpu_prt_cb {
	struct amdgpu_device *adev;
	struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the number of bits the pfn needs to be right shifted for a level.
 */
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
				      unsigned level)
{
	unsigned shift = 0xff;

	switch (level) {
	case AMDGPU_VM_PDB2:
	case AMDGPU_VM_PDB1:
	case AMDGPU_VM_PDB0:
		shift = 9 * (AMDGPU_VM_PDB0 - level) +
			adev->vm_manager.block_size;
		break;
	case AMDGPU_VM_PTB:
		shift = 0;
		break;
	default:
		dev_err(adev->dev, "the level%d isn't supported.\n", level);
	}

	return shift;
}
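
/*
 * Worked example for amdgpu_vm_level_shift(), assuming the default 9-bit
 * block size: PTB -> 0, PDB0 -> 9, PDB1 -> 18, PDB2 -> 27.  The shift is
 * applied to the pfn, so e.g. one PDB0 entry covers 1 << 9 GPU pages (2MB).
 */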

/**
 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the number of entries in a page directory or page table.
 */
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
				      unsigned level)
{
	unsigned shift = amdgpu_vm_level_shift(adev,
					       adev->vm_manager.root_level);

	if (level == adev->vm_manager.root_level)
		/* For the root directory */
		return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
	else if (level != AMDGPU_VM_PTB)
		/* Everything in between */
		return 512;
	else
		/* For the page tables on the leaves */
		return AMDGPU_VM_PTE_COUNT(adev);
}

/**
 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the size of the BO for a page directory or page table in bytes.
 */
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
}
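
/*
 * Worked example for the two helpers above, assuming a 48-bit VM size and
 * the default 9-bit block size: the root PD has max_pfn (1ULL << 36) rounded
 * up and shifted by 27, i.e. 512 entries; intermediate PDs have 512 entries;
 * a PTB has AMDGPU_VM_PTE_COUNT = 512 entries.  With 8 bytes per entry each
 * PD/PT BO is therefore 4KB.
 */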

/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->robj = vm->root.base.bo;
	entry->priority = 0;
	entry->tv.bo = &entry->robj->tbo;
	entry->tv.shared = true;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}

/**
 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 */
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*validate)(void *p, struct amdgpu_bo *bo),
			      void *param)
{
	struct ttm_bo_global *glob = adev->mman.bdev.glob;
	int r;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->evicted)) {
		struct amdgpu_vm_bo_base *bo_base;
		struct amdgpu_bo *bo;

		bo_base = list_first_entry(&vm->evicted,
					   struct amdgpu_vm_bo_base,
					   vm_status);
		spin_unlock(&vm->status_lock);

		bo = bo_base->bo;
		BUG_ON(!bo);
		if (bo->parent) {
			r = validate(param, bo);
			if (r)
				return r;

			spin_lock(&glob->lru_lock);
			ttm_bo_move_to_lru_tail(&bo->tbo);
			if (bo->shadow)
				ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
			spin_unlock(&glob->lru_lock);
		}

		if (bo->tbo.type == ttm_bo_type_kernel &&
		    vm->use_cpu_for_update) {
			r = amdgpu_bo_kmap(bo, NULL);
			if (r)
				return r;
		}

		spin_lock(&vm->status_lock);
		if (bo->tbo.type != ttm_bo_type_kernel)
			list_move(&bo_base->vm_status, &vm->moved);
		else
			list_move(&bo_base->vm_status, &vm->relocated);
	}
	spin_unlock(&vm->status_lock);

	return 0;
}

/**
 * amdgpu_vm_ready - check VM is ready for updates
 *
 * @vm: VM to check
 *
 * Check if all VM PDs/PTs are ready for updates
 */
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
	bool ready;

	spin_lock(&vm->status_lock);
	ready = list_empty(&vm->evicted);
	spin_unlock(&vm->status_lock);

	return ready;
}

/**
 * amdgpu_vm_alloc_levels - allocate the PD/PT levels
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @saddr: start of the address range
 * @eaddr: end of the address range
 *
 * Make sure the page directories and page tables are allocated
 */
static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm,
				  struct amdgpu_vm_pt *parent,
				  uint64_t saddr, uint64_t eaddr,
				  unsigned level)
{
	unsigned shift = amdgpu_vm_level_shift(adev, level);
	unsigned pt_idx, from, to;
	int r;
	u64 flags;
	uint64_t init_value = 0;

	if (!parent->entries) {
		unsigned num_entries = amdgpu_vm_num_entries(adev, level);

		parent->entries = kvmalloc_array(num_entries,
						   sizeof(struct amdgpu_vm_pt),
						   GFP_KERNEL | __GFP_ZERO);
		if (!parent->entries)
			return -ENOMEM;
		memset(parent->entries, 0 , sizeof(struct amdgpu_vm_pt));
	}

	from = saddr >> shift;
	to = eaddr >> shift;
	if (from >= amdgpu_vm_num_entries(adev, level) ||
	    to >= amdgpu_vm_num_entries(adev, level))
		return -EINVAL;

	++level;
	saddr = saddr & ((1 << shift) - 1);
	eaddr = eaddr & ((1 << shift) - 1);

	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
			AMDGPU_GEM_CREATE_VRAM_CLEARED;
	if (vm->use_cpu_for_update)
		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	else
		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
				AMDGPU_GEM_CREATE_SHADOW);

	if (vm->pte_support_ats) {
		init_value = AMDGPU_PTE_DEFAULT_ATC;
		if (level != AMDGPU_VM_PTB)
			init_value |= AMDGPU_PDE_PTE;

	}

	/* walk over the address space and allocate the page tables */
	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
		struct reservation_object *resv = vm->root.base.bo->tbo.resv;
		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
		struct amdgpu_bo *pt;

		if (!entry->base.bo) {
			r = amdgpu_bo_create(adev,
					     amdgpu_vm_bo_size(adev, level),
					     AMDGPU_GPU_PAGE_SIZE, true,
					     AMDGPU_GEM_DOMAIN_VRAM,
					     flags,
					     NULL, resv, init_value, &pt);
			if (r)
				return r;

			if (vm->use_cpu_for_update) {
				r = amdgpu_bo_kmap(pt, NULL);
				if (r) {
					amdgpu_bo_unref(&pt);
					return r;
				}
			}

			/* Keep a reference to the root directory to avoid
			* freeing them up in the wrong order.
			*/
			pt->parent = amdgpu_bo_ref(parent->base.bo);

			entry->base.vm = vm;
			entry->base.bo = pt;
			list_add_tail(&entry->base.bo_list, &pt->va);
			spin_lock(&vm->status_lock);
			list_add(&entry->base.vm_status, &vm->relocated);
			spin_unlock(&vm->status_lock);
		}

		if (level < AMDGPU_VM_PTB) {
			uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
			uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
				((1 << shift) - 1);
			r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
						   sub_eaddr, level);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * amdgpu_vm_alloc_pts - Allocate page tables.
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @saddr: Start address which needs to be allocated
 * @size: Size from start address we need.
 *
 * Make sure the page tables are allocated.
 */
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			uint64_t saddr, uint64_t size)
{
	uint64_t last_pfn;
	uint64_t eaddr;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	eaddr = saddr + size - 1;
	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
	if (last_pfn >= adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
			last_pfn, adev->vm_manager.max_pfn);
		return -EINVAL;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
				      adev->vm_manager.root_level);
}

/**
 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 *
 * @adev: amdgpu_device pointer
 */
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
{
	const struct amdgpu_ip_block *ip_block;
	bool has_compute_vm_bug;
	struct amdgpu_ring *ring;
	int i;

	has_compute_vm_bug = false;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
	if (ip_block) {
		/* Compute has a VM bug for GFX version < 7.
		   Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
		if (ip_block->version->major <= 7)
			has_compute_vm_bug = true;
		else if (ip_block->version->major == 8)
			if (adev->gfx.mec_fw_version < 673)
				has_compute_vm_bug = true;
	}

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
			/* only compute rings */
			ring->has_compute_vm_bug = has_compute_vm_bug;
		else
			ring->has_compute_vm_bug = false;
	}
}

bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id;
	bool gds_switch_needed;
	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;

	if (job->vmid == 0)
		return false;
	id = &id_mgr->ids[job->vmid];
	gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);

	if (amdgpu_vmid_had_gpu_reset(adev, id))
		return true;

	return vm_flush_needed || gds_switch_needed;
}

static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
{
	return (adev->gmc.real_vram_size == adev->gmc.visible_vram_size);
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job: related job
 * @need_pipe_sync: whether a pipeline sync is needed before the flush
 *
 * Emit a VM flush when it is necessary.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);
	bool vm_flush_needed = job->vm_needs_flush;
	unsigned patch_offset = 0;
	int r;

	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
		gds_switch_needed = true;
		vm_flush_needed = true;
	}

	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
		return 0;

	if (ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

	if (need_pipe_sync)
		amdgpu_ring_emit_pipeline_sync(ring);

	if (ring->funcs->emit_vm_flush && vm_flush_needed) {
		struct dma_fence *fence;

		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);

		r = amdgpu_fence_emit(ring, &fence);
		if (r)
			return r;

		mutex_lock(&id_mgr->lock);
		dma_fence_put(id->last_flush);
		id->last_flush = fence;
		id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
		mutex_unlock(&id_mgr->lock);
	}

	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
		id->gds_base = job->gds_base;
		id->gds_size = job->gds_size;
		id->gws_base = job->gws_base;
		id->gws_size = job->gws_size;
		id->oa_base = job->oa_base;
		id->oa_size = job->oa_size;
		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
					    job->gds_size, job->gws_base,
					    job->gws_size, job->oa_base,
					    job->oa_size);
	}

	if (ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
	if (ring->funcs->emit_switch_buffer) {
		amdgpu_ring_emit_switch_buffer(ring);
		amdgpu_ring_emit_switch_buffer(ring);
	}
	return 0;
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bos vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, base.bo_list) {
		if (bo_va->base.vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
 * amdgpu_vm_do_set_ptes - helper to call the right asic function
 *
 * @params: see amdgpu_pte_update_params definition
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
				  uint64_t pe, uint64_t addr,
				  unsigned count, uint32_t incr,
				  uint64_t flags)
{
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);

	if (count < 3) {
		amdgpu_vm_write_pte(params->adev, params->ib, pe,
				    addr | flags, count, incr);

	} else {
		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
				      count, incr, flags);
	}
}
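
/*
 * Note: for very small updates (count < 3 above) writing the PTE values
 * straight into the IB via amdgpu_vm_write_pte() avoids the overhead of a
 * packet that generates them, while larger runs use amdgpu_vm_set_pte_pde().
 * (Rationale inferred from the code; it is not stated in this file.)
 */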

/**
 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
 *
 * @params: see amdgpu_pte_update_params definition
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint64_t flags)
{
	uint64_t src = (params->src + (addr >> 12) * 8);


	trace_amdgpu_vm_copy_ptes(pe, src, count);

	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to and return the pointer for the page table entry.
 */
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}
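
/*
 * Sketch of the lookup above for a system where the CPU page size is larger
 * than the 4KB GPU page size (assumption for illustration, e.g. 64KB CPU
 * pages): addr >> PAGE_SHIFT selects the backing CPU page, the OR re-adds
 * the 4KB-aligned offset of the GPU page inside that CPU page, and the final
 * mask clears the sub-4KB bits.  With equal page sizes this degenerates to a
 * plain pages_addr[] lookup.
 */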

/**
 * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
 *
 * @params: see amdgpu_pte_update_params definition
 * @pe: kmap addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Write count number of PT/PD entries directly.
 */
static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint64_t flags)
{
	unsigned int i;
	uint64_t value;

	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);

	for (i = 0; i < count; i++) {
		value = params->pages_addr ?
			amdgpu_vm_map_gart(params->pages_addr, addr) :
			addr;
		amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
				       i, value, flags);
		addr += incr;
	}
}

static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			     void *owner)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
	r = amdgpu_sync_wait(&sync, true);
	amdgpu_sync_free(&sync);

	return r;
}

/*
 * amdgpu_vm_update_pde - update a single level in the hierarchy
 *
 * @params: parameters for the update
 * @vm: requested vm
 * @parent: parent directory
 * @entry: entry to update
 *
 * Makes sure the requested entry in parent is up to date.
 */
static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
				 struct amdgpu_vm *vm,
				 struct amdgpu_vm_pt *parent,
				 struct amdgpu_vm_pt *entry)
{
	struct amdgpu_bo *bo = entry->base.bo, *shadow = NULL, *pbo;
	uint64_t pd_addr, shadow_addr = 0;
	uint64_t pde, pt, flags;
	unsigned level;

	/* Don't update huge pages here */
	if (entry->huge)
		return;

	if (vm->use_cpu_for_update) {
		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
	} else {
		pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
		shadow = parent->base.bo->shadow;
		if (shadow)
			shadow_addr = amdgpu_bo_gpu_offset(shadow);
	}

	for (level = 0, pbo = parent->base.bo->parent; pbo; ++level)
		pbo = pbo->parent;

	level += params->adev->vm_manager.root_level;
	pt = amdgpu_bo_gpu_offset(bo);
	flags = AMDGPU_PTE_VALID;
	amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
	if (shadow) {
		pde = shadow_addr + (entry - parent->entries) * 8;
		params->func(params, pde, pt, 1, 0, flags);
	}

	pde = pd_addr + (entry - parent->entries) * 8;
	params->func(params, pde, pt, 1, 0, flags);
}

/*
 * amdgpu_vm_invalidate_level - mark all PD levels as invalid
 *
 * @parent: parent PD
 *
 * Mark all PD levels as invalid after an error.
 */
static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
				       struct amdgpu_vm *vm,
				       struct amdgpu_vm_pt *parent,
				       unsigned level)
{
	unsigned pt_idx, num_entries;

	/*
	 * Recurse into the subdirectories. This recursion is harmless because
	 * we only have a maximum of 5 layers.
	 */
	num_entries = amdgpu_vm_num_entries(adev, level);
	for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];

		if (!entry->base.bo)
			continue;

		spin_lock(&vm->status_lock);
		if (list_empty(&entry->base.vm_status))
			list_add(&entry->base.vm_status, &vm->relocated);
		spin_unlock(&vm->status_lock);
		amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
	}
}

/*
 * amdgpu_vm_update_directories - make sure that all directories are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Makes sure all directories are up to date.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm)
{
	struct amdgpu_pte_update_params params;
	struct amdgpu_job *job;
	unsigned ndw = 0;
	int r = 0;

	if (list_empty(&vm->relocated))
		return 0;

restart:
	memset(&params, 0, sizeof(params));
	params.adev = adev;

	if (vm->use_cpu_for_update) {
		r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
		if (unlikely(r))
			return r;

		params.func = amdgpu_vm_cpu_set_ptes;
	} else {
		ndw = 512 * 8;
		r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
		if (r)
			return r;

		params.ib = &job->ibs[0];
		params.func = amdgpu_vm_do_set_ptes;
	}

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->relocated)) {
		struct amdgpu_vm_bo_base *bo_base, *parent;
		struct amdgpu_vm_pt *pt, *entry;
		struct amdgpu_bo *bo;

		bo_base = list_first_entry(&vm->relocated,
					   struct amdgpu_vm_bo_base,
					   vm_status);
		list_del_init(&bo_base->vm_status);
		spin_unlock(&vm->status_lock);

		bo = bo_base->bo->parent;
		if (!bo) {
			spin_lock(&vm->status_lock);
			continue;
		}

		parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
					  bo_list);
		pt = container_of(parent, struct amdgpu_vm_pt, base);
		entry = container_of(bo_base, struct amdgpu_vm_pt, base);

		amdgpu_vm_update_pde(&params, vm, pt, entry);

		spin_lock(&vm->status_lock);
		if (!vm->use_cpu_for_update &&
		    (ndw - params.ib->length_dw) < 32)
			break;
	}
	spin_unlock(&vm->status_lock);

	if (vm->use_cpu_for_update) {
		/* Flush HDP */
		mb();
		amdgpu_asic_flush_hdp(adev);
	} else if (params.ib->length_dw == 0) {
		amdgpu_job_free(job);
	} else {
		struct amdgpu_bo *root = vm->root.base.bo;
		struct amdgpu_ring *ring;
		struct dma_fence *fence;

		ring = container_of(vm->entity.sched, struct amdgpu_ring,
				    sched);

		amdgpu_ring_pad_ib(ring, params.ib);
		amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
				 AMDGPU_FENCE_OWNER_VM, false);
		if (root->shadow)
			amdgpu_sync_resv(adev, &job->sync,
					 root->shadow->tbo.resv,
					 AMDGPU_FENCE_OWNER_VM, false);

		WARN_ON(params.ib->length_dw > ndw);
		r = amdgpu_job_submit(job, ring, &vm->entity,
				      AMDGPU_FENCE_OWNER_VM, &fence);
		if (r)
			goto error;

		amdgpu_bo_fence(root, fence, true);
		dma_fence_put(vm->last_update);
		vm->last_update = fence;
	}

	if (!list_empty(&vm->relocated))
		goto restart;

	return 0;

error:
	amdgpu_vm_invalidate_level(adev, vm, &vm->root,
				   adev->vm_manager.root_level);
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vm_get_entry - find the entry for an address
 *
 * @p: see amdgpu_pte_update_params definition
 * @addr: virtual address in question
 * @entry: resulting entry or NULL
 * @parent: parent entry
 *
 * Find the vm_pt entry and its parent for the given address.
 */
void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
			 struct amdgpu_vm_pt **entry,
			 struct amdgpu_vm_pt **parent)
{
	unsigned level = p->adev->vm_manager.root_level;

	*parent = NULL;
	*entry = &p->vm->root;
	while ((*entry)->entries) {
		unsigned shift = amdgpu_vm_level_shift(p->adev, level++);

		*parent = *entry;
		*entry = &(*entry)->entries[addr >> shift];
		addr &= (1ULL << shift) - 1;
	}

	if (level != AMDGPU_VM_PTB)
		*entry = NULL;
}

/**
 * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
 *
 * @p: see amdgpu_pte_update_params definition
 * @entry: vm_pt entry to check
 * @parent: parent entry
 * @nptes: number of PTEs updated with this operation
 * @dst: destination address where the PTEs should point to
 * @flags: access flags for the PTEs
 *
 * Check if we can update the PD with a huge page.
 */
static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
					struct amdgpu_vm_pt *entry,
					struct amdgpu_vm_pt *parent,
					unsigned nptes, uint64_t dst,
					uint64_t flags)
{
	uint64_t pd_addr, pde;

	/* In the case of a mixed PT the PDE must point to it */
	if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
	    nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
		/* Set the huge page flag to stop scanning at this PDE */
		flags |= AMDGPU_PDE_PTE;
	}

	if (!(flags & AMDGPU_PDE_PTE)) {
		if (entry->huge) {
			/* Add the entry to the relocated list to update it. */
			entry->huge = false;
			spin_lock(&p->vm->status_lock);
			list_move(&entry->base.vm_status, &p->vm->relocated);
			spin_unlock(&p->vm->status_lock);
		}
		return;
	}

	entry->huge = true;
	amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);

	if (p->func == amdgpu_vm_cpu_set_ptes) {
		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
	} else {
		if (parent->base.bo->shadow) {
			pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
			pde = pd_addr + (entry - parent->entries) * 8;
			p->func(p, pde, dst, 1, 0, flags);
		}
		pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
	}
	pde = pd_addr + (entry - parent->entries) * 8;
	p->func(p, pde, dst, 1, 0, flags);
}
A
/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @params: see amdgpu_pte_update_params definition
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to, the next dst inside the function
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
				  uint64_t start, uint64_t end,
				  uint64_t dst, uint64_t flags)
{
	struct amdgpu_device *adev = params->adev;
	const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;

	uint64_t addr, pe_start;
	struct amdgpu_bo *pt;
	unsigned nptes;
	bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes);

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; addr += nptes,
	     dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
		struct amdgpu_vm_pt *entry, *parent;

		amdgpu_vm_get_entry(params, addr, &entry, &parent);
		if (!entry)
			return -ENOENT;

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);

		amdgpu_vm_handle_huge_pages(params, entry, parent,
					    nptes, dst, flags);
		/* We don't need to update PTEs for huge pages */
		if (entry->huge)
			continue;

		pt = entry->base.bo;
		if (use_cpu_update) {
			pe_start = (unsigned long)amdgpu_bo_kptr(pt);
		} else {
			if (pt->shadow) {
				pe_start = amdgpu_bo_gpu_offset(pt->shadow);
				pe_start += (addr & mask) * 8;
				params->func(params, pe_start, dst, nptes,
					     AMDGPU_GPU_PAGE_SIZE, flags);
			}
			pe_start = amdgpu_bo_gpu_offset(pt);
		}

		pe_start += (addr & mask) * 8;
		params->func(params, pe_start, dst, nptes,
			     AMDGPU_GPU_PAGE_SIZE, flags);
	}

	return 0;
}
/*
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @params: see amdgpu_pte_update_params definition
 * @vm: requested vm
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @dst: addr those PTEs should point to
 * @flags: hw mapping flags
1061
 * Returns 0 for success, -EINVAL for failure.
1062
 */
1063
static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params	*params,
1064
				uint64_t start, uint64_t end,
1065
				uint64_t dst, uint64_t flags)
1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */
1085 1086
	unsigned max_frag = params->adev->vm_manager.fragment_size;
	int r;
1087 1088

	/* system pages are non continuously */
1089
	if (params->src || !(flags & AMDGPU_PTE_VALID))
1090
		return amdgpu_vm_update_ptes(params, start, end, dst, flags);
1091

1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108
	while (start != end) {
		uint64_t frag_flags, frag_end;
		unsigned frag;

		/* This intentionally wraps around if no bit is set */
		frag = min((unsigned)ffs(start) - 1,
			   (unsigned)fls64(end - start) - 1);
		if (frag >= max_frag) {
			frag_flags = AMDGPU_PTE_FRAG(max_frag);
			frag_end = end & ~((1ULL << max_frag) - 1);
		} else {
			frag_flags = AMDGPU_PTE_FRAG(frag);
			frag_end = start + (1 << frag);
		}

		r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
					  flags | frag_flags);
1109 1110
		if (r)
			return r;
1111

1112 1113
		dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
		start = frag_end;
1114
	}
1115 1116

	return 0;
A

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
1123
 * @exclusive: fence we need to sync to
1124
 * @pages_addr: DMA addresses to use for mapping
A
Alex Deucher 已提交
1125
 * @vm: requested vm
1126 1127 1128
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
A
Alex Deucher 已提交
1129 1130 1131
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
1132
 * Fill in the page table entries between @start and @last.
A
Alex Deucher 已提交
1133 1134 1135
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1136
				       struct dma_fence *exclusive,
1137
				       dma_addr_t *pages_addr,
A
Alex Deucher 已提交
1138
				       struct amdgpu_vm *vm,
1139
				       uint64_t start, uint64_t last,
1140
				       uint64_t flags, uint64_t addr,
1141
				       struct dma_fence **fence)
A
Alex Deucher 已提交
1142
{
1143
	struct amdgpu_ring *ring;
1144
	void *owner = AMDGPU_FENCE_OWNER_VM;
A
Alex Deucher 已提交
1145
	unsigned nptes, ncmds, ndw;
1146
	struct amdgpu_job *job;
1147
	struct amdgpu_pte_update_params params;
1148
	struct dma_fence *f = NULL;
A
Alex Deucher 已提交
1149 1150
	int r;

1151 1152
	memset(&params, 0, sizeof(params));
	params.adev = adev;
1153
	params.vm = vm;
1154

1155 1156 1157 1158
	/* sync to everything on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;

1159 1160 1161 1162 1163 1164 1165 1166
	if (vm->use_cpu_for_update) {
		/* params.src is used as flag to indicate system Memory */
		if (pages_addr)
			params.src = ~0;

		/* Wait for PT BOs to be free. PTs share the same resv. object
		 * as the root PD BO
		 */
1167
		r = amdgpu_vm_wait_pd(adev, vm, owner);
1168 1169 1170 1171 1172 1173 1174 1175 1176
		if (unlikely(r))
			return r;

		params.func = amdgpu_vm_cpu_set_ptes;
		params.pages_addr = pages_addr;
		return amdgpu_vm_frag_ptes(&params, start, last + 1,
					   addr, flags);
	}

1177
	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
1178

1179
	nptes = last - start + 1;
A
Alex Deucher 已提交
1180 1181

	/*
1182
	 * reserve space for two commands every (1 << BLOCK_SIZE)
A
Alex Deucher 已提交
1183
	 *  entries or 2k dwords (whatever is smaller)
1184 1185
         *
         * The second command is for the shadow pagetables.
A
Alex Deucher 已提交
1186
	 */
1187 1188 1189 1190
	if (vm->root.base.bo->shadow)
		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
	else
		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
A
Alex Deucher 已提交
1191 1192 1193 1194

	/* padding, etc. */
	ndw = 64;

1195
	if (pages_addr) {
1196
		/* copy commands needed */
1197
		ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
A
Alex Deucher 已提交
1198

1199
		/* and also PTEs */
A
Alex Deucher 已提交
1200 1201
		ndw += nptes * 2;

1202 1203
		params.func = amdgpu_vm_do_copy_ptes;

A
Alex Deucher 已提交
1204 1205
	} else {
		/* set page commands needed */
1206
		ndw += ncmds * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;
A
Alex Deucher 已提交
1207

1208
		/* extra commands for begin/end fragments */
1209 1210
		ndw += 2 * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw
				* adev->vm_manager.fragment_size;
1211 1212

		params.func = amdgpu_vm_do_set_ptes;
A
Alex Deucher 已提交
1213 1214
	}

1215 1216
	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
A
Alex Deucher 已提交
1217
		return r;
1218

1219
	params.ib = &job->ibs[0];
C
Chunming Zhou 已提交
1220

1221
	if (pages_addr) {
1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234
		uint64_t *pte;
		unsigned i;

		/* Put the PTEs at the end of the IB. */
		i = ndw - nptes * 2;
		pte = (uint64_t *)&(job->ibs->ptr[i]);
		params.src = job->ibs->gpu_addr + i * 4;

		for (i = 0; i < nptes; ++i) {
			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
						    AMDGPU_GPU_PAGE_SIZE);
			pte[i] |= flags;
		}
1235
		addr = 0;
1236 1237
	}

1238
	r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
1239 1240 1241
	if (r)
		goto error_free;

1242
	r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1243
			     owner, false);
1244 1245
	if (r)
		goto error_free;
A
Alex Deucher 已提交
1246

1247
	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
1248 1249 1250
	if (r)
		goto error_free;

1251 1252 1253
	r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
	if (r)
		goto error_free;
A
Alex Deucher 已提交
1254

1255 1256
	amdgpu_ring_pad_ib(ring, params.ib);
	WARN_ON(params.ib->length_dw > ndw);
1257 1258
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &f);
1259 1260
	if (r)
		goto error_free;
A
Alex Deucher 已提交
1261

1262
	amdgpu_bo_fence(vm->root.base.bo, f, true);
1263 1264
	dma_fence_put(*fence);
	*fence = f;
A
Alex Deucher 已提交
1265
	return 0;
C
Chunming Zhou 已提交
1266 1267

error_free:
1268
	amdgpu_job_free(job);
1269
	return r;
A
Alex Deucher 已提交
1270 1271
}

/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
1276
 * @exclusive: fence we need to sync to
1277
 * @pages_addr: DMA addresses to use for mapping
1278 1279
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
1280
 * @flags: HW flags for the mapping
1281
 * @nodes: array of drm_mm_nodes with the MC addresses
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into a SDMA IB.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1289
				      struct dma_fence *exclusive,
1290
				      dma_addr_t *pages_addr,
1291 1292
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo_va_mapping *mapping,
1293
				      uint64_t flags,
1294
				      struct drm_mm_node *nodes,
1295
				      struct dma_fence **fence)
1296
{
1297
	unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1298
	uint64_t pfn, start = mapping->start;
	int r;

	/* normally, only the READABLE and WRITEABLE bits of bo_va->flags should
	 * get here, but just in case we filter the flags again
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

1309 1310 1311
	flags &= ~AMDGPU_PTE_EXECUTABLE;
	flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

1312 1313 1314
	flags &= ~AMDGPU_PTE_MTYPE_MASK;
	flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);

	if ((mapping->flags & AMDGPU_PTE_PRT) &&
	    (adev->asic_type >= CHIP_VEGA10)) {
		flags |= AMDGPU_PTE_PRT;
		flags &= ~AMDGPU_PTE_VALID;
	}

1321 1322
	trace_amdgpu_vm_bo_update(mapping);

	pfn = mapping->offset >> PAGE_SHIFT;
	if (nodes) {
		while (pfn >= nodes->size) {
			pfn -= nodes->size;
			++nodes;
		}
1329
	}
1330

1331
	do {
1332
		dma_addr_t *dma_addr = NULL;
1333 1334
		uint64_t max_entries;
		uint64_t addr, last;
1335

		if (nodes) {
			addr = nodes->start << PAGE_SHIFT;
			max_entries = (nodes->size - pfn) *
				(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
		} else {
			addr = 0;
			max_entries = S64_MAX;
		}
1344

1345
		if (pages_addr) {
1346 1347
			uint64_t count;

1348
			max_entries = min(max_entries, 16ull * 1024ull);
			for (count = 1; count < max_entries; ++count) {
				uint64_t idx = pfn + count;

				if (pages_addr[idx] !=
				    (pages_addr[idx - 1] + PAGE_SIZE))
					break;
			}

			if (count < min_linear_pages) {
				addr = pfn << PAGE_SHIFT;
				dma_addr = pages_addr;
			} else {
				addr = pages_addr[pfn];
				max_entries = count;
			}

1365 1366
		} else if (flags & AMDGPU_PTE_VALID) {
			addr += adev->vm_manager.vram_base_offset;
1367
			addr += pfn << PAGE_SHIFT;
1368 1369
		}

1370
		last = min((uint64_t)mapping->last, start + max_entries - 1);
1371
		r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
						start, last, flags, addr,
						fence);
		if (r)
			return r;

1377 1378 1379 1380 1381
		pfn += last - start + 1;
		if (nodes && nodes->size == pfn) {
			pfn = 0;
			++nodes;
		}
1382
		start = last + 1;
1383

1384
	} while (unlikely(start != mapping->last + 1));
1385 1386 1387 1388

	return 0;
}

A
Alex Deucher 已提交
1389 1390 1391 1392 1393
/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
1394
 * @clear: if true clear the entries
A
Alex Deucher 已提交
1395 1396 1397 1398 1399 1400
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
1401
			bool clear)
A
Alex Deucher 已提交
1402
{
1403 1404
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
A
Alex Deucher 已提交
1405
	struct amdgpu_bo_va_mapping *mapping;
1406
	dma_addr_t *pages_addr = NULL;
1407
	struct ttm_mem_reg *mem;
1408
	struct drm_mm_node *nodes;
1409
	struct dma_fence *exclusive, **last_update;
1410
	uint64_t flags;
A
Alex Deucher 已提交
1411 1412
	int r;

1413
	if (clear || !bo_va->base.bo) {
1414
		mem = NULL;
1415
		nodes = NULL;
1416 1417
		exclusive = NULL;
	} else {
1418 1419
		struct ttm_dma_tt *ttm;

1420
		mem = &bo_va->base.bo->tbo.mem;
1421 1422
		nodes = mem->mm_node;
		if (mem->mem_type == TTM_PL_TT) {
1423 1424
			ttm = container_of(bo_va->base.bo->tbo.ttm,
					   struct ttm_dma_tt, ttm);
1425
			pages_addr = ttm->dma_address;
1426
		}
1427
		exclusive = reservation_object_get_excl(bo->tbo.resv);
A
Alex Deucher 已提交
1428 1429
	}

1430
	if (bo)
1431
		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1432
	else
1433
		flags = 0x0;
A
Alex Deucher 已提交
1434

1435 1436 1437 1438 1439
	if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
		last_update = &vm->last_update;
	else
		last_update = &bo_va->last_pt_update;

1440 1441
	if (!clear && bo_va->base.moved) {
		bo_va->base.moved = false;
1442
		list_splice_init(&bo_va->valids, &bo_va->invalids);
1443

1444 1445
	} else if (bo_va->cleared != clear) {
		list_splice_init(&bo_va->valids, &bo_va->invalids);
1446
	}
1447 1448

	list_for_each_entry(mapping, &bo_va->invalids, list) {
1449
		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
1450
					       mapping, flags, nodes,
1451
					       last_update);
A
Alex Deucher 已提交
1452 1453 1454 1455
		if (r)
			return r;
	}

1456 1457 1458
	if (vm->use_cpu_for_update) {
		/* Flush HDP */
		mb();
1459
		amdgpu_asic_flush_hdp(adev);
1460 1461
	}

A
Alex Deucher 已提交
1462
	spin_lock(&vm->status_lock);
1463
	list_del_init(&bo_va->base.vm_status);
A
Alex Deucher 已提交
1464 1465
	spin_unlock(&vm->status_lock);

	list_splice_init(&bo_va->invalids, &bo_va->valids);
	bo_va->cleared = clear;

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
1472 1473
	}

A
Alex Deucher 已提交
1474 1475 1476
	return 0;
}

/**
 * amdgpu_vm_update_prt_state - update the global PRT state
 */
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
	unsigned long flags;
	bool enable;

	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1486
	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1487
	adev->gmc.gmc_funcs->set_prt(adev, enable);
1488 1489 1490
	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
}

1491
/**
1492
 * amdgpu_vm_prt_get - add a PRT user
1493 1494 1495
 */
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
1496
	if (!adev->gmc.gmc_funcs->set_prt)
1497 1498
		return;

1499 1500 1501 1502
	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
		amdgpu_vm_update_prt_state(adev);
}

1503 1504 1505 1506 1507
/**
 * amdgpu_vm_prt_put - drop a PRT user
 */
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
1508
	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1509 1510 1511
		amdgpu_vm_update_prt_state(adev);
}

1512
/**
1513
 * amdgpu_vm_prt_cb - callback for updating the PRT status
1514 1515 1516 1517 1518
 */
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);

1519
	amdgpu_vm_prt_put(cb->adev);
1520 1521 1522
	kfree(cb);
}

/**
 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
 */
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
				 struct dma_fence *fence)
{
1529
	struct amdgpu_prt_cb *cb;
1530

1531
	if (!adev->gmc.gmc_funcs->set_prt)
1532 1533 1534
		return;

	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		if (fence)
			dma_fence_wait(fence, false);

1540
		amdgpu_vm_prt_put(adev);
	} else {
		cb->adev = adev;
		if (!fence || dma_fence_add_callback(fence, &cb->cb,
						     amdgpu_vm_prt_cb))
			amdgpu_vm_prt_cb(fence, &cb->cb);
	}
}

/**
 * amdgpu_vm_free_mapping - free a mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @mapping: mapping to be freed
 * @fence: fence of the unmap operation
 *
 * Free a mapping and make sure we decrease the PRT usage count if applicable.
 */
static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo_va_mapping *mapping,
				   struct dma_fence *fence)
{
1564 1565 1566 1567
	if (mapping->flags & AMDGPU_PTE_PRT)
		amdgpu_vm_add_prt_cb(adev, fence);
	kfree(mapping);
}
1568

/**
 * amdgpu_vm_prt_fini - finish all prt mappings
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Register a cleanup callback to disable PRT support after VM dies.
 */
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
1579
	struct reservation_object *resv = vm->root.base.bo->tbo.resv;
1580 1581 1582
	struct dma_fence *excl, **shared;
	unsigned i, shared_count;
	int r;
1583

	r = reservation_object_get_fences_rcu(resv, &excl,
					      &shared_count, &shared);
	if (r) {
		/* Not enough memory to grab the fence list, as last resort
		 * block for all the fences to complete.
		 */
		reservation_object_wait_timeout_rcu(resv, true, false,
						    MAX_SCHEDULE_TIMEOUT);
		return;
1593
	}
1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604

	/* Add a callback for each fence in the reservation object */
	amdgpu_vm_prt_get(adev);
	amdgpu_vm_add_prt_cb(adev, excl);

	for (i = 0; i < shared_count; ++i) {
		amdgpu_vm_prt_get(adev);
		amdgpu_vm_add_prt_cb(adev, shared[i]);
	}

	kfree(shared);
1605 1606
}

A
Alex Deucher 已提交
1607 1608 1609 1610 1611
/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
1612 1613
 * @fence: optional resulting fence (unchanged if no work needed to be done
 * or if an error occurred)
A
Alex Deucher 已提交
1614 1615 1616 1617 1618 1619 1620
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1621 1622
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence)
A
Alex Deucher 已提交
1623 1624
{
	struct amdgpu_bo_va_mapping *mapping;
1625
	struct dma_fence *f = NULL;
A
Alex Deucher 已提交
1626
	int r;
Y
Yong Zhao 已提交
1627
	uint64_t init_pte_value = 0;
A
Alex Deucher 已提交
1628 1629 1630 1631 1632

	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);
1633

Y
Yong Zhao 已提交
1634
		if (vm->pte_support_ats)
1635
			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
Y
Yong Zhao 已提交
1636

1637
		r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
1638
						mapping->start, mapping->last,
Y
Yong Zhao 已提交
1639
						init_pte_value, 0, &f);
1640
		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1641
		if (r) {
1642
			dma_fence_put(f);
A
Alex Deucher 已提交
1643
			return r;
1644
		}
1645
	}
A
Alex Deucher 已提交
1646

	if (fence && f) {
		dma_fence_put(*fence);
		*fence = f;
	} else {
		dma_fence_put(f);
A
Alex Deucher 已提交
1652
	}
1653

A
Alex Deucher 已提交
1654 1655 1656 1657 1658
	return 0;

}

/**
1659
 * amdgpu_vm_handle_moved - handle moved BOs in the PT
A
Alex Deucher 已提交
1660 1661 1662
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
1663
 * @sync: sync object to add fences to
A
Alex Deucher 已提交
1664
 *
1665
 * Make sure all BOs which are moved are updated in the PTs.
A
Alex Deucher 已提交
1666 1667
 * Returns 0 for success.
 *
1668
 * PTs have to be reserved!
A
Alex Deucher 已提交
1669
 */
1670
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1671
			   struct amdgpu_vm *vm)
A
Alex Deucher 已提交
1672
{
1673
	bool clear;
1674
	int r = 0;
A
Alex Deucher 已提交
1675 1676

	spin_lock(&vm->status_lock);
1677
	while (!list_empty(&vm->moved)) {
1678
		struct amdgpu_bo_va *bo_va;
1679
		struct reservation_object *resv;
1680

1681
		bo_va = list_first_entry(&vm->moved,
1682
			struct amdgpu_bo_va, base.vm_status);
A
Alex Deucher 已提交
1683
		spin_unlock(&vm->status_lock);
1684

1685 1686
		resv = bo_va->base.bo->tbo.resv;

1687
		/* Per VM BOs never need to be cleared in the page tables */
1688 1689 1690
		if (resv == vm->root.base.bo->tbo.resv)
			clear = false;
		/* Try to reserve the BO to avoid clearing its ptes */
1691
		else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
			clear = false;
		/* Somebody else is using the BO right now */
		else
			clear = true;
1696 1697

		r = amdgpu_vm_bo_update(adev, bo_va, clear);
A
Alex Deucher 已提交
1698 1699 1700
		if (r)
			return r;

1701 1702 1703
		if (!clear && resv != vm->root.base.bo->tbo.resv)
			reservation_object_unlock(resv);

A
Alex Deucher 已提交
1704 1705 1706 1707
		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

1708
	return r;
A
Alex Deucher 已提交
1709 1710 1711 1712 1713 1714 1715 1716 1717
}

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
1718
 * Add @bo into the requested vm.
A
Alex Deucher 已提交
1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733
 * Add @bo to the list of bos associated with the vm
 * Returns newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
1734 1735 1736 1737 1738
	bo_va->base.vm = vm;
	bo_va->base.bo = bo;
	INIT_LIST_HEAD(&bo_va->base.bo_list);
	INIT_LIST_HEAD(&bo_va->base.vm_status);

A
Alex Deucher 已提交
1739
	bo_va->ref_count = 1;
1740 1741
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);
1742

	if (!bo)
		return bo_va;

	list_add_tail(&bo_va->base.bo_list, &bo->va);

	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
		return bo_va;

	if (bo->preferred_domains &
	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
		return bo_va;

	/*
	 * We checked all the prerequisites, but it looks like this per VM BO
	 * is currently evicted. add the BO to the evicted list to make sure it
	 * is validated on next VM use to avoid fault.
	 * */
	spin_lock(&vm->status_lock);
	list_move_tail(&bo_va->base.vm_status, &vm->evicted);
	spin_unlock(&vm->status_lock);
A
Alex Deucher 已提交
1763 1764 1765 1766

	return bo_va;
}

/**
 * amdgpu_vm_bo_insert_map - insert a new mapping
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @mapping: the mapping to insert
 *
 * Insert a new mapping into all structures.
 */
static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va,
				    struct amdgpu_bo_va_mapping *mapping)
{
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo *bo = bo_va->base.bo;

	mapping->bo_va = bo_va;
	list_add(&mapping->list, &bo_va->invalids);
	amdgpu_vm_it_insert(mapping, &vm->va);

	if (mapping->flags & AMDGPU_PTE_PRT)
		amdgpu_vm_prt_get(adev);

	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
		spin_lock(&vm->status_lock);
		if (list_empty(&bo_va->base.vm_status))
			list_add(&bo_va->base.vm_status, &vm->moved);
		spin_unlock(&vm->status_lock);
	}
	trace_amdgpu_vm_bo_map(bo_va, mapping);
}

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	uint64_t eaddr;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fits at this offset */
	eaddr = saddr + size - 1;
	if (saddr >= eaddr ||
	    (bo && offset + size > amdgpu_bo_size(bo)))
		return -EINVAL;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	if (tmp) {
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
			tmp->start, tmp->last + 1);
		return -EINVAL;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);

	return 0;
}
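
/*
 * Example (illustrative sketch only, not taken from this file): mapping a
 * buffer object into a VM usually pairs amdgpu_vm_bo_add() with
 * amdgpu_vm_bo_map(). Addresses, offsets and sizes are in bytes and must be
 * GPU page aligned; "vm", "bo" and the chosen flags below are assumptions
 * made for the example.
 *
 *	struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *
 *	if (!bo_va)
 *		return -ENOMEM;
 *
 *	// map 1 MiB of the BO at GPU VA 0x100000, readable and writeable
 *	r = amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, 0x100000,
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 */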

/**
 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM. Replace existing
 * mappings as we do so.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t saddr, uint64_t offset,
			     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo = bo_va->base.bo;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fits at this offset */
	eaddr = saddr + size - 1;
	if (saddr >= eaddr ||
	    (bo && offset + size > amdgpu_bo_size(bo)))
		return -EINVAL;

	/* Allocate all the needed memory */
	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
	if (r) {
		kfree(mapping);
		return r;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);

	return 0;
}

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->base.vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids)
			return -ENOENT;
	}

	list_del(&mapping->list);
	amdgpu_vm_it_remove(mapping, &vm->va);
	mapping->bo_va = NULL;
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid)
		list_add(&mapping->list, &vm->freed);
	else
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);

	return 0;
}

/**
 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
 *
 * @adev: amdgpu_device pointer
 * @vm: VM structure to use
 * @saddr: start of the range
 * @size: size of the range
 *
 * Remove all mappings in a range, split them as appropriate.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size)
{
	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
	LIST_HEAD(removed);
	uint64_t eaddr;

	eaddr = saddr + size - 1;
	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	/* Allocate all the needed memory */
	before = kzalloc(sizeof(*before), GFP_KERNEL);
	if (!before)
		return -ENOMEM;
	INIT_LIST_HEAD(&before->list);

	after = kzalloc(sizeof(*after), GFP_KERNEL);
	if (!after) {
		kfree(before);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&after->list);

	/* Now gather all removed mappings */
	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	while (tmp) {
		/* Remember mapping split at the start */
		if (tmp->start < saddr) {
			before->start = tmp->start;
			before->last = saddr - 1;
			before->offset = tmp->offset;
			before->flags = tmp->flags;
			list_add(&before->list, &tmp->list);
		}

		/* Remember mapping split at the end */
		if (tmp->last > eaddr) {
			after->start = eaddr + 1;
			after->last = tmp->last;
			after->offset = tmp->offset;
			after->offset += after->start - tmp->start;
			after->flags = tmp->flags;
			list_add(&after->list, &tmp->list);
		}

		list_del(&tmp->list);
		list_add(&tmp->list, &removed);

		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
	}

	/* And free them up */
	list_for_each_entry_safe(tmp, next, &removed, list) {
		amdgpu_vm_it_remove(tmp, &vm->va);
		list_del(&tmp->list);

		if (tmp->start < saddr)
			tmp->start = saddr;
		if (tmp->last > eaddr)
			tmp->last = eaddr;

		tmp->bo_va = NULL;
		list_add(&tmp->list, &vm->freed);
		trace_amdgpu_vm_bo_unmap(NULL, tmp);
	}

	/* Insert partial mapping before the range */
	if (!list_empty(&before->list)) {
		amdgpu_vm_it_insert(before, &vm->va);
		if (before->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(before);
	}

	/* Insert partial mapping after the range */
	if (!list_empty(&after->list)) {
		amdgpu_vm_it_insert(after, &vm->va);
		if (after->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(after);
	}

	return 0;
}
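
/*
 * Worked example (illustrative only): assume an existing mapping covering
 * GPU pages [0x000, 0x3ff] and a clear of the byte range corresponding to
 * pages [0x100, 0x1ff]. The loop above queues the old mapping on "removed",
 * trims it to [0x100, 0x1ff] and moves it to the freed list, while "before"
 * re-inserts pages [0x000, 0x0ff] unchanged and "after" re-inserts pages
 * [0x200, 0x3ff] with its offset advanced relative to the original start.
 */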

/**
 * amdgpu_vm_bo_lookup_mapping - find mapping by address
 *
 * @vm: the requested VM
 * @addr: the address to look up
 *
 * Find a mapping by its address.
 */
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr)
{
	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
}

/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->base.vm;

	list_del(&bo_va->base.bo_list);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->base.vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		mapping->bo_va = NULL;
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);
	}

	dma_fence_put(bo_va->last_pt_update);
	kfree(bo_va);
}

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 * @evicted: is the BO evicted
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted)
{
	struct amdgpu_vm_bo_base *bo_base;

	list_for_each_entry(bo_base, &bo->va, bo_list) {
		struct amdgpu_vm *vm = bo_base->vm;

		bo_base->moved = true;
		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
			spin_lock(&bo_base->vm->status_lock);
			if (bo->tbo.type == ttm_bo_type_kernel)
				list_move(&bo_base->vm_status, &vm->evicted);
			else
				list_move_tail(&bo_base->vm_status,
					       &vm->evicted);
			spin_unlock(&bo_base->vm->status_lock);
			continue;
		}

		if (bo->tbo.type == ttm_bo_type_kernel) {
			spin_lock(&bo_base->vm->status_lock);
			if (list_empty(&bo_base->vm_status))
				list_add(&bo_base->vm_status, &vm->relocated);
			spin_unlock(&bo_base->vm->status_lock);
			continue;
		}

		spin_lock(&bo_base->vm->status_lock);
		if (list_empty(&bo_base->vm_status))
			list_add(&bo_base->vm_status, &vm->moved);
		spin_unlock(&bo_base->vm->status_lock);
	}
}

static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
	/* Total bits covered by PD + PTs */
	unsigned bits = ilog2(vm_size) + 18;

	/* Make sure the PD is 4K in size for up to an 8GB address space.
	 * Above that, split equally between PD and PTs.
	 */
	if (vm_size <= 8)
		return (bits - 9);
	else
		return ((bits + 3) / 2);
}
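
/*
 * Worked example (illustrative only): for an 8GB VM, bits = ilog2(8) + 18 =
 * 21 and the block size is 21 - 9 = 12, leaving 9 bits for the PD (512
 * entries, a 4K PD). For a 64GB VM, bits = 6 + 18 = 24 and the block size
 * becomes (24 + 3) / 2 = 13, so each PT covers 2^13 pages and the PD the
 * remaining 2^11 entries.
 */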

/**
 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
 *
 * @adev: amdgpu_device pointer
 * @vm_size: the default vm size if it's set auto
 * @fragment_size_default: default PTE fragment size
 * @max_level: max VMPT level
 * @max_bits: max address space size in bits
 */
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits)
{
	uint64_t tmp;

	/* adjust vm size first */
	if (amdgpu_vm_size != -1) {
		unsigned max_size = 1 << (max_bits - 30);

		vm_size = amdgpu_vm_size;
		if (vm_size > max_size) {
			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
				 amdgpu_vm_size, max_size);
			vm_size = max_size;
		}
	}

	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;

	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
	if (amdgpu_vm_block_size != -1)
		tmp >>= amdgpu_vm_block_size - 9;
	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
	switch (adev->vm_manager.num_level) {
	case 3:
		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
		break;
	case 2:
		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
		break;
	case 1:
		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
		break;
	default:
		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
	}
	/* block size depends on vm size and hw setup */
	if (amdgpu_vm_block_size != -1)
		adev->vm_manager.block_size =
			min((unsigned)amdgpu_vm_block_size, max_bits
			    - AMDGPU_GPU_PAGE_SHIFT
			    - 9 * adev->vm_manager.num_level);
	else if (adev->vm_manager.num_level > 1)
		adev->vm_manager.block_size = 9;
	else
		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);

	if (amdgpu_vm_fragment_size == -1)
		adev->vm_manager.fragment_size = fragment_size_default;
	else
		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;

	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
		 vm_size, adev->vm_manager.num_level + 1,
		 adev->vm_manager.block_size,
		 adev->vm_manager.fragment_size);
}
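
/*
 * Worked example (illustrative only): with the module defaults
 * (amdgpu_vm_size == -1, amdgpu_vm_block_size == -1) and a caller passing
 * vm_size = 262144 (a 256TB address space), max_pfn = 262144 << 18 = 2^36
 * pages. fls64(2^36) - 1 = 36 and DIV_ROUND_UP(36, 9) - 1 = 3, so num_level
 * becomes 3 (a four level page table rooted at PDB2, assuming max_level >= 3)
 * and the block size falls back to 9 bits.
 */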

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @vm_context: Indicates if it is a GFX or Compute context
 * @pasid: Process address space identifier
 *
 * Init @vm fields.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, unsigned int pasid)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
		AMDGPU_VM_PTE_COUNT(adev) * 8);
	uint64_t init_pde_value = 0, flags;
	unsigned ring_instance;
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	unsigned long size;
	int r, i;

	vm->va = RB_ROOT_CACHED;
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		vm->reserved_vmid[i] = NULL;
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->evicted);
	INIT_LIST_HEAD(&vm->relocated);
	INIT_LIST_HEAD(&vm->moved);
	INIT_LIST_HEAD(&vm->freed);

	/* create scheduler entity for page table updates */
	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
	ring_instance %= adev->vm_manager.vm_pte_num_rings;
	ring = adev->vm_manager.vm_pte_rings[ring_instance];
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
	r = drm_sched_entity_init(&ring->sched, &vm->entity,
				  rq, amdgpu_sched_jobs, NULL);
	if (r)
		return r;

	vm->pte_support_ats = false;

	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
						AMDGPU_VM_USE_CPU_FOR_COMPUTE);

		if (adev->asic_type == CHIP_RAVEN) {
			vm->pte_support_ats = true;
			init_pde_value = AMDGPU_PTE_DEFAULT_ATC
					| AMDGPU_PDE_PTE;
		}
	} else
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
						AMDGPU_VM_USE_CPU_FOR_GFX);
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update && !amdgpu_vm_is_large_bar(adev)),
		  "CPU update of VM recommended only for large BAR system\n");
	vm->last_update = NULL;

	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
			AMDGPU_GEM_CREATE_VRAM_CLEARED;
	if (vm->use_cpu_for_update)
		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	else
		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
				AMDGPU_GEM_CREATE_SHADOW);

	size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
	r = amdgpu_bo_create(adev, size, align, true, AMDGPU_GEM_DOMAIN_VRAM,
			     flags, NULL, NULL, init_pde_value,
			     &vm->root.base.bo);
	if (r)
		goto error_free_sched_entity;

	r = amdgpu_bo_reserve(vm->root.base.bo, true);
	if (r)
		goto error_free_root;

	vm->root.base.vm = vm;
	list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
	list_add_tail(&vm->root.base.vm_status, &vm->evicted);
	amdgpu_bo_unreserve(vm->root.base.bo);

	if (pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
			      GFP_ATOMIC);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
		if (r < 0)
			goto error_free_root;

		vm->pasid = pasid;
	}

	INIT_KFIFO(vm->faults);
	vm->fault_credit = 16;

	return 0;

error_free_root:
	amdgpu_bo_unref(&vm->root.base.bo->shadow);
	amdgpu_bo_unref(&vm->root.base.bo);
	vm->root.base.bo = NULL;

error_free_sched_entity:
	drm_sched_entity_fini(&ring->sched, &vm->entity);

	return r;
}
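
/*
 * Example (illustrative sketch only, not taken from this file): a userspace
 * GFX VM is typically created from the driver open path and torn down again
 * on release; "fpriv" and the pasid value are assumptions for the example.
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_vm_fini(adev, &fpriv->vm);
 */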

/**
 * amdgpu_vm_free_levels - free PD/PT levels
 *
 * @adev: amdgpu device structure
 * @parent: PD/PT starting level to free
 * @level: level of parent structure
 *
 * Free the page directory or page table level and all sub levels.
 */
static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
				  struct amdgpu_vm_pt *parent,
				  unsigned level)
{
	unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);

	if (parent->base.bo) {
		list_del(&parent->base.bo_list);
		list_del(&parent->base.vm_status);
		amdgpu_bo_unref(&parent->base.bo->shadow);
		amdgpu_bo_unref(&parent->base.bo);
	}

	if (parent->entries)
		for (i = 0; i < num_entries; i++)
			amdgpu_vm_free_levels(adev, &parent->entries[i],
					      level + 1);

	kvfree(parent->entries);
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all bos from the vm bo list
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
	struct amdgpu_bo *root;
	u64 fault;
	int i, r;

	/* Clear pending page faults from IH when the VM is destroyed */
	while (kfifo_get(&vm->faults, &fault))
		amdgpu_ih_clear_fault(adev, fault);

	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}

	drm_sched_entity_fini(vm->entity.sched, &vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp,
					     &vm->va.rb_root, rb) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
			amdgpu_vm_prt_fini(adev, vm);
			prt_fini_needed = false;
		}

		list_del(&mapping->list);
		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
	}

	root = amdgpu_bo_ref(vm->root.base.bo);
	r = amdgpu_bo_reserve(root, true);
	if (r) {
		dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
	} else {
		amdgpu_vm_free_levels(adev, &vm->root,
				      adev->vm_manager.root_level);
		amdgpu_bo_unreserve(root);
	}
	amdgpu_bo_unref(&root);
	dma_fence_put(vm->last_update);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		amdgpu_vmid_free_reserved(adev, vm, i);
}

/**
 * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
 *
 * @adev: amdgpu_device pointer
 * @pasid: PASID to identify the VM
 *
 * This function is expected to be called in interrupt context. Returns
 * true if there was fault credit, false otherwise
 */
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
				  unsigned int pasid)
{
	struct amdgpu_vm *vm;

	spin_lock(&adev->vm_manager.pasid_lock);
	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (!vm) {
		/* VM not found, can't track fault credit */
		spin_unlock(&adev->vm_manager.pasid_lock);
		return true;
	}

	/* No lock needed. Only accessed by IRQ handler */
	if (!vm->fault_credit) {
		/* Too many faults in this VM */
		spin_unlock(&adev->vm_manager.pasid_lock);
		return false;
	}

	vm->fault_credit--;
	spin_unlock(&adev->vm_manager.pasid_lock);
	return true;
}
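
/*
 * Example (illustrative sketch only, not taken from this file): a fault
 * interrupt handler can use the credit to drop excessive faults early;
 * "entry" and its pasid field are assumptions for the example.
 *
 *	if (!amdgpu_vm_pasid_fault_credit(adev, entry->pasid))
 *		// too many faults from this VM, silently drop the interrupt
 *		return 1;
 */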

/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i;

	amdgpu_vmid_mgr_init(adev);

	adev->vm_manager.fence_context =
		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
	spin_lock_init(&adev->vm_manager.prt_lock);
	atomic_set(&adev->vm_manager.num_prt_users, 0);

	/* If not overridden by the user, by default, only in large BAR systems
	 * Compute VM tables will be updated by CPU
	 */
#ifdef CONFIG_X86_64
	if (amdgpu_vm_update_mode == -1) {
		if (amdgpu_vm_is_large_bar(adev))
			adev->vm_manager.vm_update_mode =
				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
		else
			adev->vm_manager.vm_update_mode = 0;
	} else
		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
#else
	adev->vm_manager.vm_update_mode = 0;
#endif

	idr_init(&adev->vm_manager.pasid_idr);
	spin_lock_init(&adev->vm_manager.pasid_lock);
}

/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
	idr_destroy(&adev->vm_manager.pasid_idr);

	amdgpu_vmid_mgr_fini(adev);
}

int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	union drm_amdgpu_vm *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	int r;

	switch (args->in.op) {
	case AMDGPU_VM_OP_RESERVE_VMID:
		/* currently, we only need to reserve a vmid from the gfxhub */
		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
		if (r)
			return r;
		break;
	case AMDGPU_VM_OP_UNRESERVE_VMID:
		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}