/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * VRAM pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST

/* Local structure. Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 */
struct amdgpu_pte_update_params {
	/* amdgpu device we do this update for */
	struct amdgpu_device *adev;
	/* optional amdgpu_vm we do this update for */
	struct amdgpu_vm *vm;
	/* address where to copy page table entries from */
	uint64_t src;
	/* indirect buffer to fill with commands */
	struct amdgpu_ib *ib;
	/* Function which actually does the update */
	void (*func)(struct amdgpu_pte_update_params *params,
		     struct amdgpu_bo *bo, uint64_t pe,
		     uint64_t addr, unsigned count, uint32_t incr,
		     uint64_t flags);
	/* The next two are used during VM update by CPU
	 *  DMA addresses to use for mapping
	 *  Kernel pointer of PD/PT BO that needs to be updated
	 */
	dma_addr_t *pages_addr;
	void *kptr;
};

/* Helper to disable partial resident texture feature from a fence callback */
struct amdgpu_prt_cb {
	struct amdgpu_device *adev;
	struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the number of bits the pfn needs to be right shifted for a level.
 */
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
				      unsigned level)
{
	unsigned shift = 0xff;

	switch (level) {
	case AMDGPU_VM_PDB2:
	case AMDGPU_VM_PDB1:
	case AMDGPU_VM_PDB0:
		shift = 9 * (AMDGPU_VM_PDB0 - level) +
			adev->vm_manager.block_size;
		break;
	case AMDGPU_VM_PTB:
		shift = 0;
		break;
	default:
		dev_err(adev->dev, "the level%d isn't supported.\n", level);
	}

	return shift;
}
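
/*
 * Worked example (a sketch, assuming a 9-bit block_size): PTB entries map
 * individual 4K pages (shift 0), PDB0 entries each cover 1 << 9 pages,
 * PDB1 entries 1 << 18 pages and PDB2 entries 1 << 27 pages, so the pfn is
 * shifted right by 0, 9, 18 or 27 bits before indexing into that level.
 */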

/**
 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the number of entries in a page directory or page table.
 */
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
				      unsigned level)
{
	unsigned shift = amdgpu_vm_level_shift(adev,
					       adev->vm_manager.root_level);

	if (level == adev->vm_manager.root_level)
		/* For the root directory */
		return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
	else if (level != AMDGPU_VM_PTB)
		/* Everything in between */
		return 512;
	else
		/* For the page tables on the leaves */
		return AMDGPU_VM_PTE_COUNT(adev);
}

/**
 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the size of the BO for a page directory or page table in bytes.
 */
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
}
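
/*
 * Illustrative sizing (a sketch based on the helpers above): an interior
 * directory with 512 entries of 8 bytes each needs a 4KB BO, while the
 * root directory size depends on max_pfn and the configured block size.
 */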

/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->robj = vm->root.base.bo;
	entry->priority = 0;
	entry->tv.bo = &entry->robj->tbo;
	entry->tv.shared = true;
	entry->user_pages = NULL;
	list_add(&entry->tv.head, validated);
}

/**
 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 */
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*validate)(void *p, struct amdgpu_bo *bo),
			      void *param)
{
	struct ttm_bo_global *glob = adev->mman.bdev.glob;
	int r;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->evicted)) {
		struct amdgpu_vm_bo_base *bo_base;
		struct amdgpu_bo *bo;

		bo_base = list_first_entry(&vm->evicted,
					   struct amdgpu_vm_bo_base,
					   vm_status);
		spin_unlock(&vm->status_lock);

		bo = bo_base->bo;
		BUG_ON(!bo);
		if (bo->parent) {
			r = validate(param, bo);
			if (r)
				return r;

			spin_lock(&glob->lru_lock);
			ttm_bo_move_to_lru_tail(&bo->tbo);
			if (bo->shadow)
				ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
			spin_unlock(&glob->lru_lock);
		}

		if (bo->tbo.type == ttm_bo_type_kernel &&
		    vm->use_cpu_for_update) {
			r = amdgpu_bo_kmap(bo, NULL);
			if (r)
				return r;
		}

		spin_lock(&vm->status_lock);
		if (bo->tbo.type != ttm_bo_type_kernel)
			list_move(&bo_base->vm_status, &vm->moved);
		else
			list_move(&bo_base->vm_status, &vm->relocated);
	}
	spin_unlock(&vm->status_lock);

	return 0;
}

/**
 * amdgpu_vm_ready - check VM is ready for updates
 *
 * @vm: VM to check
 *
 * Check if all VM PDs/PTs are ready for updates
 */
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
	bool ready;

	spin_lock(&vm->status_lock);
	ready = list_empty(&vm->evicted);
	spin_unlock(&vm->status_lock);

	return ready;
}

/**
 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
 *
 * @adev: amdgpu_device pointer
 * @bo: BO to clear
 * @level: level this BO is at
 *
 * Root PD needs to be reserved when calling this.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm, struct amdgpu_bo *bo,
			      unsigned level, bool pte_support_ats)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct dma_fence *fence = NULL;
	unsigned entries, ats_entries;
	struct amdgpu_ring *ring;
	struct amdgpu_job *job;
	uint64_t addr;
	int r;

	addr = amdgpu_bo_gpu_offset(bo);
	entries = amdgpu_bo_size(bo) / 8;

	if (pte_support_ats) {
		if (level == adev->vm_manager.root_level) {
			ats_entries = amdgpu_vm_level_shift(adev, level);
			ats_entries += AMDGPU_GPU_PAGE_SHIFT;
			ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
			ats_entries = min(ats_entries, entries);
			entries -= ats_entries;
		} else {
			ats_entries = entries;
			entries = 0;
		}
	} else {
		ats_entries = 0;
	}

	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		goto error;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto error;

	if (ats_entries) {
		uint64_t ats_value;

		ats_value = AMDGPU_PTE_DEFAULT_ATC;
		if (level != AMDGPU_VM_PTB)
			ats_value |= AMDGPU_PDE_PTE;

		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
				      ats_entries, 0, ats_value);
		addr += ats_entries * 8;
	}

	if (entries)
		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
				      entries, 0, 0);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);

	WARN_ON(job->ibs[0].length_dw > 64);
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

	if (bo->shadow)
		return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
					  level, pte_support_ats);

	return 0;

error_free:
	amdgpu_job_free(job);

error:
	return r;
}

/**
 * amdgpu_vm_alloc_levels - allocate the PD/PT levels
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @saddr: start of the address range
 * @eaddr: end of the address range
 *
 * Make sure the page directories and page tables are allocated
 */
static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm,
				  struct amdgpu_vm_pt *parent,
				  uint64_t saddr, uint64_t eaddr,
367
				  unsigned level, bool ats)
368
{
369
	unsigned shift = amdgpu_vm_level_shift(adev, level);
370
	unsigned pt_idx, from, to;
371
	u64 flags;
372
	int r;
373 374 375 376

	if (!parent->entries) {
		unsigned num_entries = amdgpu_vm_num_entries(adev, level);

M
Michal Hocko 已提交
377 378 379
		parent->entries = kvmalloc_array(num_entries,
						   sizeof(struct amdgpu_vm_pt),
						   GFP_KERNEL | __GFP_ZERO);
		if (!parent->entries)
			return -ENOMEM;
		memset(parent->entries, 0, sizeof(struct amdgpu_vm_pt));
	}

	from = saddr >> shift;
	to = eaddr >> shift;
	if (from >= amdgpu_vm_num_entries(adev, level) ||
	    to >= amdgpu_vm_num_entries(adev, level))
		return -EINVAL;
390 391

	++level;
392 393
	saddr = saddr & ((1 << shift) - 1);
	eaddr = eaddr & ((1 << shift) - 1);
394

395
	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
396 397 398 399 400 401
	if (vm->use_cpu_for_update)
		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	else
		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
				AMDGPU_GEM_CREATE_SHADOW);

402 403
	/* walk over the address space and allocate the page tables */
	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
404
		struct reservation_object *resv = vm->root.base.bo->tbo.resv;
405 406 407
		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
		struct amdgpu_bo *pt;

408
		if (!entry->base.bo) {
409 410 411
			r = amdgpu_bo_create(adev,
					     amdgpu_vm_bo_size(adev, level),
					     AMDGPU_GPU_PAGE_SIZE, true,
412
					     AMDGPU_GEM_DOMAIN_VRAM, flags,
413
					     NULL, resv, &pt);
414 415 416
			if (r)
				return r;

417
			r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
418
			if (r) {
419
				amdgpu_bo_unref(&pt->shadow);
420 421 422 423
				amdgpu_bo_unref(&pt);
				return r;
			}

424 425 426
			if (vm->use_cpu_for_update) {
				r = amdgpu_bo_kmap(pt, NULL);
				if (r) {
427
					amdgpu_bo_unref(&pt->shadow);
428 429 430 431 432
					amdgpu_bo_unref(&pt);
					return r;
				}
			}

			/* Keep a reference to the root directory to avoid
			 * freeing them up in the wrong order.
			 */
			pt->parent = amdgpu_bo_ref(parent->base.bo);
437

438 439 440
			entry->base.vm = vm;
			entry->base.bo = pt;
			list_add_tail(&entry->base.bo_list, &pt->va);
441 442 443
			spin_lock(&vm->status_lock);
			list_add(&entry->base.vm_status, &vm->relocated);
			spin_unlock(&vm->status_lock);
444 445
		}

446
		if (level < AMDGPU_VM_PTB) {
447 448 449 450
			uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
			uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
				((1 << shift) - 1);
			r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
451
						   sub_eaddr, level, ats);
452 453 454 455 456 457 458 459
			if (r)
				return r;
		}
	}

	return 0;
}

460 461 462 463 464 465 466 467 468 469 470 471 472 473 474
/**
 * amdgpu_vm_alloc_pts - Allocate page tables.
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @saddr: Start address which needs to be allocated
 * @size: Size from start address we need.
 *
 * Make sure the page tables are allocated.
 */
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			uint64_t saddr, uint64_t size)
{
	uint64_t eaddr;
475
	bool ats = false;
476 477 478 479 480 481

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	eaddr = saddr + size - 1;
482 483 484

	if (vm->pte_support_ats)
		ats = saddr < AMDGPU_VA_HOLE_START;
485 486 487 488

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

489 490 491 492 493 494
	if (eaddr >= adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
			eaddr, adev->vm_manager.max_pfn);
		return -EINVAL;
	}

495
	return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
496
				      adev->vm_manager.root_level, ats);
497 498
}

499 500 501 502 503 504
/**
 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 *
 * @adev: amdgpu_device pointer
 */
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
505
{
506
	const struct amdgpu_ip_block *ip_block;
507 508 509
	bool has_compute_vm_bug;
	struct amdgpu_ring *ring;
	int i;
510

511
	has_compute_vm_bug = false;
512

513
	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
	if (ip_block) {
		/* Compute has a VM bug for GFX version < 7.
		 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
		 */
		if (ip_block->version->major <= 7)
			has_compute_vm_bug = true;
		else if (ip_block->version->major == 8)
			if (adev->gfx.mec_fw_version < 673)
				has_compute_vm_bug = true;
	}

524 525 526 527 528
	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
			/* only compute rings */
			ring->has_compute_vm_bug = has_compute_vm_bug;
529
		else
530
			ring->has_compute_vm_bug = false;
531 532 533
	}
}

534 535
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job)
A
Alex Xie 已提交
536
{
537 538
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
539 540
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id;
541
	bool gds_switch_needed;
542
	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
543

544
	if (job->vmid == 0)
545
		return false;
546
	id = &id_mgr->ids[job->vmid];
547 548 549 550 551 552 553
	gds_switch_needed = ring->funcs->emit_gds_switch && (
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);
A
Alex Xie 已提交
554

555
	if (amdgpu_vmid_had_gpu_reset(adev, id))
556
		return true;
A
Alex Xie 已提交
557

558
	return vm_flush_needed || gds_switch_needed;
559 560
}

561 562
static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
{
563
	return (adev->gmc.real_vram_size == adev->gmc.visible_vram_size);
A
Alex Xie 已提交
564 565
}

A
Alex Deucher 已提交
566 567 568 569
/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
570
 * @vmid: vmid number to use
571
 * @pd_addr: address of the page directory
A
Alex Deucher 已提交
572
 *
573
 * Emit a VM flush when it is necessary.
A
Alex Deucher 已提交
574
 */
M
Monk Liu 已提交
575
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
A
Alex Deucher 已提交
576
{
577
	struct amdgpu_device *adev = ring->adev;
578
	unsigned vmhub = ring->funcs->vmhub;
579
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
580
	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
581
	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
582 583 584 585 586 587
		id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size);
588
	bool vm_flush_needed = job->vm_needs_flush;
589
	unsigned patch_offset = 0;
590
	int r;
591

592
	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
593 594 595
		gds_switch_needed = true;
		vm_flush_needed = true;
	}
596

M
Monk Liu 已提交
597
	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
598
		return 0;
599

600 601
	if (ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);
602

M
Monk Liu 已提交
603 604 605
	if (need_pipe_sync)
		amdgpu_ring_emit_pipeline_sync(ring);

606
	if (ring->funcs->emit_vm_flush && vm_flush_needed) {
607
		struct dma_fence *fence;
608

609
		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
610 611
		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->pasid,
					  job->vm_pd_addr);
612

613 614 615
		r = amdgpu_fence_emit(ring, &fence);
		if (r)
			return r;
616

617
		mutex_lock(&id_mgr->lock);
618 619
		dma_fence_put(id->last_flush);
		id->last_flush = fence;
620
		id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
621
		mutex_unlock(&id_mgr->lock);
622
	}
623

624
	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
625 626 627 628 629 630
		id->gds_base = job->gds_base;
		id->gds_size = job->gds_size;
		id->gws_base = job->gws_base;
		id->gws_size = job->gws_size;
		id->oa_base = job->oa_base;
		id->oa_size = job->oa_size;
631
		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
632 633 634 635 636 637 638 639 640 641 642 643
					    job->gds_size, job->gws_base,
					    job->gws_size, job->oa_base,
					    job->oa_size);
	}

	if (ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
	if (ring->funcs->emit_switch_buffer) {
		amdgpu_ring_emit_switch_buffer(ring);
		amdgpu_ring_emit_switch_buffer(ring);
644
	}
645
	return 0;
646 647
}

A
Alex Deucher 已提交
648 649 650 651 652 653
/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
654
 * Find @bo inside the requested vm.
A
Alex Deucher 已提交
655 656 657 658 659 660 661 662 663 664
 * Search inside the @bos vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

665 666
	list_for_each_entry(bo_va, &bo->va, base.bo_list) {
		if (bo_va->base.vm == vm) {
A
Alex Deucher 已提交
667 668 669 670 671 672 673
			return bo_va;
		}
	}
	return NULL;
}

/**
674
 * amdgpu_vm_do_set_ptes - helper to call the right asic function
A
Alex Deucher 已提交
675
 *
676
 * @params: see amdgpu_pte_update_params definition
677
 * @bo: PD/PT to update
A
Alex Deucher 已提交
678 679 680 681 682 683 684 685 686
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
687
static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
688
				  struct amdgpu_bo *bo,
689 690
				  uint64_t pe, uint64_t addr,
				  unsigned count, uint32_t incr,
691
				  uint64_t flags)
A
Alex Deucher 已提交
692
{
693
	pe += amdgpu_bo_gpu_offset(bo);
694
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
A
Alex Deucher 已提交
695

696
	if (count < 3) {
697 698
		amdgpu_vm_write_pte(params->adev, params->ib, pe,
				    addr | flags, count, incr);
A
Alex Deucher 已提交
699 700

	} else {
701
		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
A
Alex Deucher 已提交
702 703 704 705
				      count, incr, flags);
	}
}

706 707 708 709
/**
 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
 *
 * @params: see amdgpu_pte_update_params definition
710
 * @bo: PD/PT to update
711 712 713 714 715 716 717 718 719
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
720
				   struct amdgpu_bo *bo,
721 722
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
723
				   uint64_t flags)
724
{
725
	uint64_t src = (params->src + (addr >> 12) * 8);
726

727
	pe += amdgpu_bo_gpu_offset(bo);
728 729 730
	trace_amdgpu_vm_copy_ptes(pe, src, count);

	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
731 732
}

A
Alex Deucher 已提交
733
/**
734
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
A
Alex Deucher 已提交
735
 *
736
 * @pages_addr: optional DMA address to use for lookup
A
Alex Deucher 已提交
737 738 739
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
740
 * to and return the pointer for the page table entry.
A
Alex Deucher 已提交
741
 */
742
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
A
Alex Deucher 已提交
743 744 745
{
	uint64_t result;

746 747
	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];
748

749 750
	/* in case cpu page size != gpu page size*/
	result |= addr & (~PAGE_MASK);
A
Alex Deucher 已提交
751

752
	result &= 0xFFFFFFFFFFFFF000ULL;
A
	return result;
}

/**
 * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
 *
 * @params: see amdgpu_pte_update_params definition
761
 * @bo: PD/PT to update
762 763 764 765 766 767 768 769 770
 * @pe: kmap addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Write count number of PT/PD entries directly.
 */
static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
771
				   struct amdgpu_bo *bo,
772 773 774 775 776
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint64_t flags)
{
	unsigned int i;
777
	uint64_t value;
778

779 780
	pe += (unsigned long)amdgpu_bo_kptr(bo);

781 782
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);

783
	for (i = 0; i < count; i++) {
784 785 786
		value = params->pages_addr ?
			amdgpu_vm_map_gart(params->pages_addr, addr) :
			addr;
787 788
		amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
				       i, value, flags);
789 790 791 792
		addr += incr;
	}
}

793 794
static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			     void *owner)
795 796 797 798 799
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
800
	amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
801 802 803 804 805 806
	r = amdgpu_sync_wait(&sync, true);
	amdgpu_sync_free(&sync);

	return r;
}

807
/*
808
 * amdgpu_vm_update_pde - update a single level in the hierarchy
809
 *
810
 * @param: parameters for the update
811
 * @vm: requested vm
812
 * @parent: parent directory
813
 * @entry: entry to update
814
 *
815
 * Makes sure the requested entry in parent is up to date.
816
 */
817 818 819 820
static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
				 struct amdgpu_vm *vm,
				 struct amdgpu_vm_pt *parent,
				 struct amdgpu_vm_pt *entry)
A
Alex Deucher 已提交
821
{
822
	struct amdgpu_bo *bo = parent->base.bo, *pbo;
823 824
	uint64_t pde, pt, flags;
	unsigned level;
C
Chunming Zhou 已提交
825

826 827 828
	/* Don't update huge pages here */
	if (entry->huge)
		return;
A
Alex Deucher 已提交
829

830
	for (level = 0, pbo = bo->parent; pbo; ++level)
831 832
		pbo = pbo->parent;

833
	level += params->adev->vm_manager.root_level;
834
	pt = amdgpu_bo_gpu_offset(entry->base.bo);
835
	flags = AMDGPU_PTE_VALID;
836
	amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
837 838 839 840
	pde = (entry - parent->entries) * 8;
	if (bo->shadow)
		params->func(params, bo->shadow, pde, pt, 1, 0, flags);
	params->func(params, bo, pde, pt, 1, 0, flags);
A
Alex Deucher 已提交
841 842
}

843 844 845 846 847 848 849
/*
 * amdgpu_vm_invalidate_level - mark all PD levels as invalid
 *
 * @parent: parent PD
 *
 * Mark all PD level as invalid after an error.
 */
850 851 852 853
static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
				       struct amdgpu_vm *vm,
				       struct amdgpu_vm_pt *parent,
				       unsigned level)
854
{
855
	unsigned pt_idx, num_entries;
856 857 858 859 860

	/*
	 * Recurse into the subdirectories. This recursion is harmless because
	 * we only have a maximum of 5 layers.
	 */
861 862
	num_entries = amdgpu_vm_num_entries(adev, level);
	for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
863 864
		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];

865
		if (!entry->base.bo)
866 867
			continue;

868
		spin_lock(&vm->status_lock);
869 870
		if (list_empty(&entry->base.vm_status))
			list_add(&entry->base.vm_status, &vm->relocated);
871
		spin_unlock(&vm->status_lock);
872
		amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
873 874 875
	}
}

876 877 878 879 880 881 882 883 884 885 886 887
/*
 * amdgpu_vm_update_directories - make sure that all directories are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Makes sure all directories are up to date.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm)
{
888 889 890
	struct amdgpu_pte_update_params params;
	struct amdgpu_job *job;
	unsigned ndw = 0;
891
	int r = 0;
892

893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915
	if (list_empty(&vm->relocated))
		return 0;

restart:
	memset(&params, 0, sizeof(params));
	params.adev = adev;

	if (vm->use_cpu_for_update) {
		r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
		if (unlikely(r))
			return r;

		params.func = amdgpu_vm_cpu_set_ptes;
	} else {
		ndw = 512 * 8;
		r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
		if (r)
			return r;

		params.ib = &job->ibs[0];
		params.func = amdgpu_vm_do_set_ptes;
	}

916 917
	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->relocated)) {
918 919
		struct amdgpu_vm_bo_base *bo_base, *parent;
		struct amdgpu_vm_pt *pt, *entry;
920 921 922 923 924
		struct amdgpu_bo *bo;

		bo_base = list_first_entry(&vm->relocated,
					   struct amdgpu_vm_bo_base,
					   vm_status);
925
		list_del_init(&bo_base->vm_status);
926 927 928
		spin_unlock(&vm->status_lock);

		bo = bo_base->bo->parent;
929
		if (!bo) {
930
			spin_lock(&vm->status_lock);
931
			continue;
932
		}
933 934 935 936 937 938 939 940 941 942 943 944

		parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
					  bo_list);
		pt = container_of(parent, struct amdgpu_vm_pt, base);
		entry = container_of(bo_base, struct amdgpu_vm_pt, base);

		amdgpu_vm_update_pde(&params, vm, pt, entry);

		spin_lock(&vm->status_lock);
		if (!vm->use_cpu_for_update &&
		    (ndw - params.ib->length_dw) < 32)
			break;
945 946
	}
	spin_unlock(&vm->status_lock);
947

948 949 950
	if (vm->use_cpu_for_update) {
		/* Flush HDP */
		mb();
951
		amdgpu_asic_flush_hdp(adev, NULL);
952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973
	} else if (params.ib->length_dw == 0) {
		amdgpu_job_free(job);
	} else {
		struct amdgpu_bo *root = vm->root.base.bo;
		struct amdgpu_ring *ring;
		struct dma_fence *fence;

		ring = container_of(vm->entity.sched, struct amdgpu_ring,
				    sched);

		amdgpu_ring_pad_ib(ring, params.ib);
		amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
				 AMDGPU_FENCE_OWNER_VM, false);
		WARN_ON(params.ib->length_dw > ndw);
		r = amdgpu_job_submit(job, ring, &vm->entity,
				      AMDGPU_FENCE_OWNER_VM, &fence);
		if (r)
			goto error;

		amdgpu_bo_fence(root, fence, true);
		dma_fence_put(vm->last_update);
		vm->last_update = fence;
974 975
	}

976 977 978 979 980 981
	if (!list_empty(&vm->relocated))
		goto restart;

	return 0;

error:
982 983
	amdgpu_vm_invalidate_level(adev, vm, &vm->root,
				   adev->vm_manager.root_level);
984
	amdgpu_job_free(job);
985
	return r;
986 987
}

/**
 * amdgpu_vm_get_entry - find the entry for an address
 *
 * @p: see amdgpu_pte_update_params definition
 * @addr: virtual address in question
 * @entry: resulting entry or NULL
 * @parent: parent entry
 *
 * Find the vm_pt entry and its parent for the given address.
 */
void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
			 struct amdgpu_vm_pt **entry,
			 struct amdgpu_vm_pt **parent)
1001
{
1002
	unsigned level = p->adev->vm_manager.root_level;
1003

1004 1005 1006
	*parent = NULL;
	*entry = &p->vm->root;
	while ((*entry)->entries) {
1007
		unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
1008

1009
		*parent = *entry;
1010 1011
		*entry = &(*entry)->entries[addr >> shift];
		addr &= (1ULL << shift) - 1;
1012 1013
	}

1014
	if (level != AMDGPU_VM_PTB)
		*entry = NULL;
}

/**
 * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
 *
 * @p: see amdgpu_pte_update_params definition
 * @entry: vm_pt entry to check
 * @parent: parent entry
 * @nptes: number of PTEs updated with this operation
 * @dst: destination address where the PTEs should point to
 * @flags: access flags for the PTEs
 *
 * Check if we can update the PD with a huge page.
 */
static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
					struct amdgpu_vm_pt *entry,
					struct amdgpu_vm_pt *parent,
					unsigned nptes, uint64_t dst,
					uint64_t flags)
1035
{
1036
	uint64_t pde;

	/* In the case of a mixed PT the PDE must point to it */
	if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
	    nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
1041
		/* Set the huge page flag to stop scanning at this PDE */
1042 1043 1044
		flags |= AMDGPU_PDE_PTE;
	}

1045 1046 1047 1048 1049 1050 1051 1052
	if (!(flags & AMDGPU_PDE_PTE)) {
		if (entry->huge) {
			/* Add the entry to the relocated list to update it. */
			entry->huge = false;
			spin_lock(&p->vm->status_lock);
			list_move(&entry->base.vm_status, &p->vm->relocated);
			spin_unlock(&p->vm->status_lock);
		}
1053
		return;
1054
	}
1055

1056
	entry->huge = true;
1057
	amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
1058

1059 1060 1061 1062
	pde = (entry - parent->entries) * 8;
	if (parent->base.bo->shadow)
		p->func(p, parent->base.bo->shadow, pde, dst, 1, 0, flags);
	p->func(p, parent->base.bo, pde, dst, 1, 0, flags);
1063 1064
}

A
Alex Deucher 已提交
1065 1066 1067
/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
1068
 * @params: see amdgpu_pte_update_params definition
A
Alex Deucher 已提交
1069 1070 1071
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
1072
 * @dst: destination address to map to, the next dst inside the function
A
Alex Deucher 已提交
1073 1074
 * @flags: mapping flags
 *
1075
 * Update the page tables in the range @start - @end.
1076
 * Returns 0 for success, -EINVAL for failure.
A
Alex Deucher 已提交
1077
 */
1078
static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1079
				  uint64_t start, uint64_t end,
1080
				  uint64_t dst, uint64_t flags)
A
Alex Deucher 已提交
1081
{
1082 1083
	struct amdgpu_device *adev = params->adev;
	const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
1084

1085
	uint64_t addr, pe_start;
1086
	struct amdgpu_bo *pt;
1087
	unsigned nptes;
A
Alex Deucher 已提交
1088 1089

	/* walk over the address space and update the page tables */
1090 1091 1092 1093 1094 1095 1096
	for (addr = start; addr < end; addr += nptes,
	     dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
		struct amdgpu_vm_pt *entry, *parent;

		amdgpu_vm_get_entry(params, addr, &entry, &parent);
		if (!entry)
			return -ENOENT;
1097

A
Alex Deucher 已提交
1098 1099 1100
		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
1101
			nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
A
Alex Deucher 已提交
1102

1103 1104
		amdgpu_vm_handle_huge_pages(params, entry, parent,
					    nptes, dst, flags);
1105
		/* We don't need to update PTEs for huge pages */
1106
		if (entry->huge)
1107 1108
			continue;

1109
		pt = entry->base.bo;
1110 1111 1112 1113 1114
		pe_start = (addr & mask) * 8;
		if (pt->shadow)
			params->func(params, pt->shadow, pe_start, dst, nptes,
				     AMDGPU_GPU_PAGE_SIZE, flags);
		params->func(params, pt, pe_start, dst, nptes,
1115
			     AMDGPU_GPU_PAGE_SIZE, flags);
A
Alex Deucher 已提交
1116 1117
	}

1118
	return 0;
1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129
}

/*
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @params: see amdgpu_pte_update_params definition
 * @vm: requested vm
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @dst: addr those PTEs should point to
 * @flags: hw mapping flags
1130
 * Returns 0 for success, -EINVAL for failure.
1131
 */
1132
static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params	*params,
1133
				uint64_t start, uint64_t end,
1134
				uint64_t dst, uint64_t flags)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */
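	/* Worked example (a sketch, assuming a 4K base page and a fragment
	 * size of 9): a mapping that is 2MB aligned and at least 2MB long can
	 * be encoded with AMDGPU_PTE_FRAG(9), letting the TLB cache the whole
	 * 2MB range as a single translation.
	 */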
	unsigned max_frag = params->adev->vm_manager.fragment_size;
	int r;

	/* system pages are not physically contiguous */
	if (params->src || !(flags & AMDGPU_PTE_VALID))
1159
		return amdgpu_vm_update_ptes(params, start, end, dst, flags);
1160

1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177
	while (start != end) {
		uint64_t frag_flags, frag_end;
		unsigned frag;

		/* This intentionally wraps around if no bit is set */
		frag = min((unsigned)ffs(start) - 1,
			   (unsigned)fls64(end - start) - 1);
		if (frag >= max_frag) {
			frag_flags = AMDGPU_PTE_FRAG(max_frag);
			frag_end = end & ~((1ULL << max_frag) - 1);
		} else {
			frag_flags = AMDGPU_PTE_FRAG(frag);
			frag_end = start + (1 << frag);
		}

		r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
					  flags | frag_flags);
1178 1179
		if (r)
			return r;
1180

1181 1182
		dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
		start = frag_end;
1183
	}
1184 1185

	return 0;
A
Alex Deucher 已提交
1186 1187 1188 1189 1190 1191
}

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
1192
 * @exclusive: fence we need to sync to
1193
 * @pages_addr: DMA addresses to use for mapping
A
Alex Deucher 已提交
1194
 * @vm: requested vm
1195 1196 1197
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
A
Alex Deucher 已提交
1198 1199 1200
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
1201
 * Fill in the page table entries between @start and @last.
A
Alex Deucher 已提交
1202 1203 1204
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1205
				       struct dma_fence *exclusive,
1206
				       dma_addr_t *pages_addr,
A
Alex Deucher 已提交
1207
				       struct amdgpu_vm *vm,
1208
				       uint64_t start, uint64_t last,
1209
				       uint64_t flags, uint64_t addr,
1210
				       struct dma_fence **fence)
A
Alex Deucher 已提交
1211
{
1212
	struct amdgpu_ring *ring;
1213
	void *owner = AMDGPU_FENCE_OWNER_VM;
A
Alex Deucher 已提交
1214
	unsigned nptes, ncmds, ndw;
1215
	struct amdgpu_job *job;
1216
	struct amdgpu_pte_update_params params;
1217
	struct dma_fence *f = NULL;
A
Alex Deucher 已提交
1218 1219
	int r;

1220 1221
	memset(&params, 0, sizeof(params));
	params.adev = adev;
1222
	params.vm = vm;
1223

1224 1225 1226 1227
	/* sync to everything on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;

1228 1229 1230 1231 1232 1233 1234 1235
	if (vm->use_cpu_for_update) {
		/* params.src is used as flag to indicate system Memory */
		if (pages_addr)
			params.src = ~0;

		/* Wait for PT BOs to be free. PTs share the same resv. object
		 * as the root PD BO
		 */
1236
		r = amdgpu_vm_wait_pd(adev, vm, owner);
1237 1238 1239 1240 1241 1242 1243 1244 1245
		if (unlikely(r))
			return r;

		params.func = amdgpu_vm_cpu_set_ptes;
		params.pages_addr = pages_addr;
		return amdgpu_vm_frag_ptes(&params, start, last + 1,
					   addr, flags);
	}

1246
	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
1247

1248
	nptes = last - start + 1;
A
Alex Deucher 已提交
1249 1250

	/*
	 * reserve space for two commands every (1 << BLOCK_SIZE)
	 *  entries or 2k dwords (whatever is smaller)
	 *
	 * The second command is for the shadow pagetables.
	 */
1256 1257 1258 1259
	if (vm->root.base.bo->shadow)
		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
	else
		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
A
Alex Deucher 已提交
1260 1261 1262 1263

	/* padding, etc. */
	ndw = 64;

1264
	if (pages_addr) {
1265
		/* copy commands needed */
1266
		ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
A
Alex Deucher 已提交
1267

1268
		/* and also PTEs */
A
Alex Deucher 已提交
1269 1270
		ndw += nptes * 2;

1271 1272
		params.func = amdgpu_vm_do_copy_ptes;

A
Alex Deucher 已提交
1273 1274
	} else {
		/* set page commands needed */
1275
		ndw += ncmds * 10;
A
Alex Deucher 已提交
1276

1277
		/* extra commands for begin/end fragments */
1278
		ndw += 2 * 10 * adev->vm_manager.fragment_size;
1279 1280

		params.func = amdgpu_vm_do_set_ptes;
A
Alex Deucher 已提交
1281 1282
	}

1283 1284
	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
	if (r)
A
Alex Deucher 已提交
1285
		return r;
1286

1287
	params.ib = &job->ibs[0];
C
Chunming Zhou 已提交
1288

1289
	if (pages_addr) {
		uint64_t *pte;
		unsigned i;

		/* Put the PTEs at the end of the IB. */
		i = ndw - nptes * 2;
		pte = (uint64_t *)&(job->ibs->ptr[i]);
		params.src = job->ibs->gpu_addr + i * 4;

		for (i = 0; i < nptes; ++i) {
			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
						    AMDGPU_GPU_PAGE_SIZE);
			pte[i] |= flags;
		}
		addr = 0;
1304 1305
	}

1306
	r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
1307 1308 1309
	if (r)
		goto error_free;

1310
	r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1311
			     owner, false);
1312 1313
	if (r)
		goto error_free;
A
Alex Deucher 已提交
1314

1315
	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
1316 1317 1318
	if (r)
		goto error_free;

1319 1320 1321
	r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
	if (r)
		goto error_free;
A
Alex Deucher 已提交
1322

1323 1324
	amdgpu_ring_pad_ib(ring, params.ib);
	WARN_ON(params.ib->length_dw > ndw);
1325 1326
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &f);
1327 1328
	if (r)
		goto error_free;
A
Alex Deucher 已提交
1329

1330
	amdgpu_bo_fence(vm->root.base.bo, f, true);
1331 1332
	dma_fence_put(*fence);
	*fence = f;
A
Alex Deucher 已提交
1333
	return 0;
C
Chunming Zhou 已提交
1334 1335

error_free:
1336
	amdgpu_job_free(job);
1337
	return r;
A
Alex Deucher 已提交
1338 1339
}

1340 1341 1342 1343
/**
 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
 *
 * @adev: amdgpu_device pointer
1344
 * @exclusive: fence we need to sync to
1345
 * @pages_addr: DMA addresses to use for mapping
1346 1347
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
1348
 * @flags: HW flags for the mapping
1349
 * @nodes: array of drm_mm_nodes with the MC addresses
1350 1351 1352 1353 1354 1355 1356
 * @fence: optional resulting fence
 *
 * Split the mapping into smaller chunks so that each update fits
 * into a SDMA IB.
 * Returns 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1357
				      struct dma_fence *exclusive,
1358
				      dma_addr_t *pages_addr,
1359 1360
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo_va_mapping *mapping,
1361
				      uint64_t flags,
1362
				      struct drm_mm_node *nodes,
1363
				      struct dma_fence **fence)
1364
{
1365
	unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1366
	uint64_t pfn, start = mapping->start;
	int r;

	/* Normally bo_va->flags only contains the READABLE and WRITEABLE bits,
	 * but just in case something slips through we filter the flags here first.
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	flags &= ~AMDGPU_PTE_EXECUTABLE;
	flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

1380 1381 1382
	flags &= ~AMDGPU_PTE_MTYPE_MASK;
	flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);

1383 1384 1385 1386 1387 1388
	if ((mapping->flags & AMDGPU_PTE_PRT) &&
	    (adev->asic_type >= CHIP_VEGA10)) {
		flags |= AMDGPU_PTE_PRT;
		flags &= ~AMDGPU_PTE_VALID;
	}

1389 1390
	trace_amdgpu_vm_bo_update(mapping);

1391 1392 1393 1394 1395 1396
	pfn = mapping->offset >> PAGE_SHIFT;
	if (nodes) {
		while (pfn >= nodes->size) {
			pfn -= nodes->size;
			++nodes;
		}
1397
	}
1398

1399
	do {
1400
		dma_addr_t *dma_addr = NULL;
1401 1402
		uint64_t max_entries;
		uint64_t addr, last;
1403

1404 1405 1406 1407 1408 1409 1410 1411
		if (nodes) {
			addr = nodes->start << PAGE_SHIFT;
			max_entries = (nodes->size - pfn) *
				(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
		} else {
			addr = 0;
			max_entries = S64_MAX;
		}
1412

1413
		if (pages_addr) {
1414 1415
			uint64_t count;

1416
			max_entries = min(max_entries, 16ull * 1024ull);
1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432
			for (count = 1; count < max_entries; ++count) {
				uint64_t idx = pfn + count;

				if (pages_addr[idx] !=
				    (pages_addr[idx - 1] + PAGE_SIZE))
					break;
			}

			if (count < min_linear_pages) {
				addr = pfn << PAGE_SHIFT;
				dma_addr = pages_addr;
			} else {
				addr = pages_addr[pfn];
				max_entries = count;
			}

1433 1434
		} else if (flags & AMDGPU_PTE_VALID) {
			addr += adev->vm_manager.vram_base_offset;
1435
			addr += pfn << PAGE_SHIFT;
1436 1437
		}

1438
		last = min((uint64_t)mapping->last, start + max_entries - 1);
1439
		r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
1440 1441 1442 1443 1444
						start, last, flags, addr,
						fence);
		if (r)
			return r;

1445 1446 1447 1448 1449
		pfn += last - start + 1;
		if (nodes && nodes->size == pfn) {
			pfn = 0;
			++nodes;
		}
1450
		start = last + 1;
1451

1452
	} while (unlikely(start != mapping->last + 1));
1453 1454 1455 1456

	return 0;
}

A
Alex Deucher 已提交
1457 1458 1459 1460 1461
/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
1462
 * @clear: if true clear the entries
A
Alex Deucher 已提交
1463 1464 1465 1466 1467 1468
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
1469
			bool clear)
A
Alex Deucher 已提交
1470
{
1471 1472
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
A
Alex Deucher 已提交
1473
	struct amdgpu_bo_va_mapping *mapping;
1474
	dma_addr_t *pages_addr = NULL;
1475
	struct ttm_mem_reg *mem;
1476
	struct drm_mm_node *nodes;
1477
	struct dma_fence *exclusive, **last_update;
1478
	uint64_t flags;
A
Alex Deucher 已提交
1479 1480
	int r;

1481
	if (clear || !bo_va->base.bo) {
1482
		mem = NULL;
1483
		nodes = NULL;
1484 1485
		exclusive = NULL;
	} else {
1486 1487
		struct ttm_dma_tt *ttm;

1488
		mem = &bo_va->base.bo->tbo.mem;
1489 1490
		nodes = mem->mm_node;
		if (mem->mem_type == TTM_PL_TT) {
1491 1492
			ttm = container_of(bo_va->base.bo->tbo.ttm,
					   struct ttm_dma_tt, ttm);
1493
			pages_addr = ttm->dma_address;
1494
		}
1495
		exclusive = reservation_object_get_excl(bo->tbo.resv);
A
Alex Deucher 已提交
1496 1497
	}

1498
	if (bo)
1499
		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1500
	else
1501
		flags = 0x0;
A
Alex Deucher 已提交
1502

1503 1504 1505 1506 1507
	if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
		last_update = &vm->last_update;
	else
		last_update = &bo_va->last_pt_update;

1508 1509
	if (!clear && bo_va->base.moved) {
		bo_va->base.moved = false;
1510
		list_splice_init(&bo_va->valids, &bo_va->invalids);
1511

1512 1513
	} else if (bo_va->cleared != clear) {
		list_splice_init(&bo_va->valids, &bo_va->invalids);
1514
	}
1515 1516

	list_for_each_entry(mapping, &bo_va->invalids, list) {
1517
		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
1518
					       mapping, flags, nodes,
1519
					       last_update);
A
Alex Deucher 已提交
1520 1521 1522 1523
		if (r)
			return r;
	}

1524 1525 1526
	if (vm->use_cpu_for_update) {
		/* Flush HDP */
		mb();
1527
		amdgpu_asic_flush_hdp(adev, NULL);
1528 1529
	}

A
Alex Deucher 已提交
1530
	spin_lock(&vm->status_lock);
1531
	list_del_init(&bo_va->base.vm_status);
A
Alex Deucher 已提交
1532 1533
	spin_unlock(&vm->status_lock);

1534 1535 1536 1537 1538 1539
	list_splice_init(&bo_va->invalids, &bo_va->valids);
	bo_va->cleared = clear;

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
1540 1541
	}

A
Alex Deucher 已提交
1542 1543 1544
	return 0;
}

1545 1546 1547 1548 1549 1550 1551 1552 1553
/**
 * amdgpu_vm_update_prt_state - update the global PRT state
 */
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
	unsigned long flags;
	bool enable;

	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1554
	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1555
	adev->gmc.gmc_funcs->set_prt(adev, enable);
1556 1557 1558
	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
}

1559
/**
1560
 * amdgpu_vm_prt_get - add a PRT user
1561 1562 1563
 */
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
1564
	if (!adev->gmc.gmc_funcs->set_prt)
1565 1566
		return;

1567 1568 1569 1570
	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
		amdgpu_vm_update_prt_state(adev);
}

1571 1572 1573 1574 1575
/**
 * amdgpu_vm_prt_put - drop a PRT user
 */
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
1576
	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1577 1578 1579
		amdgpu_vm_update_prt_state(adev);
}

1580
/**
1581
 * amdgpu_vm_prt_cb - callback for updating the PRT status
1582 1583 1584 1585 1586
 */
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);

1587
	amdgpu_vm_prt_put(cb->adev);
1588 1589 1590
	kfree(cb);
}

1591 1592 1593 1594 1595 1596
/**
 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
 */
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
				 struct dma_fence *fence)
{
1597
	struct amdgpu_prt_cb *cb;
1598

1599
	if (!adev->gmc.gmc_funcs->set_prt)
1600 1601 1602
		return;

	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1603 1604 1605 1606 1607
	if (!cb) {
		/* Last resort when we are OOM */
		if (fence)
			dma_fence_wait(fence, false);

1608
		amdgpu_vm_prt_put(adev);
1609 1610 1611 1612 1613 1614 1615 1616
	} else {
		cb->adev = adev;
		if (!fence || dma_fence_add_callback(fence, &cb->cb,
						     amdgpu_vm_prt_cb))
			amdgpu_vm_prt_cb(fence, &cb->cb);
	}
}

1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631
/**
 * amdgpu_vm_free_mapping - free a mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @mapping: mapping to be freed
 * @fence: fence of the unmap operation
 *
 * Free a mapping and make sure we decrease the PRT usage count if applicable.
 */
static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo_va_mapping *mapping,
				   struct dma_fence *fence)
{
1632 1633 1634 1635
	if (mapping->flags & AMDGPU_PTE_PRT)
		amdgpu_vm_add_prt_cb(adev, fence);
	kfree(mapping);
}
1636

1637 1638 1639 1640 1641 1642 1643 1644 1645 1646
/**
 * amdgpu_vm_prt_fini - finish all prt mappings
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Register a cleanup callback to disable PRT support after VM dies.
 */
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
1647
	struct reservation_object *resv = vm->root.base.bo->tbo.resv;
1648 1649 1650
	struct dma_fence *excl, **shared;
	unsigned i, shared_count;
	int r;
1651

1652 1653 1654 1655 1656 1657 1658 1659 1660
	r = reservation_object_get_fences_rcu(resv, &excl,
					      &shared_count, &shared);
	if (r) {
		/* Not enough memory to grab the fence list, as last resort
		 * block for all the fences to complete.
		 */
		reservation_object_wait_timeout_rcu(resv, true, false,
						    MAX_SCHEDULE_TIMEOUT);
		return;
1661
	}
1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672

	/* Add a callback for each fence in the reservation object */
	amdgpu_vm_prt_get(adev);
	amdgpu_vm_add_prt_cb(adev, excl);

	for (i = 0; i < shared_count; ++i) {
		amdgpu_vm_prt_get(adev);
		amdgpu_vm_add_prt_cb(adev, shared[i]);
	}

	kfree(shared);
1673 1674
}

A
Alex Deucher 已提交
1675 1676 1677 1678 1679
/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
1680 1681
 * @fence: optional resulting fence (unchanged if no work needed to be done
 * or if an error occurred)
A
Alex Deucher 已提交
1682 1683 1684 1685 1686 1687 1688
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1689 1690
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence)
A
Alex Deucher 已提交
1691 1692
{
	struct amdgpu_bo_va_mapping *mapping;
1693
	uint64_t init_pte_value = 0;
1694
	struct dma_fence *f = NULL;
A
Alex Deucher 已提交
1695 1696 1697 1698 1699 1700
	int r;

	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);
1701

1702
		if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START)
1703
			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
Y
Yong Zhao 已提交
1704

1705
		r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
1706
						mapping->start, mapping->last,
Y
Yong Zhao 已提交
1707
						init_pte_value, 0, &f);
1708
		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1709
		if (r) {
1710
			dma_fence_put(f);
A
Alex Deucher 已提交
1711
			return r;
1712
		}
1713
	}
A
Alex Deucher 已提交
1714

1715 1716 1717 1718 1719
	if (fence && f) {
		dma_fence_put(*fence);
		*fence = f;
	} else {
		dma_fence_put(f);
A
Alex Deucher 已提交
1720
	}
1721

A
Alex Deucher 已提交
1722 1723 1724 1725 1726
	return 0;

}

/**
1727
 * amdgpu_vm_handle_moved - handle moved BOs in the PT
A
Alex Deucher 已提交
1728 1729 1730
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
1731
 * @sync: sync object to add fences to
A
Alex Deucher 已提交
1732
 *
1733
 * Make sure all BOs which are moved are updated in the PTs.
A
Alex Deucher 已提交
1734 1735
 * Returns 0 for success.
 *
1736
 * PTs have to be reserved!
A
Alex Deucher 已提交
1737
 */
1738
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1739
			   struct amdgpu_vm *vm)
A
Alex Deucher 已提交
1740
{
1741
	bool clear;
1742
	int r = 0;
A
Alex Deucher 已提交
1743 1744

	spin_lock(&vm->status_lock);
1745
	while (!list_empty(&vm->moved)) {
1746
		struct amdgpu_bo_va *bo_va;
1747
		struct reservation_object *resv;
1748

1749
		bo_va = list_first_entry(&vm->moved,
1750
			struct amdgpu_bo_va, base.vm_status);
A
Alex Deucher 已提交
1751
		spin_unlock(&vm->status_lock);
1752

1753 1754
		resv = bo_va->base.bo->tbo.resv;

		/* Per VM BOs never need to be cleared in the page tables */
		if (resv == vm->root.base.bo->tbo.resv)
			clear = false;
		/* Try to reserve the BO to avoid clearing its ptes */
1759
		else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
1760 1761 1762 1763
			clear = false;
		/* Somebody else is using the BO right now */
		else
			clear = true;
1764 1765

		r = amdgpu_vm_bo_update(adev, bo_va, clear);
A
Alex Deucher 已提交
1766 1767 1768
		if (r)
			return r;

1769 1770 1771
		if (!clear && resv != vm->root.base.bo->tbo.resv)
			reservation_object_unlock(resv);

A
Alex Deucher 已提交
1772 1773 1774 1775
		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

1776
	return r;
A
Alex Deucher 已提交
1777 1778 1779 1780 1781 1782 1783 1784 1785
}

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm.
 * Add @bo to the list of bos associated with the vm
 * Returns newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->base.vm = vm;
	bo_va->base.bo = bo;
	INIT_LIST_HEAD(&bo_va->base.bo_list);
	INIT_LIST_HEAD(&bo_va->base.vm_status);

	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);

	if (!bo)
		return bo_va;

	list_add_tail(&bo_va->base.bo_list, &bo->va);

	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
		return bo_va;

	if (bo->preferred_domains &
	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
		return bo_va;

	/*
	 * We checked all the prerequisites, but it looks like this per VM BO
	 * is currently evicted. Add the BO to the evicted list to make sure it
	 * is validated on next VM use to avoid fault.
	 */
	spin_lock(&vm->status_lock);
	list_move_tail(&bo_va->base.vm_status, &vm->evicted);
	spin_unlock(&vm->status_lock);

	return bo_va;
}

/**
 * amdgpu_vm_bo_insert_map - insert a new mapping
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @mapping: the mapping to insert
 *
 * Insert a new mapping into all structures.
 */
static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va,
				    struct amdgpu_bo_va_mapping *mapping)
{
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo *bo = bo_va->base.bo;

	mapping->bo_va = bo_va;
	list_add(&mapping->list, &bo_va->invalids);
	amdgpu_vm_it_insert(mapping, &vm->va);

	if (mapping->flags & AMDGPU_PTE_PRT)
		amdgpu_vm_prt_get(adev);

	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
		spin_lock(&vm->status_lock);
		if (list_empty(&bo_va->base.vm_status))
			list_add(&bo_va->base.vm_status, &vm->moved);
		spin_unlock(&vm->status_lock);
	}
	trace_amdgpu_vm_bo_map(bo_va, mapping);
}

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: BO size in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	uint64_t eaddr;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fits at this offset */
	eaddr = saddr + size - 1;
	if (saddr >= eaddr ||
	    (bo && offset + size > amdgpu_bo_size(bo)))
		return -EINVAL;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	if (tmp) {
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
			tmp->start, tmp->last + 1);
		return -EINVAL;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);

	return 0;
}
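
/*
 * Illustrative sketch (not part of the driver): mapping a whole BO at a
 * fixed GPU address with amdgpu_vm_bo_add() and amdgpu_vm_bo_map().  The VA
 * and the flag selection are arbitrary example values; both the BO and the
 * VM page tables are assumed to be reserved by the caller, as required by
 * the comments above.
 */
static int __maybe_unused amdgpu_vm_example_map(struct amdgpu_device *adev,
						struct amdgpu_vm *vm,
						struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!bo_va)
		return -ENOMEM;

	/* Map the whole BO read/write at GPU VA 1 MiB */
	return amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, amdgpu_bo_size(bo),
				AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
}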

/**
 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: BO size in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM. Replace existing
 * mappings as we do so.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t saddr, uint64_t offset,
			     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo = bo_va->base.bo;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fits at this offset */
	eaddr = saddr + size - 1;
	if (saddr >= eaddr ||
	    (bo && offset + size > amdgpu_bo_size(bo)))
		return -EINVAL;

	/* Allocate all the needed memory */
	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
	if (r) {
		kfree(mapping);
		return r;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);

	return 0;
}

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->base.vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids)
			return -ENOENT;
	}

	list_del(&mapping->list);
	amdgpu_vm_it_remove(mapping, &vm->va);
	mapping->bo_va = NULL;
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid)
		list_add(&mapping->list, &vm->freed);
	else
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);

	return 0;
}
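
/*
 * Illustrative sketch (not part of the driver): tearing down the example
 * mapping created further above.  After amdgpu_vm_bo_unmap() the mapping
 * sits on vm->freed; the PTEs are only cleared once amdgpu_vm_clear_freed()
 * runs.  The GPU VA must match the start address passed to the map call.
 */
static int __maybe_unused amdgpu_vm_example_unmap(struct amdgpu_device *adev,
						  struct amdgpu_bo_va *bo_va)
{
	/* Drop the mapping that was created at GPU VA 1 MiB */
	return amdgpu_vm_bo_unmap(adev, bo_va, 0x100000);
}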

/**
 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
 *
 * @adev: amdgpu_device pointer
 * @vm: VM structure to use
 * @saddr: start of the range
 * @size: size of the range
 *
 * Remove all mappings in a range, split them as appropriate.
 * Returns 0 for success, error for failure.
 */
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size)
{
	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
	LIST_HEAD(removed);
	uint64_t eaddr;

	eaddr = saddr + size - 1;
	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	/* Allocate all the needed memory */
	before = kzalloc(sizeof(*before), GFP_KERNEL);
	if (!before)
		return -ENOMEM;
	INIT_LIST_HEAD(&before->list);

	after = kzalloc(sizeof(*after), GFP_KERNEL);
	if (!after) {
		kfree(before);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&after->list);

	/* Now gather all removed mappings */
	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	while (tmp) {
		/* Remember mapping split at the start */
		if (tmp->start < saddr) {
			before->start = tmp->start;
			before->last = saddr - 1;
			before->offset = tmp->offset;
			before->flags = tmp->flags;
			list_add(&before->list, &tmp->list);
		}

		/* Remember mapping split at the end */
		if (tmp->last > eaddr) {
			after->start = eaddr + 1;
			after->last = tmp->last;
			after->offset = tmp->offset;
			after->offset += after->start - tmp->start;
			after->flags = tmp->flags;
			list_add(&after->list, &tmp->list);
		}

		list_del(&tmp->list);
		list_add(&tmp->list, &removed);

		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
	}

	/* And free them up */
	list_for_each_entry_safe(tmp, next, &removed, list) {
		amdgpu_vm_it_remove(tmp, &vm->va);
		list_del(&tmp->list);

		if (tmp->start < saddr)
		    tmp->start = saddr;
		if (tmp->last > eaddr)
		    tmp->last = eaddr;

		tmp->bo_va = NULL;
		list_add(&tmp->list, &vm->freed);
		trace_amdgpu_vm_bo_unmap(NULL, tmp);
	}

	/* Insert partial mapping before the range */
	if (!list_empty(&before->list)) {
		amdgpu_vm_it_insert(before, &vm->va);
		if (before->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(before);
	}

	/* Insert partial mapping after the range */
	if (!list_empty(&after->list)) {
		amdgpu_vm_it_insert(after, &vm->va);
		if (after->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);
	} else {
		kfree(after);
	}

	return 0;
}
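
/*
 * Illustrative sketch (not part of the driver): punching a hole into the VA
 * space regardless of which bo_va owns the affected mappings.  The range is
 * an arbitrary example; the caller is assumed to hold the VM reservation.
 */
static int __maybe_unused amdgpu_vm_example_clear_range(struct amdgpu_device *adev,
							struct amdgpu_vm *vm)
{
	/* Remove (and split where needed) everything in [256 MiB, 258 MiB) */
	return amdgpu_vm_bo_clear_mappings(adev, vm, 256ULL << 20, 2ULL << 20);
}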

/**
 * amdgpu_vm_bo_lookup_mapping - find mapping by address
 *
 * @vm: the requested VM
 * @addr: the address to look up
 *
 * Find a mapping by its address.
 */
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr)
{
	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
}
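
/*
 * Illustrative sketch (not part of the driver): resolving a GPU address back
 * to the BO mapped there, e.g. from a fault handler.  Note that the interval
 * tree is indexed in GPU pages, hence the division by AMDGPU_GPU_PAGE_SIZE.
 */
static __maybe_unused struct amdgpu_bo *
amdgpu_vm_example_addr_to_bo(struct amdgpu_vm *vm, uint64_t addr)
{
	struct amdgpu_bo_va_mapping *mapping;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr / AMDGPU_GPU_PAGE_SIZE);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return NULL;

	return mapping->bo_va->base.bo;
}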

/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->base.vm;

	list_del(&bo_va->base.bo_list);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->base.vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		mapping->bo_va = NULL;
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);
	}

	dma_fence_put(bo_va->last_pt_update);
	kfree(bo_va);
}

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 * @evicted: is the BO evicted
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted)
{
	struct amdgpu_vm_bo_base *bo_base;

	list_for_each_entry(bo_base, &bo->va, bo_list) {
		struct amdgpu_vm *vm = bo_base->vm;

		bo_base->moved = true;
		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
			spin_lock(&bo_base->vm->status_lock);
			if (bo->tbo.type == ttm_bo_type_kernel)
				list_move(&bo_base->vm_status, &vm->evicted);
			else
				list_move_tail(&bo_base->vm_status,
					       &vm->evicted);
			spin_unlock(&bo_base->vm->status_lock);
			continue;
		}

		if (bo->tbo.type == ttm_bo_type_kernel) {
			spin_lock(&bo_base->vm->status_lock);
			if (list_empty(&bo_base->vm_status))
				list_add(&bo_base->vm_status, &vm->relocated);
			spin_unlock(&bo_base->vm->status_lock);
			continue;
		}

		spin_lock(&bo_base->vm->status_lock);
		if (list_empty(&bo_base->vm_status))
			list_add(&bo_base->vm_status, &vm->moved);
		spin_unlock(&bo_base->vm->status_lock);
	}
}

static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
	/* Total bits covered by PD + PTs */
	unsigned bits = ilog2(vm_size) + 18;

	/* Make sure the PD is 4K in size up to 8GB address space.
	 * Above that, split equally between PD and PTs.
	 */
	if (vm_size <= 8)
		return (bits - 9);
	else
		return ((bits + 3) / 2);
}
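
/*
 * Worked example for the helper above (illustrative numbers only): for an
 * 8 GB address space, bits = ilog2(8) + 18 = 21 and the block size becomes
 * 21 - 9 = 12, keeping the PD at 4K; for 64 GB, bits = ilog2(64) + 18 = 24
 * and the result is (24 + 3) / 2 = 13, splitting the bits roughly evenly
 * between page directory and page tables.
 */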

/**
 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
 *
 * @adev: amdgpu_device pointer
 * @vm_size: the default vm size if it's set auto
 * @fragment_size_default: default fragment size
 * @max_level: max VMPT level
 * @max_bits: max address space size in bits
 */
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits)
{
	uint64_t tmp;

	/* adjust vm size first */
	if (amdgpu_vm_size != -1) {
		unsigned max_size = 1 << (max_bits - 30);

		vm_size = amdgpu_vm_size;
		if (vm_size > max_size) {
			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
				 amdgpu_vm_size, max_size);
			vm_size = max_size;
		}
	}

	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;

	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
	if (amdgpu_vm_block_size != -1)
		tmp >>= amdgpu_vm_block_size - 9;
	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
	switch (adev->vm_manager.num_level) {
	case 3:
		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
		break;
	case 2:
		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
		break;
	case 1:
		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
		break;
	default:
		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
	}
	/* block size depends on vm size and hw setup */
	if (amdgpu_vm_block_size != -1)
		adev->vm_manager.block_size =
			min((unsigned)amdgpu_vm_block_size, max_bits
			    - AMDGPU_GPU_PAGE_SHIFT
			    - 9 * adev->vm_manager.num_level);
	else if (adev->vm_manager.num_level > 1)
		adev->vm_manager.block_size = 9;
	else
		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);

	if (amdgpu_vm_fragment_size == -1)
		adev->vm_manager.fragment_size = fragment_size_default;
	else
		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;

	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
		 vm_size, adev->vm_manager.num_level + 1,
		 adev->vm_manager.block_size,
		 adev->vm_manager.fragment_size);
}
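
/*
 * Worked example for amdgpu_vm_adjust_size() (illustrative numbers only,
 * with the module parameters left at their defaults): vm_size = 256 GB gives
 * max_pfn = 256 << 18 = 1 << 26 pages.  fls64(1 << 26) - 1 = 26, so
 * tmp = DIV_ROUND_UP(26, 9) - 1 = 2 and num_level becomes min(max_level, 2).
 * With more than one level the block size then defaults to 9 bits, i.e. 512
 * entries per page table.
 */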

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @vm_context: Indicates if it is a GFX or Compute context
 * @pasid: Process address space identifier
 *
 * Init @vm fields.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, unsigned int pasid)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
		AMDGPU_VM_PTE_COUNT(adev) * 8);
	unsigned ring_instance;
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	unsigned long size;
	uint64_t flags;
	int r, i;

	vm->va = RB_ROOT_CACHED;
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		vm->reserved_vmid[i] = NULL;
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->evicted);
	INIT_LIST_HEAD(&vm->relocated);
	INIT_LIST_HEAD(&vm->moved);
	INIT_LIST_HEAD(&vm->freed);

	/* create scheduler entity for page table updates */

	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
	ring_instance %= adev->vm_manager.vm_pte_num_rings;
	ring = adev->vm_manager.vm_pte_rings[ring_instance];
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
	r = drm_sched_entity_init(&ring->sched, &vm->entity,
				  rq, amdgpu_sched_jobs, NULL);
	if (r)
		return r;

	vm->pte_support_ats = false;

	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
						AMDGPU_VM_USE_CPU_FOR_COMPUTE);

		if (adev->asic_type == CHIP_RAVEN)
			vm->pte_support_ats = true;
	} else {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
						AMDGPU_VM_USE_CPU_FOR_GFX);
	}
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
		  "CPU update of VM recommended only for large BAR system\n");
	vm->last_update = NULL;

	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	if (vm->use_cpu_for_update)
		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	else
		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
				AMDGPU_GEM_CREATE_SHADOW);

	size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
	r = amdgpu_bo_create(adev, size, align, true, AMDGPU_GEM_DOMAIN_VRAM,
			     flags, NULL, NULL, &vm->root.base.bo);
	if (r)
		goto error_free_sched_entity;

	r = amdgpu_bo_reserve(vm->root.base.bo, true);
	if (r)
		goto error_free_root;

	r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
			       adev->vm_manager.root_level,
			       vm->pte_support_ats);
	if (r)
		goto error_unreserve;

	vm->root.base.vm = vm;
	list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
	list_add_tail(&vm->root.base.vm_status, &vm->evicted);
	amdgpu_bo_unreserve(vm->root.base.bo);

	if (pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
			      GFP_ATOMIC);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
		if (r < 0)
			goto error_free_root;

		vm->pasid = pasid;
	}

	INIT_KFIFO(vm->faults);
	vm->fault_credit = 16;

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(vm->root.base.bo);

error_free_root:
	amdgpu_bo_unref(&vm->root.base.bo->shadow);
	amdgpu_bo_unref(&vm->root.base.bo);
	vm->root.base.bo = NULL;

error_free_sched_entity:
	drm_sched_entity_fini(&ring->sched, &vm->entity);

	return r;
}
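
/*
 * Illustrative sketch (not part of the driver): the init/fini pairing for a
 * GFX context VM without a PASID.  Real callers embed the amdgpu_vm in a
 * longer-lived structure (e.g. the driver file private data).
 */
static int __maybe_unused amdgpu_vm_example_lifecycle(struct amdgpu_device *adev,
						      struct amdgpu_vm *vm)
{
	int r;

	r = amdgpu_vm_init(adev, vm, AMDGPU_VM_CONTEXT_GFX, 0);
	if (r)
		return r;

	/* ... add BOs, map them, submit work ... */

	amdgpu_vm_fini(adev, vm);
	return 0;
}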

/**
 * amdgpu_vm_free_levels - free PD/PT levels
 *
 * @adev: amdgpu device structure
 * @parent: PD/PT starting level to free
 * @level: level of parent structure
 *
 * Free the page directory or page table level and all sub levels.
 */
static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
				  struct amdgpu_vm_pt *parent,
				  unsigned level)
{
	unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);

	if (parent->base.bo) {
		list_del(&parent->base.bo_list);
		list_del(&parent->base.vm_status);
		amdgpu_bo_unref(&parent->base.bo->shadow);
		amdgpu_bo_unref(&parent->base.bo);
	}

	if (parent->entries)
		for (i = 0; i < num_entries; i++)
			amdgpu_vm_free_levels(adev, &parent->entries[i],
					      level + 1);

	kvfree(parent->entries);
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all bos from the vm bo list
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
	struct amdgpu_bo *root;
	u64 fault;
	int i, r;

	/* Clear pending page faults from IH when the VM is destroyed */
	while (kfifo_get(&vm->faults, &fault))
		amdgpu_ih_clear_fault(adev, fault);

	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}

	drm_sched_entity_fini(vm->entity.sched, &vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp,
					     &vm->va.rb_root, rb) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
			amdgpu_vm_prt_fini(adev, vm);
			prt_fini_needed = false;
		}

		list_del(&mapping->list);
		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
	}

	root = amdgpu_bo_ref(vm->root.base.bo);
	r = amdgpu_bo_reserve(root, true);
	if (r) {
		dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
	} else {
		amdgpu_vm_free_levels(adev, &vm->root,
				      adev->vm_manager.root_level);
		amdgpu_bo_unreserve(root);
	}
	amdgpu_bo_unref(&root);
	dma_fence_put(vm->last_update);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		amdgpu_vmid_free_reserved(adev, vm, i);
}

/**
 * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
 *
 * @adev: amdgpu_device pointer
 * @pasid: PASID do identify the VM
 *
 * This function is expected to be called in interrupt context. Returns
 * true if there was fault credit, false otherwise
 */
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
				  unsigned int pasid)
{
	struct amdgpu_vm *vm;

	spin_lock(&adev->vm_manager.pasid_lock);
	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (!vm) {
		/* VM not found, can't track fault credit */
		spin_unlock(&adev->vm_manager.pasid_lock);
		return true;
	}

	/* No lock needed. Only accessed by IRQ handler */
	if (!vm->fault_credit) {
		/* Too many faults in this VM */
		spin_unlock(&adev->vm_manager.pasid_lock);
		return false;
	}

	vm->fault_credit--;
	spin_unlock(&adev->vm_manager.pasid_lock);
	return true;
}

/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i;

	amdgpu_vmid_mgr_init(adev);

	adev->vm_manager.fence_context =
		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
	spin_lock_init(&adev->vm_manager.prt_lock);
	atomic_set(&adev->vm_manager.num_prt_users, 0);

	/* If not overridden by the user, by default, only in large BAR systems
	 * Compute VM tables will be updated by CPU
	 */
#ifdef CONFIG_X86_64
	if (amdgpu_vm_update_mode == -1) {
		if (amdgpu_vm_is_large_bar(adev))
			adev->vm_manager.vm_update_mode =
				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
		else
			adev->vm_manager.vm_update_mode = 0;
	} else
		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
#else
	adev->vm_manager.vm_update_mode = 0;
#endif

	idr_init(&adev->vm_manager.pasid_idr);
	spin_lock_init(&adev->vm_manager.pasid_lock);
}

/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
	idr_destroy(&adev->vm_manager.pasid_idr);

	amdgpu_vmid_mgr_fini(adev);
}

int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	union drm_amdgpu_vm *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	int r;

	switch (args->in.op) {
	case AMDGPU_VM_OP_RESERVE_VMID:
		/* currently we only need to reserve a vmid from the gfxhub */
		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
		if (r)
			return r;
		break;
	case AMDGPU_VM_OP_UNRESERVE_VMID:
		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
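
/*
 * Userspace view of the ioctl above (illustrative sketch, not part of the
 * driver): reserving and later releasing a VMID through DRM_IOCTL_AMDGPU_VM,
 * assuming "fd" is an open render node for the device.
 *
 *	union drm_amdgpu_vm args = {};
 *
 *	args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
 *	ioctl(fd, DRM_IOCTL_AMDGPU_VM, &args);
 *	...
 *	args.in.op = AMDGPU_VM_OP_UNRESERVE_VMID;
 *	ioctl(fd, DRM_IOCTL_AMDGPU_VM, &args);
 */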