/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/hmm.h>
#include <linux/pagemap.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/dma-buf.h>

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>

#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "bif/bif_4_1_d.h"

static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr);
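
/*
 * Note: amdgpu_map_buffer() (defined later in this file) maps pages of
 * a BO into one of the GART copy windows so the SDMA engine can reach
 * memory that has no GPU address yet; amdgpu_ttm_copy_mem_to_mem()
 * below uses window 0 for the source and window 1 for the destination.
 */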

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);

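/* TTM requires this callback; amdgpu has nothing to invalidate here. */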
static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

/**
 * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
 * memory request.
 *
 * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
 * @type: The type of memory requested
 * @man: The memory type manager for each domain
 *
 * This is called by ttm_bo_init_mm() when a buffer object is being
 * initialized.
 */
static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev;

	adev = amdgpu_ttm_adev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		/* GTT memory  */
		man->func = &amdgpu_gtt_mgr_func;
		man->gpu_offset = adev->gmc.gart_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &amdgpu_vram_mgr_func;
		man->gpu_offset = adev->gmc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		/* On-chip GDS memory*/
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_FLAG_UNCACHED;
		man->default_caching = TTM_PL_FLAG_UNCACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

/**
 * amdgpu_evict_flags - Compute placement flags
 *
 * @bo: The buffer object to evict
 * @placement: Possible destination(s) for evicted BO
 *
 * Fill in placement data when ttm_bo_evict() is called
 */
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	/* Don't handle scatter gather BOs */
	if (bo->type == ttm_bo_type_sg) {
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;
	}

	/* Object isn't an AMDGPU object so ignore */
	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}

	abo = ttm_to_amdgpu_bo(bo);
	switch (bo->mem.mem_type) {
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		placement->num_placement = 0;
		placement->num_busy_placement = 0;
		return;

	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			/* Move to system memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			   amdgpu_bo_in_cpu_visible_vram(abo)) {

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							 AMDGPU_GEM_DOMAIN_GTT);
			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			abo->placements[0].lpfn = 0;
			abo->placement.busy_placement = &abo->placements[1];
			abo->placement.num_busy_placement = 1;
		} else {
			/* Move to GTT memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		}
		break;
	case TTM_PL_TT:
	default:
		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		break;
	}
	*placement = abo->placement;
}

/**
 * amdgpu_verify_access - Verify access for a mmap call
 *
 * @bo:	The buffer object to map
 * @filp: The file pointer from the process performing the mmap
 *
 * This is called by ttm_bo_mmap() to verify whether a process
 * has the right to mmap a BO to their process space.
 */
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);

	/*
	 * Don't verify access for KFD BOs. They don't have a GEM
	 * object associated with them.
	 */
	if (abo->kfd_bo)
		return 0;

	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
					  filp->private_data);
}

/**
 * amdgpu_move_null - Register memory for a buffer object
 *
 * @bo: The bo to assign the memory to
 * @new_mem: The memory to be assigned.
 *
 * Assign the memory from new_mem to the memory of the buffer object bo.
 */
static void amdgpu_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

/**
 * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
 *
 * @bo: The bo to assign the memory to.
 * @mm_node: Memory manager node for drm allocator.
 * @mem: The region where the bo resides.
 *
 */
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
				    struct drm_mm_node *mm_node,
				    struct ttm_mem_reg *mem)
{
	uint64_t addr = 0;

	if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
		addr = mm_node->start << PAGE_SHIFT;
		addr += bo->bdev->man[mem->mem_type].gpu_offset;
	}
	return addr;
}

/**
 * amdgpu_find_mm_node - Helper function finds the drm_mm_node corresponding to
 * @offset. It also modifies the offset to be within the drm_mm_node returned
 *
 * @mem: The region where the bo resides.
 * @offset: The offset that drm_mm_node is used for finding.
 *
 */
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
					       unsigned long *offset)
{
	struct drm_mm_node *mm_node = mem->mm_node;

	while (*offset >= (mm_node->size << PAGE_SHIFT)) {
		*offset -= (mm_node->size << PAGE_SHIFT);
		++mm_node;
	}
	return mm_node;
}

/**
 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
 * move and different for a BO to BO copy.
 *
 * @adev: amdgpu device the copy is performed on
 * @src: source buffer object, memory region and byte offset
 * @dst: destination buffer object, memory region and byte offset
 * @size: number of bytes to copy
 * @resv: optional reservation object to wait on before submitting
 * @f: Returns the last fence if multiple jobs are submitted.
 */
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       struct amdgpu_copy_mem *src,
			       struct amdgpu_copy_mem *dst,
			       uint64_t size,
			       struct dma_resv *resv,
			       struct dma_fence **f)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct drm_mm_node *src_mm, *dst_mm;
	uint64_t src_node_start, dst_node_start, src_node_size,
		 dst_node_size, src_page_offset, dst_page_offset;
	struct dma_fence *fence = NULL;
	int r = 0;
	const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
					AMDGPU_GPU_PAGE_SIZE);

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
	src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
					     src->offset;
	src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
	src_page_offset = src_node_start & (PAGE_SIZE - 1);

	dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
	dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
					     dst->offset;
	dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
	dst_page_offset = dst_node_start & (PAGE_SIZE - 1);

	mutex_lock(&adev->mman.gtt_window_lock);

	while (size) {
		unsigned long cur_size;
		uint64_t from = src_node_start, to = dst_node_start;
		struct dma_fence *next;

		/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
		 * begins at an offset, then adjust the size accordingly
		 */
		cur_size = min3(min(src_node_size, dst_node_size), size,
				GTT_MAX_BYTES);
		if (cur_size + src_page_offset > GTT_MAX_BYTES ||
		    cur_size + dst_page_offset > GTT_MAX_BYTES)
			cur_size -= max(src_page_offset, dst_page_offset);

		/* Map only what needs to be accessed. Map src to window 0 and
		 * dst to window 1
		 */
		if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) {
			r = amdgpu_map_buffer(src->bo, src->mem,
					PFN_UP(cur_size + src_page_offset),
					src_node_start, 0, ring,
					&from);
			if (r)
				goto error;
			/* Adjust the offset because amdgpu_map_buffer returns
			 * start of mapped page
			 */
			from += src_page_offset;
		}

		if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) {
			r = amdgpu_map_buffer(dst->bo, dst->mem,
					PFN_UP(cur_size + dst_page_offset),
					dst_node_start, 1, ring,
					&to);
			if (r)
				goto error;
			to += dst_page_offset;
		}

		r = amdgpu_copy_buffer(ring, from, to, cur_size,
				       resv, &next, false, true);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		size -= cur_size;
		if (!size)
			break;

		src_node_size -= cur_size;
		if (!src_node_size) {
			src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
							     src->mem);
			src_node_size = (src_mm->size << PAGE_SHIFT);
			src_page_offset = 0;
		} else {
			src_node_start += cur_size;
			src_page_offset = src_node_start & (PAGE_SIZE - 1);
		}
		dst_node_size -= cur_size;
		if (!dst_node_size) {
			dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
							     dst->mem);
			dst_node_size = (dst_mm->size << PAGE_SHIFT);
			dst_page_offset = 0;
		} else {
			dst_node_start += cur_size;
			dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
		}
	}
error:
	mutex_unlock(&adev->mman.gtt_window_lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_move_blit - Copy an entire buffer to another buffer
 *
 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
 * help move buffers to and from VRAM.
 */
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	dst.bo = bo;
	src.mem = old_mem;
	dst.mem = new_mem;
	src.offset = 0;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
				       new_mem->num_pages << PAGE_SHIFT,
				       bo->base.resv, &fence);
	if (r)
		goto error;

	/* clear the space being freed */
	if (old_mem->mem_type == TTM_PL_VRAM &&
	    (ttm_to_amdgpu_bo(bo)->flags &
	     AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
		struct dma_fence *wipe_fence = NULL;

		r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
				       NULL, &wipe_fence);
		if (r) {
			goto error;
		} else if (wipe_fence) {
			dma_fence_put(fence);
			fence = wipe_fence;
		}
	}

	/* Always block for VM page tables before committing the new location */
	if (bo->type == ttm_bo_type_kernel)
		r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
	else
		r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer
 *
 * Called by amdgpu_bo_move().
 */
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	/* create space/pages for new_mem in GTT space */
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		pr_err("Failed to find GTT space for blit from VRAM\n");
		return r;
	}

	/* set caching flags */
	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	/* Bind the memory to the GTT space */
	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	/* blit VRAM to GTT */
	r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	/* move BO (in tmp_mem) to new_mem */
	r = ttm_bo_move_ttm(bo, ctx, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

/**
 * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM
 *
 * Called by amdgpu_bo_move().
 */
static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
				struct ttm_operation_ctx *ctx,
				struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	/* make space in GTT for old_mem buffer */
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
	if (unlikely(r)) {
		pr_err("Failed to find GTT space for blit to VRAM\n");
		return r;
	}

	/* move/bind old memory to GTT space */
	r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	/* copy to VRAM */
	r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

/**
 * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
 *
 * Called by amdgpu_bo_move()
 */
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
			       struct ttm_mem_reg *mem)
{
	struct drm_mm_node *nodes = mem->mm_node;

	if (mem->mem_type == TTM_PL_SYSTEM ||
	    mem->mem_type == TTM_PL_TT)
		return true;
	if (mem->mem_type != TTM_PL_VRAM)
		return false;

	/* ttm_mem_reg_ioremap only supports contiguous memory */
	if (nodes->size != mem->num_pages)
		return false;

	return ((nodes->start + nodes->size) << PAGE_SHIFT)
		<= adev->gmc.visible_vram_size;
}

/**
 * amdgpu_bo_move - Move a buffer object to a new memory location
 *
 * Called by ttm_bo_handle_move_mem()
 */
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	/* Can't move a pinned BO */
	abo = ttm_to_amdgpu_bo(bo);
	if (WARN_ON_ONCE(abo->pin_count > 0))
		return -EINVAL;

	adev = amdgpu_ttm_adev(bo->bdev);

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if (old_mem->mem_type == AMDGPU_PL_GDS ||
	    old_mem->mem_type == AMDGPU_PL_GWS ||
	    old_mem->mem_type == AMDGPU_PL_OA ||
	    new_mem->mem_type == AMDGPU_PL_GDS ||
	    new_mem->mem_type == AMDGPU_PL_GWS ||
	    new_mem->mem_type == AMDGPU_PL_OA) {
		/* Nothing to save here */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}

	if (!adev->mman.buffer_funcs_enabled) {
		r = -ENODEV;
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
	} else {
		r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
				     new_mem, old_mem);
	}

	if (r) {
memcpy:
		/* Check that all memory is CPU accessible */
		if (!amdgpu_mem_visible(adev, old_mem) ||
		    !amdgpu_mem_visible(adev, new_mem)) {
			pr_err("Move buffer fallback to memcpy unavailable\n");
			return r;
		}

		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			return r;
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
	return 0;
}

/**
 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
 *
 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
 */
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct drm_mm_node *mm_node = mem->mm_node;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
			return -EINVAL;
		/* Only physically contiguous buffers apply. In a contiguous
		 * buffer, size of the first mm_node would match the number of
		 * pages in ttm_mem_reg.
		 */
		if (adev->mman.aper_base_kaddr &&
		    (mm_node->size == mem->num_pages))
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.base = adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

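/* Nothing to undo here; amdgpu_ttm_io_mem_reserve() allocates no resources. */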
static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

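/**
 * amdgpu_ttm_io_mem_pfn - Translate a BO fault page offset into a PFN
 *
 * Walks the drm_mm nodes backing the BO, so faults on non-contiguous
 * VRAM buffers resolve to the correct aperture page.
 */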
static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	struct drm_mm_node *mm;
	unsigned long offset = (page_offset << PAGE_SHIFT);

	mm = amdgpu_find_mm_node(&bo->mem, &offset);
	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
		(offset >> PAGE_SHIFT);
}

/*
 * TTM backend functions.
 */
struct amdgpu_ttm_tt {
	struct ttm_dma_tt	ttm;
	struct drm_gem_object	*gobj;
	u64			offset;
	uint64_t		userptr;
	struct task_struct	*usertask;
	uint32_t		userflags;
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
	struct hmm_range	*range;
#endif
};

#ifdef CONFIG_DRM_AMDGPU_USERPTR
/* flags used by HMM internal, not related to CPU/GPU PTE flags */
static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
	(1 << 0), /* HMM_PFN_VALID */
	(1 << 1), /* HMM_PFN_WRITE */
	0 /* HMM_PFN_DEVICE_PRIVATE */
};

static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
	0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
	0, /* HMM_PFN_NONE */
	0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
};

/**
 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
 * memory and start HMM tracking CPU page table update
 *
 * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
 * once afterwards to stop HMM tracking
 */
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{
	struct ttm_tt *ttm = bo->tbo.ttm;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned long start = gtt->userptr;
	struct vm_area_struct *vma;
	struct hmm_range *range;
	unsigned long timeout;
	struct mm_struct *mm;
	unsigned long i;
	int r = 0;

	mm = bo->notifier.mm;
	if (unlikely(!mm)) {
		DRM_DEBUG_DRIVER("BO is not registered?\n");
		return -EFAULT;
	}

	/* Another get_user_pages is running at the same time?? */
	if (WARN_ON(gtt->range))
		return -EFAULT;

	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
		return -ESRCH;

	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (unlikely(!range)) {
		r = -ENOMEM;
		goto out;
	}
	range->notifier = &bo->notifier;
	range->flags = hmm_range_flags;
	range->values = hmm_range_values;
	range->pfn_shift = PAGE_SHIFT;
	range->start = bo->notifier.interval_tree.start;
	range->end = bo->notifier.interval_tree.last + 1;
	range->default_flags = hmm_range_flags[HMM_PFN_VALID];
	if (!amdgpu_ttm_tt_is_readonly(ttm))
		range->default_flags |= range->flags[HMM_PFN_WRITE];

	range->pfns = kvmalloc_array(ttm->num_pages, sizeof(*range->pfns),
				     GFP_KERNEL);
	if (unlikely(!range->pfns)) {
		r = -ENOMEM;
		goto out_free_ranges;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);
	if (unlikely(!vma || start < vma->vm_start)) {
		r = -EFAULT;
		goto out_unlock;
	}
	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
		vma->vm_file)) {
		r = -EPERM;
		goto out_unlock;
	}
	up_read(&mm->mmap_sem);
	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);

retry:
	range->notifier_seq = mmu_interval_read_begin(&bo->notifier);

	down_read(&mm->mmap_sem);
	r = hmm_range_fault(range, 0);
	up_read(&mm->mmap_sem);
	if (unlikely(r <= 0)) {
		/*
		 * FIXME: This timeout should encompass the retry from
		 * mmu_interval_read_retry() as well.
		 */
		if ((r == 0 || r == -EBUSY) && !time_after(jiffies, timeout))
			goto retry;
		goto out_free_pfns;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		/* FIXME: The pages cannot be touched outside the notifier_lock */
		pages[i] = hmm_device_entry_to_page(range, range->pfns[i]);
		if (unlikely(!pages[i])) {
			pr_err("Page fault failed for pfn[%lu] = 0x%llx\n",
			       i, range->pfns[i]);
			r = -ENOMEM;

			goto out_free_pfns;
		}
	}

	gtt->range = range;
	mmput(mm);

	return 0;

out_unlock:
	up_read(&mm->mmap_sem);
out_free_pfns:
	kvfree(range->pfns);
out_free_ranges:
	kfree(range);
out:
	mmput(mm);
	return r;
}

/**
 * amdgpu_ttm_tt_get_user_pages_done - stop HMM tracking of the CPU page table
 * changes and check if the pages backing this ttm range have been invalidated
 *
 * Returns: true if pages are still valid
 */
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	bool r = false;

	if (!gtt || !gtt->userptr)
		return false;

	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
		gtt->userptr, ttm->num_pages);

	WARN_ONCE(!gtt->range || !gtt->range->pfns,
		"No user pages to check\n");

	if (gtt->range) {
		/*
		 * FIXME: Must always hold notifier_lock for this, and must
		 * not ignore the return code.
		 */
		r = mmu_interval_read_retry(gtt->range->notifier,
					 gtt->range->notifier_seq);
		kvfree(gtt->range->pfns);
		kfree(gtt->range);
		gtt->range = NULL;
	}

	return !r;
}
#endif

/**
 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
 *
 * Called by amdgpu_cs_list_validate(). This creates the page list
 * that backs user memory and will ultimately be mapped into the device
 * address space.
 */
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	unsigned long i;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i] = pages ? pages[i] : NULL;
}

/**
 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
 *
 * Called by amdgpu_ttm_backend_bind()
 **/
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned nents;
	int r;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* Allocate an SG array and squash pages into it */
	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	/* Map SG to device */
	r = -ENOMEM;
	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	/* convert SG to linear array of pages and dma addresses */
	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	return r;
}

/**
 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
 */
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* unmap the pages mapped to the device */
	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	sg_free_table(ttm->sg);

#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
	if (gtt->range) {
		unsigned long i;

		for (i = 0; i < ttm->num_pages; i++) {
			if (ttm->pages[i] !=
				hmm_device_entry_to_page(gtt->range,
					      gtt->range->pfns[i]))
				break;
		}

		WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
	}
#endif
}

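/**
 * amdgpu_ttm_gart_bind - Bind the pages of a ttm_tt into the GART
 *
 * BOs created with AMDGPU_GEM_CREATE_MQD_GFX9 are bound in two steps so
 * that everything past the first page can be mapped with the NC memory
 * type; all other BOs are bound with a single amdgpu_gart_bind() call.
 */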
int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
				struct ttm_buffer_object *tbo,
				uint64_t flags)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
	struct ttm_tt *ttm = tbo->ttm;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
		uint64_t page_idx = 1;

		r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
				ttm->pages, gtt->ttm.dma_address, flags);
		if (r)
			goto gart_bind_fail;

		/* Patch mtype of the second part BO */
		flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
		flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);

		r = amdgpu_gart_bind(adev,
				gtt->offset + (page_idx << PAGE_SHIFT),
				ttm->num_pages - page_idx,
				&ttm->pages[page_idx],
				&(gtt->ttm.dma_address[page_idx]), flags);
	} else {
		r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
				     ttm->pages, gtt->ttm.dma_address, flags);
	}

gart_bind_fail:
	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);

	return r;
}

/**
 * amdgpu_ttm_backend_bind - Bind GTT memory
 *
 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
 * This handles binding GTT memory to the device address space.
 */
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void*)ttm;
	uint64_t flags;
	int r = 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	}
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
	    bo_mem->mem_type == AMDGPU_PL_GWS ||
	    bo_mem->mem_type == AMDGPU_PL_OA)
		return -EINVAL;

	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	/* compute PTE flags relevant to this BO memory */
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);

	/* bind pages into GART page tables */
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
		ttm->pages, gtt->ttm.dma_address, flags);

	if (r)
		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
			  ttm->num_pages, gtt->offset);
	return r;
}

/**
 * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
 */
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
	struct ttm_mem_reg tmp;
	struct ttm_placement placement;
	struct ttm_place placements;
	uint64_t addr, flags;
	int r;

	if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	addr = amdgpu_gmc_agp_addr(bo);
	if (addr != AMDGPU_BO_INVALID_OFFSET) {
		bo->mem.start = addr >> PAGE_SHIFT;
	} else {

		/* allocate GART space */
		tmp = bo->mem;
		tmp.mm_node = NULL;
		placement.num_placement = 1;
		placement.placement = &placements;
		placement.num_busy_placement = 1;
		placement.busy_placement = &placements;
		placements.fpfn = 0;
		placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
		placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
			TTM_PL_FLAG_TT;

		r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
		if (unlikely(r))
			return r;

		/* compute PTE flags for this buffer object */
		flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);

		/* Bind pages */
		gtt->offset = (u64)tmp.start << PAGE_SHIFT;
		r = amdgpu_ttm_gart_bind(adev, bo, flags);
		if (unlikely(r)) {
			ttm_bo_mem_put(bo, &tmp);
			return r;
		}

		ttm_bo_mem_put(bo, &bo->mem);
		bo->mem = tmp;
	}

	bo->offset = (bo->mem.start << PAGE_SHIFT) +
		bo->bdev->man[bo->mem.mem_type].gpu_offset;

	return 0;
}

/**
 * amdgpu_ttm_recover_gart - Rebind GTT pages
 *
 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
 * rebind GTT pages during a GPU reset.
 */
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	uint64_t flags;
	int r;

	if (!tbo->ttm)
		return 0;

	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
	r = amdgpu_ttm_gart_bind(adev, tbo, flags);

	return r;
}

/**
 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
 *
 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
 * ttm_tt_destroy().
 */
static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int r;

	/* if the pages have userptr pinning then clear that first */
	if (gtt->userptr)
		amdgpu_ttm_tt_unpin_userptr(ttm);

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return 0;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	if (r)
		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
			  gtt->ttm.ttm.num_pages, gtt->offset);
	return r;
}

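/* Final teardown of a ttm_tt: drop the usertask reference and free it. */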
static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt->usertask)
		put_task_struct(gtt->usertask);

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func amdgpu_backend_func = {
	.bind = &amdgpu_ttm_backend_bind,
	.unbind = &amdgpu_ttm_backend_unbind,
	.destroy = &amdgpu_ttm_backend_destroy,
};

/**
 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
 *
 * @bo: The buffer object to create a GTT ttm_tt object around
 *
 * Called by ttm_tt_create().
 */
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct amdgpu_ttm_tt *gtt;

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &amdgpu_backend_func;
	gtt->gobj = &bo->base;

	/* allocate space for the uninitialized page entries */
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

/**
 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
 *
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
 */
static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
			struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
		if (!ttm->sg) {
			struct dma_buf_attachment *attach;
			struct sg_table *sgt;

			attach = gtt->gobj->import_attach;
			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
			if (IS_ERR(sgt))
				return PTR_ERR(sgt);

			ttm->sg = sgt;
		}

		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address,
						 ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
	}
#endif

	/* fall back to generic helper to populate the page array
	 * and map them to the device */
	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
}

/**
 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
 *
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 */
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct amdgpu_device *adev;

	if (gtt && gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (ttm->sg && gtt->gobj->import_attach) {
		struct dma_buf_attachment *attach;

		attach = gtt->gobj->import_attach;
		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
		ttm->sg = NULL;
		return;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	adev = amdgpu_ttm_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
		return;
	}
#endif

	/* fall back to generic helper to unmap and unpopulate array */
	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}

/**
 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
 * task
 *
 * @ttm: The ttm_tt object to bind this userptr object to
 * @addr:  The address in the current tasks VM space to use
 * @flags: Requirements of userptr object.
 *
 * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
 * to current task
 */
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->userflags = flags;

	if (gtt->usertask)
		put_task_struct(gtt->usertask);
	gtt->usertask = current->group_leader;
	get_task_struct(gtt->usertask);

	return 0;
}

/**
 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
 */
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return NULL;

	if (gtt->usertask == NULL)
		return NULL;

	return gtt->usertask->mm;
}

/**
 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays inside an
 * address range for the current task.
 *
 */
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	/* Return false if no part of the ttm_tt object lies within
	 * the range
	 */
	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	return true;
}

/**
 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
 */
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL || !gtt->userptr)
		return false;

	return true;
}

/**
 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
 */
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}

/**
 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
 *
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PDE (Page Directory Entry).
 */
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && mem->mem_type == TTM_PL_TT) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching_state == tt_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	return flags;
}

/**
 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
 *
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PTE (Page Table Entry).
 */
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem)
{
	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}

/**
 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
 * object.
 *
 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
 * it can find space for a new object and by ttm_bo_force_list_clean() which is
 * used to clean out a memory space.
 */
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	unsigned long num_pages = bo->mem.num_pages;
	struct drm_mm_node *node = bo->mem.mm_node;
	struct dma_resv_list *flist;
	struct dma_fence *f;
	int i;

	if (bo->type == ttm_bo_type_kernel &&
	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
		return false;

	/* If bo is a KFD BO, check if the bo belongs to the current process.
	 * If true, then return false as any KFD process needs all its BOs to
	 * be resident to run successfully
	 */
	flist = dma_resv_get_list(bo->base.resv);
	if (flist) {
		for (i = 0; i < flist->shared_count; ++i) {
			f = rcu_dereference_protected(flist->shared[i],
				dma_resv_held(bo->base.resv));
			if (amdkfd_fence_check_mm(f, current->mm))
				return false;
		}
	}

	switch (bo->mem.mem_type) {
	case TTM_PL_TT:
		return true;

	case TTM_PL_VRAM:
		/* Check each drm MM node individually */
		while (num_pages) {
			if (place->fpfn < (node->start + node->size) &&
			    !(place->lpfn && place->lpfn <= node->start))
				return true;

			num_pages -= node->size;
			++node;
		}
		return false;

	default:
		break;
	}

	return ttm_bo_eviction_valuable(bo, place);
}

/**
 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
 *
 * @bo:  The buffer object to read/write
 * @offset:  Offset into buffer object
 * @buf:  Secondary buffer to write/read from
 * @len: Length in bytes of access
 * @write:  true if writing
 *
 * This is used to access VRAM that backs a buffer object via MMIO
 * access for debugging purposes.
 */
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				    unsigned long offset,
				    void *buf, int len, int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct drm_mm_node *nodes;
	uint32_t value = 0;
	int ret = 0;
	uint64_t pos;
	unsigned long flags;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return -EIO;

	nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
	pos = (nodes->start << PAGE_SHIFT) + offset;

	while (len && pos < adev->gmc.mc_vram_size) {
		uint64_t aligned_pos = pos & ~(uint64_t)3;
		uint32_t bytes = 4 - (pos & 3);
		uint32_t shift = (pos & 3) * 8;
		uint32_t mask = 0xffffffff << shift;

		if (len < bytes) {
			mask &= 0xffffffff >> (bytes - len) * 8;
			bytes = len;
		}

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
		if (!write || mask != 0xffffffff)
			value = RREG32_NO_KIQ(mmMM_DATA);
		if (write) {
			value &= ~mask;
			value |= (*(uint32_t *)buf << shift) & mask;
			WREG32_NO_KIQ(mmMM_DATA, value);
		}
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
		if (!write) {
			value = (value & mask) >> shift;
			memcpy(buf, &value, bytes);
		}

		ret += bytes;
		buf = (uint8_t *)buf + bytes;
		pos += bytes;
		len -= bytes;
		if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
			++nodes;
			pos = (nodes->start << PAGE_SHIFT);
		}
	}

	return ret;
}

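/*
 * Dispatch table handed to TTM in amdgpu_ttm_init() via
 * ttm_bo_device_init(); the TTM core calls back through these hooks.
 */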
static struct ttm_bo_driver amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.invalidate_caches = &amdgpu_invalidate_caches,
	.init_mem_type = &amdgpu_init_mem_type,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.verify_access = &amdgpu_verify_access,
	.move_notify = &amdgpu_bo_move_notify,
	.release_notify = &amdgpu_bo_release_notify,
	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_free = &amdgpu_ttm_io_mem_free,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory,
	.del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
};

/*
 * Firmware Reservation functions
 */
/**
 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free fw reserved vram if it has been reserved.
 */
static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
		NULL, &adev->fw_vram_usage.va);
}

/**
 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from fw.
 */
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
	uint64_t vram_size = adev->gmc.visible_vram_size;

	adev->fw_vram_usage.va = NULL;
	adev->fw_vram_usage.reserved_bo = NULL;

	if (adev->fw_vram_usage.size == 0 ||
	    adev->fw_vram_usage.size > vram_size)
		return 0;

	return amdgpu_bo_create_kernel_at(adev,
					  adev->fw_vram_usage.start_offset,
					  adev->fw_vram_usage.size,
					  AMDGPU_GEM_DOMAIN_VRAM,
					  &adev->fw_vram_usage.reserved_bo,
					  &adev->fw_vram_usage.va);
}

/*
 * Memory training reservation functions
 */

/**
 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free memory training reserved vram if it has been reserved.
 */
static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
	ctx->c2p_bo = NULL;

	amdgpu_bo_free_kernel(&ctx->p2c_bo, NULL, NULL);
	ctx->p2c_bo = NULL;

	return 0;
}

/**
 * amdgpu_ttm_training_reserve_vram_init - create bo vram reservation from memory training
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from memory training.
 */
static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
{
	int ret;
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

	memset(ctx, 0, sizeof(*ctx));
	if (!adev->fw_vram_usage.mem_train_support) {
		DRM_DEBUG("memory training does not support!\n");
		return 0;
	}

	ctx->c2p_train_data_offset = adev->fw_vram_usage.mem_train_fb_loc;
	ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
	ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;

	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
		  ctx->train_data_size,
		  ctx->p2c_train_data_offset,
		  ctx->c2p_train_data_offset);

	ret = amdgpu_bo_create_kernel_at(adev,
					 ctx->p2c_train_data_offset,
					 ctx->train_data_size,
					 AMDGPU_GEM_DOMAIN_VRAM,
					 &ctx->p2c_bo,
					 NULL);
	if (ret) {
		DRM_ERROR("alloc p2c_bo failed(%d)!\n", ret);
		goto Err_out;
	}

	ret = amdgpu_bo_create_kernel_at(adev,
					 ctx->c2p_train_data_offset,
					 ctx->train_data_size,
					 AMDGPU_GEM_DOMAIN_VRAM,
					 &ctx->c2p_bo,
					 NULL);
	if (ret) {
		DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
		goto Err_out;
	}

	ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
	return 0;

Err_out:
	amdgpu_ttm_training_reserve_vram_fini(adev);
	return ret;
}
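
/*
 * Rough sketch of where the two training buffers land in VRAM (illustrative;
 * the actual offsets come from the firmware-provided values used above):
 *
 *	c2p_bo: at mem_train_fb_loc
 *	p2c_bo: at mc_vram_size - GDDR6_MEM_TRAINING_OFFSET, near the top of VRAM
 */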

/**
 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
 * gtt/vram related fields.
 *
 * This initializes all of the memory space pools that the TTM layer
 * will need such as the GTT space (system memory mapped to the device),
 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
 * can be mapped per VMID.
 */
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	uint64_t gtt_size;
	int r;
	u64 vis_vram_limit;
	void *stolen_vga_buf;

	mutex_init(&adev->mman.gtt_window_lock);

	/* No other users of the address space, so set it to 0 */
	r = ttm_bo_device_init(&adev->mman.bdev,
			       &amdgpu_bo_driver,
			       adev->ddev->anon_inode->i_mapping,
			       adev->ddev->vma_offset_manager,
			       dma_addressing_limited(adev->dev));
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	adev->mman.initialized = true;

	/* We opt to avoid OOM on system page allocations */
	adev->mman.bdev.no_retry = true;

	/* Initialize VRAM pool with all of VRAM divided into pages */
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
			   adev->gmc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}

	/* Reduce size of CPU-visible VRAM if requested */
	vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
	if (amdgpu_vis_vram_limit > 0 &&
	    vis_vram_limit <= adev->gmc.visible_vram_size)
		adev->gmc.visible_vram_size = vis_vram_limit;

	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_buffer_funcs_status(adev, false);
#ifdef CONFIG_64BIT
	adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
						adev->gmc.visible_vram_size);
#endif

	/*
	 * The reserved vram for firmware must be pinned to the specified
	 * place on the VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_fw_reserve_vram_init(adev);
	if (r)
		return r;

	/*
	 * The reserved vram for memory training must be pinned to the specified
	 * place on the VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_training_reserve_vram_init(adev);
	if (r)
		return r;

	/* allocate memory as required for VGA
	 * This is used for VGA emulation and pre-OS scanout buffers to
	 * avoid display artifacts while transitioning between pre-OS
	 * and driver.  */
	r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->stolen_vga_memory,
				    NULL, &stolen_vga_buf);
	if (r)
		return r;

	/*
	 * reserve one TMR (64K) memory at the top of VRAM which holds
	 * IP Discovery data and is protected by PSP.
	 */
	r = amdgpu_bo_create_kernel_at(adev,
				       adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE,
				       DISCOVERY_TMR_SIZE,
				       AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->discovery_memory,
				       NULL);
	if (r)
		return r;

	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
		 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));

	/* Compute GTT size, either based on 3/4th the size of RAM
	 * or whatever the user passed on module init */
	if (amdgpu_gtt_size == -1) {
		struct sysinfo si;

		si_meminfo(&si);
		gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
			       adev->gmc.mc_vram_size),
			       ((uint64_t)si.totalram * si.mem_unit * 3/4));
	} else {
		gtt_size = (uint64_t)amdgpu_gtt_size << 20;
	}

	/* Initialize GTT memory pool */
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
		 (unsigned)(gtt_size / (1024 * 1024)));

	/* Initialize various on-chip memory pools */
	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
			   adev->gds.gds_size);
	if (r) {
		DRM_ERROR("Failed initializing GDS heap.\n");
		return r;
	}

	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
			   adev->gds.gws_size);
	if (r) {
		DRM_ERROR("Failed initializing GWS heap.\n");
		return r;
	}

	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
			   adev->gds.oa_size);
	if (r) {
		DRM_ERROR("Failed initializing OA heap.\n");
		return r;
	}

	/* Register debugfs entries for amdgpu_ttm */
	r = amdgpu_ttm_debugfs_init(adev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}
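
/*
 * Worked example of the GTT sizing heuristic in amdgpu_ttm_init() above
 * (illustrative numbers only): with amdgpu_gtt_size == -1, 16 GiB of system
 * RAM, 8 GiB of VRAM and the default AMDGPU_DEFAULT_GTT_SIZE_MB of 3072 MiB:
 *
 *	gtt_size = min(max(3 GiB, 8 GiB), 16 GiB * 3/4) = 8 GiB
 */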

/**
 * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
 */
void amdgpu_ttm_late_init(struct amdgpu_device *adev)
{
	void *stolen_vga_buf;

	/* return the VGA stolen memory (if any) back to VRAM */
	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
}

/**
 * amdgpu_ttm_fini - De-initialize the TTM memory pools
 */
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
	if (!adev->mman.initialized)
		return;

	amdgpu_ttm_debugfs_fini(adev);
	amdgpu_ttm_training_reserve_vram_fini(adev);
	/* return the IP Discovery TMR memory back to VRAM */
	amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
	amdgpu_ttm_fw_reserve_vram_fini(adev);

	if (adev->mman.aper_base_kaddr)
		iounmap(adev->mman.aper_base_kaddr);
	adev->mman.aper_base_kaddr = NULL;

	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_bo_device_release(&adev->mman.bdev);
	adev->mman.initialized = false;
	DRM_INFO("amdgpu: ttm finalized\n");
}

/**
 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
 *
 * @adev: amdgpu_device pointer
 * @enable: true when we can use buffer functions.
 *
 * Enable/disable use of buffer functions during suspend/resume. This should
 * only be called at bootup or when userspace isn't running.
 */
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
	uint64_t size;
	int r;

	if (!adev->mman.initialized || adev->in_gpu_reset ||
	    adev->mman.buffer_funcs_enabled == enable)
		return;

	if (enable) {
		struct amdgpu_ring *ring;
		struct drm_gpu_scheduler *sched;

		ring = adev->mman.buffer_funcs_ring;
		sched = &ring->sched;
		r = drm_sched_entity_init(&adev->mman.entity,
					  DRM_SCHED_PRIORITY_KERNEL, &sched,
					  1, NULL);
		if (r) {
			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
				  r);
			return;
		}
	} else {
		drm_sched_entity_destroy(&adev->mman.entity);
		dma_fence_put(man->move);
		man->move = NULL;
	}

	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	if (enable)
		size = adev->gmc.real_vram_size;
	else
		size = adev->gmc.visible_vram_size;
	man->size = size >> PAGE_SHIFT;
	adev->mman.buffer_funcs_enabled = enable;
}

int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct amdgpu_device *adev = file_priv->minor->dev->dev_private;

	if (adev == NULL)
		return -EINVAL;

	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}

static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *mem, unsigned num_pages,
			     uint64_t offset, unsigned window,
			     struct amdgpu_ring *ring,
			     uint64_t *addr)
{
	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
	struct amdgpu_device *adev = ring->adev;
	struct ttm_tt *ttm = bo->ttm;
	struct amdgpu_job *job;
	unsigned num_dw, num_bytes;
	dma_addr_t *dma_address;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t flags;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	*addr = adev->gmc.gart_start;
	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = num_pages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
			    &job->ibs[0].ptr[num_dw]);
	if (r)
		goto error_free;

	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}
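
/*
 * Sketch of the GART window addressing used by amdgpu_map_buffer() above
 * (illustrative): each copy window is AMDGPU_GTT_MAX_TRANSFER_SIZE GPU pages
 * wide, so window N starts at
 *
 *	gart_start + N * AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GPU_PAGE_SIZE
 *
 * and the PTEs written via amdgpu_gart_map() redirect that window to the
 * buffer's system pages for the duration of a single copy.
 */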

int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct dma_resv *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;

	uint32_t max_bytes;
	unsigned num_loops, num_dw;
	unsigned i;
	int r;

	if (direct_submit && !ring->sched.ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	if (vm_needs_flush) {
		job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
		job->vm_needs_flush = true;
	}
	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     false);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes);

		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	if (direct_submit)
		r = amdgpu_job_submit_direct(job, ring, fence);
	else
		r = amdgpu_job_submit(job, &adev->mman.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return r;

error_free:
	amdgpu_job_free(job);
	DRM_ERROR("Error scheduling IBs (%d)\n", r);
	return r;
}
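
/*
 * Usage sketch for amdgpu_copy_buffer() (illustrative only; src_gpu_addr,
 * dst_gpu_addr and resv are assumed to come from already-pinned BOs):
 *
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = amdgpu_copy_buffer(adev->mman.buffer_funcs_ring,
 *			       src_gpu_addr, dst_gpu_addr, 1 << 20,
 *			       resv, &fence, false, false);
 *	if (!r && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */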

int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct dma_resv *resv,
		       struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

	struct drm_mm_node *mm_node;
	unsigned long num_pages;
	unsigned int num_loops, num_dw;

	struct amdgpu_job *job;
	int r;

	if (!adev->mman.buffer_funcs_enabled) {
		DRM_ERROR("Trying to clear memory with ring turned off.\n");
		return -EINVAL;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		r = amdgpu_ttm_alloc_gart(&bo->tbo);
		if (r)
			return r;
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;
	num_loops = 0;
	while (num_pages) {
		uint64_t byte_count = mm_node->size << PAGE_SHIFT;

		num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes);
		num_pages -= mm_node->size;
		++mm_node;
	}
	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;

	/* for IB padding */
	num_dw += 64;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	num_pages = bo->tbo.num_pages;
	mm_node = bo->tbo.mem.mm_node;

	while (num_pages) {
		uint64_t byte_count = mm_node->size << PAGE_SHIFT;
		uint64_t dst_addr;

		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
		while (byte_count) {
			uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count,
							   max_bytes);

			amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
						dst_addr, cur_size_in_bytes);

			dst_addr += cur_size_in_bytes;
			byte_count -= cur_size_in_bytes;
		}

		num_pages -= mm_node->size;
		++mm_node;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}
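
/*
 * Usage sketch for amdgpu_fill_buffer() (illustrative only; bo is assumed to
 * be a reserved, populated BO): clear a buffer to zero and wait for the fill.
 *
 *	struct dma_fence *fence = NULL;
 *
 *	if (!amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence) && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */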

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = (uintptr_t)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
	{"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
	{"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
	{"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

/**
 * amdgpu_ttm_vram_read - Linear read access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		value = RREG32_NO_KIQ(mmMM_DATA);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
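
/*
 * Access pattern used above (illustrative description): the MM_INDEX/MM_DATA
 * pair is an indirect window into VRAM.  Bit 31 of MM_INDEX selects the
 * memory aperture, MM_INDEX_HI carries the address bits above bit 30, and
 * each MM_DATA access then transfers one aligned dword at that VRAM offset.
 */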

/**
 * amdgpu_ttm_vram_write - Linear write access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
		WREG32_NO_KIQ(mmMM_DATA, value);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.write = amdgpu_ttm_vram_write,
	.llseek = default_llseek,
};
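
/*
 * Example userspace access to the debugfs file backed by these fops
 * (illustrative; assumes the first DRM minor and debugfs mounted at the
 * usual location):
 *
 *	int fd = open("/sys/kernel/debug/dri/0/amdgpu_vram", O_RDONLY);
 *	uint32_t word;
 *	ssize_t n = pread(fd, &word, sizeof(word), 0);	// offset/size 4-aligned
 */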

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS

/**
 * amdgpu_ttm_gtt_read - Linear read access to GTT memory
 */
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= adev->gart.num_cpu_pages)
			return result;

		page = adev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(adev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

/**
 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
 *
 * This function is used to read memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */
static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	/* retrieve the IOMMU domain if any for this device */
	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		/* Translate the bus address to a physical address.  If
		 * the domain is NULL it means there is no IOMMU active
		 * and the address translation is the identity
		 */
		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_to_user(buf, ptr + off, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}

/**
 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
 *
 * This function is used to write memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */
static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
				 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = bytes < size ? bytes : size;

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap(p);
		r = copy_from_user(ptr + off, buf, bytes);
		kunmap(p);
		if (r)
			return -EFAULT;

		size -= bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_iomem_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_iomem_read,
	.write = amdgpu_iomem_write,
	.llseek = default_llseek
};

static const struct {
	char *name;
	const struct file_operations *fops;
	int domain;
} ttm_debugfs_entries[] = {
	{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
	{ "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
};

#endif

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
		ent = debugfs_create_file(
				ttm_debugfs_entries[count].name,
				S_IFREG | S_IRUGO, root,
				adev,
				ttm_debugfs_entries[count].fops);
		if (IS_ERR(ent))
			return PTR_ERR(ent);
		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
			i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
			i_size_write(ent->d_inode, adev->gmc.gart_size);
		adev->mman.debugfs_entries[count] = ent;
	}

	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
		--count;
#endif

	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
	return 0;
#endif
}

static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
		debugfs_remove(adev->mman.debugfs_entries[i]);
#endif
}