/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

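/*
 * Free a GEM object: tear down any dma-buf import attachment, unregister
 * the MMU notifier of userptr BOs and drop the final reference on the
 * underlying amdgpu_bo.
 */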
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

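/*
 * Allocate a new amdgpu_bo and wrap it in a GEM object. On failure the
 * allocation is retried with progressively relaxed placement: first without
 * AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, then with GTT allowed as a
 * fallback for a VRAM-only request.
 */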
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct reservation_object *resv,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *bo;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, resv, 0, &bo);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
				goto retry;
			}

			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &bo->gem_base;

	return 0;
}

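/*
 * Forcefully release all GEM objects still held by user space clients,
 * e.g. on driver teardown. Any handle found here is a leak, hence the
 * WARN_ONCE.
 */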
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct mm_struct *mm;
	int r;

	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.resv != vm->root.base.bo->tbo.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}

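/*
 * Called on GEM handle close: drop this file's bo_va reference and, once
 * the last reference is gone, remove the VA mapping and clear the freed
 * page table entries, fencing the BO against the clear operation.
 */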
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va && --bo_va->ref_count == 0) {
		amdgpu_vm_bo_rmv(adev, bo_va);

		if (amdgpu_vm_ready(vm)) {
			struct dma_fence *fence = NULL;

			r = amdgpu_vm_clear_freed(adev, vm, &fence);
			if (unlikely(r)) {
				dev_err(adev->dev, "failed to clear page "
					"tables on GEM object close (%d)\n", r);
			}

			if (fence) {
				amdgpu_bo_fence(bo, fence, true);
				dma_fence_put(fence);
			}
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}

/*
 * GEM ioctls.
 */
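/*
 * DRM_AMDGPU_GEM_CREATE: validate the requested flags and domains, convert
 * GDS/GWS/OA sizes to bytes and allocate the BO. BOs created with
 * AMDGPU_GEM_CREATE_VM_ALWAYS_VALID share the reservation object of the VM
 * root PD.
 */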
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct reservation_object *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC))

		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
				 AMDGPU_GEM_DOMAIN_GTT |
				 AMDGPU_GEM_DOMAIN_VRAM |
				 AMDGPU_GEM_DOMAIN_GDS |
				 AMDGPU_GEM_DOMAIN_GWS |
				 AMDGPU_GEM_DOMAIN_OA))
		return -EINVAL;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else
			return -EINVAL;
	}
	size = roundup(size, PAGE_SIZE);

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.base.bo, false);
		if (r)
			return r;

		resv = vm->root.base.bo->tbo.resv;
	}

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     flags, false, resv, &gobj);
	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
		}
		amdgpu_bo_unreserve(vm->root.base.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

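/*
 * DRM_AMDGPU_GEM_USERPTR: wrap a range of anonymous user memory in a GEM
 * object. Writable mappings require an MMU notifier; with
 * AMDGPU_GEM_USERPTR_VALIDATE the user pages are fetched and the BO is
 * validated into GTT right away.
 */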
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install an MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, 0, NULL, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						 bo->tbo.ttm->pages);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto free_pages;

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto free_pages;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;

free_pages:
	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);

release_object:
	drm_gem_object_put_unlocked(gobj);

	return r;
}

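/*
 * Look up the fake mmap offset for a GEM handle. Userptr BOs and BOs
 * created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS may not be CPU mapped, so
 * they are rejected with -EPERM.
 */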
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

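/* DRM_AMDGPU_GEM_MMAP: thin wrapper around amdgpu_mode_dumb_mmap(). */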
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;
	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

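/*
 * DRM_AMDGPU_GEM_WAIT_IDLE: wait for all fences in the BO's reservation
 * object, with the absolute timeout converted to jiffies first.
 */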
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
						  timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put_unlocked(gobj);
	return r;
}

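/*
 * DRM_AMDGPU_GEM_METADATA: get or set the tiling flags and opaque metadata
 * of a BO, holding the BO reservation across the access.
 */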
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @list: validation list
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    struct list_head *list,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		goto error;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE)
		r = amdgpu_vm_bo_update(adev, bo_va, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

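/*
 * DRM_AMDGPU_GEM_VA: map, unmap, clear or replace a mapping in the GPU
 * virtual address space of the calling process' VM. PRT mappings operate
 * on the per-file prt_va instead of a BO-backed bo_va.
 */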
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint64_t va_flags;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"va_address 0x%LX is in reserved area 0x%LX\n",
			args->va_address, AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		tv.shared = false;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

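/*
 * DRM_AMDGPU_GEM_OP: query the creation parameters of a BO or change its
 * preferred placement domains after creation.
 */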
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

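/*
 * Create a dumb buffer for scanout: compute an aligned pitch, round the
 * size up to a full page and allocate a CPU-accessible VRAM BO.
 */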
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     false, NULL, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	unsigned domain;
	const char *placement;
	unsigned pin_count;
	uint64_t offset;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s",
		   id, amdgpu_bo_size(bo), placement);

	offset = READ_ONCE(bo->tbo.mem.start);
	if (offset != AMDGPU_BO_INVALID_OFFSET)
		seq_printf(m, " @ 0x%010Lx", offset);

	pin_count = READ_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);
	seq_printf(m, "\n");

	return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}