/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

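/**
 * amdgpu_gem_object_free - free a GEM object
 *
 * @gobj: GEM object to free
 *
 * Tears down the prime import (if any), unregisters the MMU notifier
 * and drops the reference on the backing amdgpu_bo.
 */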
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

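/**
 * amdgpu_gem_object_create - allocate a GEM buffer object
 *
 * @adev: amdgpu_device pointer
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least one page
 * @initial_domain: initial placement (VRAM, GTT, ...)
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @kernel: whether this is a kernel side allocation
 * @resv: optional reservation object to share
 * @obj: resulting GEM object
 *
 * On allocation failure the CPU access requirement and then a VRAM-only
 * placement are relaxed before giving up.
 *
 * Returns 0 on success, negative error code on failure.
 */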
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct reservation_object *resv,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *bo;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, resv, 0, &bo);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
				goto retry;
			}

			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &bo->gem_base;

	return 0;
}

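/**
 * amdgpu_gem_force_release - release all GEM objects of all clients
 *
 * @adev: amdgpu_device pointer
 *
 * Warns about and drops any GEM handles that user space clients still
 * hold open, e.g. on driver teardown.
 */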
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new
 * (create) and open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct mm_struct *mm;
	int r;

	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.resv != vm->root.base.bo->tbo.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}

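/**
 * amdgpu_gem_object_close - clean up the VM state of a closed GEM handle
 *
 * @obj: GEM object being closed
 * @file_priv: DRM file of the owning client
 *
 * Drops the bo_va reference for this VM and, on the last reference,
 * removes the bo_va and flushes the freed mappings from the page tables.
 */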
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va && --bo_va->ref_count == 0) {
		amdgpu_vm_bo_rmv(adev, bo_va);

		if (amdgpu_vm_ready(vm)) {
			struct dma_fence *fence = NULL;

			r = amdgpu_vm_clear_freed(adev, vm, &fence);
			if (unlikely(r)) {
				dev_err(adev->dev, "failed to clear page "
					"tables on GEM object close (%d)\n", r);
			}

			if (fence) {
				amdgpu_bo_fence(bo, fence, true);
				dma_fence_put(fence);
			}
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}

/*
 * GEM ioctls.
 */
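/**
 * amdgpu_gem_create_ioctl - create a GEM object
 *
 * @dev: DRM device
 * @data: ioctl argument (union drm_amdgpu_gem_create)
 * @filp: DRM file of the caller
 *
 * Validates the requested flags and domains, creates the buffer object
 * (optionally sharing the per-VM reservation object) and returns a GEM
 * handle for it to user space.
 */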
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct reservation_object *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC))

		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
				 AMDGPU_GEM_DOMAIN_GTT |
				 AMDGPU_GEM_DOMAIN_VRAM |
				 AMDGPU_GEM_DOMAIN_GDS |
				 AMDGPU_GEM_DOMAIN_GWS |
				 AMDGPU_GEM_DOMAIN_OA))
		return -EINVAL;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else
			return -EINVAL;
	}
	size = roundup(size, PAGE_SIZE);

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.base.bo, false);
		if (r)
			return r;

		resv = vm->root.base.bo->tbo.resv;
	}

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     flags, false, resv, &gobj);
	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
		}
		amdgpu_bo_unreserve(vm->root.base.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

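/**
 * amdgpu_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * @dev: DRM device
 * @data: ioctl argument (struct drm_amdgpu_gem_userptr)
 * @filp: DRM file of the caller
 *
 * Wraps a user address range in a GTT buffer object, optionally
 * registering an MMU notifier and pre-validating the user pages.
 */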
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install an MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, 0, NULL, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						 bo->tbo.ttm->pages);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto free_pages;

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto free_pages;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;

free_pages:
	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);

release_object:
	drm_gem_object_put_unlocked(gobj);

	return r;
}

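/**
 * amdgpu_mode_dumb_mmap - look up the mmap offset for a GEM handle
 *
 * @filp: DRM file of the caller
 * @dev: DRM device
 * @handle: GEM handle
 * @offset_p: returned mmap offset
 *
 * Userptr BOs and BOs created without CPU access cannot be mapped.
 */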
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

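/**
 * amdgpu_gem_mmap_ioctl - return the mmap offset of a GEM object
 *
 * @dev: DRM device
 * @data: ioctl argument (union drm_amdgpu_gem_mmap)
 * @filp: DRM file of the caller
 */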
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

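/**
 * amdgpu_gem_wait_idle_ioctl - wait for a GEM object to become idle
 *
 * @dev: DRM device
 * @data: ioctl argument (union drm_amdgpu_gem_wait_idle)
 * @filp: DRM file of the caller
 *
 * Waits on the fences in the BO's reservation object up to the user
 * supplied timeout and reports the signaled state back.
 */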
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
						  timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put_unlocked(gobj);
	return r;
}

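/**
 * amdgpu_gem_metadata_ioctl - get or set buffer metadata and tiling flags
 *
 * @dev: DRM device
 * @data: ioctl argument (struct drm_amdgpu_gem_metadata)
 * @filp: DRM file of the caller
 */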
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @list: validation list
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    struct list_head *list,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		goto error;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE)
		r = amdgpu_vm_bo_update(adev, bo_va, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

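/**
 * amdgpu_gem_va_ioctl - map, unmap, clear or replace a GPU VA range
 *
 * @dev: DRM device
 * @data: ioctl argument (struct drm_amdgpu_gem_va)
 * @filp: DRM file of the caller
 *
 * Validates the requested VA range and flags, reserves the involved
 * buffers and page directory, performs the requested VA operation and
 * optionally flushes the update into the page tables right away.
 */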
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint64_t va_flags;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in reserved area 0x%LX\n",
			args->va_address, AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_VA_HOLE_START &&
	    args->va_address < AMDGPU_VA_HOLE_END) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
			args->va_address, AMDGPU_VA_HOLE_START,
			AMDGPU_VA_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_VA_HOLE_MASK;

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		tv.shared = false;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

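/**
 * amdgpu_gem_op_ioctl - query creation info or change placement of a BO
 *
 * @dev: DRM device
 * @data: ioctl argument (struct drm_amdgpu_gem_op)
 * @filp: DRM file of the caller
 */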
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

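/**
 * amdgpu_mode_dumb_create - create a dumb buffer for scanout
 *
 * @file_priv: DRM file of the caller
 * @dev: DRM device
 * @args: dumb buffer parameters
 *
 * Allocates a CPU accessible, page aligned VRAM buffer with the pitch
 * required for the requested mode and returns a GEM handle for it.
 */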
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     false, NULL, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
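/* Print placement, size, GPU offset and pin count of one BO to debugfs. */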
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	unsigned domain;
	const char *placement;
	unsigned pin_count;
	uint64_t offset;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s",
		   id, amdgpu_bo_size(bo), placement);

	offset = READ_ONCE(bo->tbo.mem.start);
	if (offset != AMDGPU_BO_INVALID_OFFSET)
		seq_printf(m, " @ 0x%010Lx", offset);

	pin_count = READ_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);
	seq_printf(m, "\n");

	return 0;
}

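/* Dump the GEM objects of all clients into the amdgpu_gem_info debugfs file. */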
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}