/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

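/*
 * GEM free callback: tear down any PRIME attachment and MMU notifier
 * registration, then drop the driver reference on the BO.
 */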
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

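/*
 * Allocate an amdgpu BO with the requested size, alignment and domain and
 * wrap it in a GEM object. A failed VRAM allocation (other than an
 * interrupted one) is retried with GTT added as a fallback domain.
 */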
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
				int alignment, u32 initial_domain,
				u64 flags, bool kernel,
				struct drm_gem_object **obj)
{
	struct amdgpu_bo *robj;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;

	return 0;
}

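/*
 * Drop all GEM handles still held by open DRM files on device teardown;
 * the WARNs flag user space clients that did not clean up after themselves.
 */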
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_unreference_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new and
 * open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}

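/*
 * Per-BO callback used below and by amdgpu_vm_validate_pt_bos(): fail with
 * -ERESTARTSYS if the BO (or its shadow) is no longer GPU accessible.
 */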
static int amdgpu_gem_vm_check(void *param, struct amdgpu_bo *bo)
{
	/* if anything is swapped out don't swap it in here,
	   just abort and wait for the next CS */
	if (!amdgpu_bo_gpu_accessible(bo))
		return -ERESTARTSYS;

	if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
		return -ERESTARTSYS;

	return 0;
}

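/*
 * Returns true if every BO on the validation list and every page table BO
 * of the VM is still in GPU accessible memory, i.e. the VM can be updated.
 */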
static bool amdgpu_gem_vm_ready(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct amdgpu_bo *bo =
			container_of(entry->bo, struct amdgpu_bo, tbo);
		if (amdgpu_gem_vm_check(NULL, bo))
			return false;
	}

	return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_vm_check, NULL);
}

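/*
 * GEM close callback: drop this file's bo_va reference on the BO and, when
 * it was the last one, remove the mapping and clear the freed page table
 * entries while the buffers are still reserved.
 */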
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va && --bo_va->ref_count == 0) {
		amdgpu_vm_bo_rmv(adev, bo_va);

		if (amdgpu_gem_vm_ready(adev, vm, &list)) {
			struct dma_fence *fence = NULL;

			r = amdgpu_vm_clear_freed(adev, vm, &fence);
			if (unlikely(r)) {
				dev_err(adev->dev, "failed to clear page "
					"tables on GEM object close (%d)\n", r);
			}

			if (fence) {
				amdgpu_bo_fence(bo, fence, true);
				dma_fence_put(fence);
			}
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}

/*
 * GEM ioctls.
 */
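/* Allocate a new BO for user space and return a GEM handle for it. */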
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_create *args = data;
	uint64_t size = args->in.bo_size;
	struct drm_gem_object *gobj;
	uint32_t handle;
	bool kernel = false;
	int r;

	/* reject invalid gem flags */
	if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
				      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
				      AMDGPU_GEM_CREATE_VRAM_CLEARED |
				      AMDGPU_GEM_CREATE_SHADOW |
				      AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
				 AMDGPU_GEM_DOMAIN_GTT |
				 AMDGPU_GEM_DOMAIN_VRAM |
				 AMDGPU_GEM_DOMAIN_GDS |
				 AMDGPU_GEM_DOMAIN_GWS |
				 AMDGPU_GEM_DOMAIN_OA))
		return -EINVAL;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		kernel = true;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else
			return -EINVAL;
	}
	size = roundup(size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     args->in.domain_flags,
				     kernel, &gobj);
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

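/*
 * Turn a range of user memory into a GTT BO. Write access requires an MMU
 * notifier registration; AMDGPU_GEM_USERPTR_VALIDATE additionally gets the
 * user pages and validates the BO into GTT right away.
 */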
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install a MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_CPU, 0,
				     0, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);

		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						 bo->tbo.ttm->pages);
		if (r)
			goto unlock_mmap_sem;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto free_pages;

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto free_pages;

		up_read(&current->mm->mmap_sem);
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;

free_pages:
	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);

unlock_mmap_sem:
	up_read(&current->mm->mmap_sem);

release_object:
	drm_gem_object_unreference_unlocked(gobj);

	return r;
}

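/*
 * Look up a GEM handle and return the fake mmap offset of its BO; userptr
 * BOs and BOs created with NO_CPU_ACCESS may not be mapped.
 */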
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

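/* Thin ioctl wrapper around amdgpu_mode_dumb_mmap(). */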
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

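/*
 * Wait for all fences on the BO's reservation object, up to the absolute
 * timeout supplied by user space, and report whether the BO is idle.
 */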
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
						  timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

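/* Get or set the tiling flags and opaque metadata attached to a BO. */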
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @list: validation list
 * @operation: map, unmap, clear or replace
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    struct list_head *list,
				    uint32_t operation)
{
	int r = -ERESTARTSYS;

	if (!amdgpu_gem_vm_ready(adev, vm, list))
		goto error;

	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		goto error;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE)
		r = amdgpu_vm_bo_update(adev, bo_va, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

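/*
 * Map, unmap, clear or replace a GPU virtual address range for a BO (or for
 * PRT mappings without a BO) in the caller's VM, optionally updating the
 * page tables immediately.
 */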
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	uint64_t va_flags;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"va_address 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->va_address,
			AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}
	if ((args->operation == AMDGPU_VA_OP_MAP) ||
	    (args->operation == AMDGPU_VA_OP_REPLACE)) {
		if (amdgpu_kms_vram_lost(adev, fpriv))
			return -ENODEV;
	}

	INIT_LIST_HEAD(&list);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		tv.shared = false;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

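/*
 * Per-BO operations: query a BO's creation parameters or change its
 * preferred placement domains.
 */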
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = (void __user *)(uintptr_t)args->value;

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->prefered_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->prefered_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

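/*
 * Create a dumb display buffer: a CPU accessible, page aligned VRAM BO with
 * pitch and size derived from the requested width, height and bpp.
 */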
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
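/*
 * Print one debugfs line per GEM object: handle, size, placement, GPU
 * offset and pin count.
 */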
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	unsigned domain;
	const char *placement;
	unsigned pin_count;
	uint64_t offset;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s",
		   id, amdgpu_bo_size(bo), placement);

	offset = ACCESS_ONCE(bo->tbo.mem.start);
	if (offset != AMDGPU_BO_INVALID_OFFSET)
		seq_printf(m, " @ 0x%010Lx", offset);

	pin_count = ACCESS_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);
	seq_printf(m, "\n");

	return 0;
}

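/*
 * debugfs entry point: walk every open DRM file and dump its GEM objects,
 * grouped by the owning process.
 */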
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}