/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

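/*
 * Free an amdgpu GEM object: tear down a dma-buf import attachment if there
 * is one, unregister the MMU notifier and drop the driver reference on the BO.
 */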
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

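/*
 * Create the backing BO for a GEM object. The alignment is clamped to at
 * least one page, and a pure VRAM allocation that fails is retried with
 * GTT added as a fallback domain.
 */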
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
				int alignment, u32 initial_domain,
				u64 flags, bool kernel,
				struct drm_gem_object **obj)
{
	struct amdgpu_bo *robj;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, 0, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;

	return 0;
}

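/*
 * Forcibly release all GEM objects still held by user space clients,
 * e.g. when the device is being torn down while handles are still open.
 */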
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new and
 * open ioctl cases.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;
	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}

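/*
 * Counterpart to amdgpu_gem_object_open(): drop the per-VM bo_va reference
 * taken on open and, once the last one is gone, remove the mapping and
 * attach the page-table clearing fence to the BO.
 */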
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va && --bo_va->ref_count == 0) {
		amdgpu_vm_bo_rmv(adev, bo_va);

		if (amdgpu_vm_ready(adev, vm)) {
			struct dma_fence *fence = NULL;

			r = amdgpu_vm_clear_freed(adev, vm, &fence);
			if (unlikely(r)) {
				dev_err(adev->dev, "failed to clear page "
					"tables on GEM object close (%d)\n", r);
			}

			if (fence) {
				amdgpu_bo_fence(bo, fence, true);
				dma_fence_put(fence);
			}
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}

/*
 * GEM ioctls.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_create *args = data;
	uint64_t size = args->in.bo_size;
	struct drm_gem_object *gobj;
	uint32_t handle;
	bool kernel = false;
	int r;

	/* reject invalid gem flags */
	if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
				      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
				      AMDGPU_GEM_CREATE_VRAM_CLEARED))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
				 AMDGPU_GEM_DOMAIN_GTT |
				 AMDGPU_GEM_DOMAIN_VRAM |
				 AMDGPU_GEM_DOMAIN_GDS |
				 AMDGPU_GEM_DOMAIN_GWS |
				 AMDGPU_GEM_DOMAIN_OA))
		return -EINVAL;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		kernel = true;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else
			return -EINVAL;
	}
	size = roundup(size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     args->in.domain_flags,
				     kernel, &gobj);
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}

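/*
 * Create a GEM object backed by user space memory (userptr). Writable
 * mappings must also request AMDGPU_GEM_USERPTR_REGISTER so an MMU notifier
 * can be installed; AMDGPU_GEM_USERPTR_VALIDATE gets the user pages and
 * validates the BO into GTT up front.
 */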
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install a MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_CPU, 0,
				     0, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);

		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						 bo->tbo.ttm->pages);
		if (r)
			goto unlock_mmap_sem;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto free_pages;

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto free_pages;

		up_read(&current->mm->mmap_sem);
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;

free_pages:
	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);

unlock_mmap_sem:
	up_read(&current->mm->mmap_sem);

release_object:
	drm_gem_object_put_unlocked(gobj);

	return r;
}

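/*
 * Look up a GEM handle and return the fake offset user space needs to mmap
 * the BO. Userptr BOs and BOs created with NO_CPU_ACCESS are rejected.
 */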
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

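/* Thin ioctl wrapper around amdgpu_mode_dumb_mmap(). */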
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;
	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

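/*
 * Wait on all fences attached to a BO's reservation object, up to the
 * timeout supplied by user space, and report the result back.
 */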
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
						  timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put_unlocked(gobj);
	return r;
}

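/*
 * Get or set the tiling flags and the opaque metadata blob attached to a
 * BO, depending on args->op.
 */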
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @list: validation list
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    struct list_head *list,
				    uint32_t operation)
{
	int r = -ERESTARTSYS;

	if (!amdgpu_vm_ready(adev, vm))
		goto error;

	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		goto error;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE)
		r = amdgpu_vm_bo_update(adev, bo_va, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

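/*
 * Map, unmap, clear or replace a GPU virtual address range for a BO in the
 * caller's VM. The BO (if any) is reserved together with the page directory,
 * the requested operation is applied, and the VM is updated unless the
 * update was explicitly delayed.
 */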
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	uint64_t va_flags;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"va_address 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->va_address,
			AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}
	if ((args->operation == AMDGPU_VA_OP_MAP) ||
	    (args->operation == AMDGPU_VA_OP_REPLACE)) {
		if (amdgpu_kms_vram_lost(adev, fpriv))
			return -ENODEV;
	}

	INIT_LIST_HEAD(&list);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		tv.shared = false;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

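/*
 * Per-BO operations: query a BO's creation parameters or change its
 * preferred placement domains.
 */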
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

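/*
 * Create a "dumb" buffer suitable for scanout: a CPU-accessible VRAM BO
 * with pitch and size aligned for the display hardware.
 */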
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	unsigned domain;
	const char *placement;
	unsigned pin_count;
	uint64_t offset;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s",
		   id, amdgpu_bo_size(bo), placement);

	offset = ACCESS_ONCE(bo->tbo.mem.start);
	if (offset != AMDGPU_BO_INVALID_OFFSET)
		seq_printf(m, " @ 0x%010Lx", offset);

	pin_count = ACCESS_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);
	seq_printf(m, "\n");

	return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
A
804

805
	mutex_unlock(&dev->filelist_mutex);
A
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}