/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

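/*
 * Translate a placement flag mask into the first memory type it
 * names, scanning from TTM_PL_SYSTEM up through the private types.
 */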
static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08lX\n", man->gpu_offset);
	pr_err("    size: %llu\n", man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i], mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type  = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->resv == &bo->ttm_resv)
		reservation_object_fini(&bo->ttm_resv);
	mutex_destroy(&bo->wu_mutex);
	if (bo->destroy)
		bo->destroy(bo);
	else
		kfree(bo);
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

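/*
 * Put the bo back on its memory type's LRU list (and, if it has backing
 * pages, on the global swap LRU), taking a list reference for each.
 * No-op for pinned (NO_EVICT) buffers.
 */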
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	lockdep_assert_held(&bo->resv->lock.base);

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
	kref_sub(&bo->list_kref, count,
		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
	int put_count;

	spin_lock(&bo->glob->lru_lock);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bo->glob->lru_lock);
	ttm_bo_list_ref_sub(bo, put_count, true);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

/*
 * Called with bo->mutex held.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
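		/* fall through */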
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

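/*
 * Move the buffer to @mem: unmap CPU mappings if needed, create and
 * bind a ttm for non-fixed memory, then transfer the contents with
 * ttm_bo_move_ttm(), the driver's move hook, or a memcpy fallback.
 */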
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		if (bdev->driver->invalidate_caches) {
			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			if (ret)
				pr_err("Can not flush read caches\n");
		}
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Called with bo::reserved held.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver-specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);

	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);

	ww_mutex_unlock(&bo->resv->lock);
}

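/*
 * Release a dying buffer's resources immediately if it is idle and
 * could be reserved without blocking; otherwise put it on the
 * delayed-destroy list and kick the cleanup workqueue.
 */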
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	void *sync_obj = NULL;
	int put_count;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = __ttm_bo_reserve(bo, false, true, false, 0);

	spin_lock(&bdev->fence_lock);
	(void) ttm_bo_wait(bo, false, false, true);
	if (!ret && !bo->sync_obj) {
		spin_unlock(&bdev->fence_lock);
		put_count = ttm_bo_del_from_lru(bo);

		spin_unlock(&glob->lru_lock);
		ttm_bo_cleanup_memtype_use(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);

		return;
	}
	if (bo->sync_obj)
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
	spin_unlock(&bdev->fence_lock);

	if (!ret) {

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		__ttm_bo_unreserve(bo);
	}

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	if (sync_obj) {
		driver->sync_obj_flush(sync_obj);
		driver->sync_obj_unref(&sync_obj);
	}
	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * ttm_bo_cleanup_refs_and_unlock - clean up a dying buffer if it is idle
 *
 * @bo                    The buffer object to clean up.
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 *
 * If the bo is idle, remove it from the delayed-destroy and lru lists
 * and drop its list reference.  If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held; this function
 * will drop both before returning.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, true);

	if (ret && !no_wait_gpu) {
		void *sync_obj;

		/*
		 * Take a reference to the fence and unreserve,
		 * at this point the buffer should be dead, so
		 * no new sync objects can be attached.
		 */
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		spin_unlock(&bdev->fence_lock);

		__ttm_bo_unreserve(bo);
		spin_unlock(&glob->lru_lock);

		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
		driver->sync_obj_unref(&sync_obj);
		if (ret)
			return ret;

		/*
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		 */
		spin_lock(&bdev->fence_lock);
		ret = ttm_bo_wait(bo, false, false, true);
		WARN_ON(ret);
		spin_unlock(&bdev->fence_lock);
		if (ret)
			return ret;

		spin_lock(&glob->lru_lock);
		ret = __ttm_bo_reserve(bo, false, true, false, 0);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			spin_unlock(&glob->lru_lock);
			return 0;
		}
	} else
		spin_unlock(&bdev->fence_lock);

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		__ttm_bo_unreserve(bo);
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = __ttm_bo_reserve(entry, false, true, false, 0);
		if (remove_all && ret) {
			spin_unlock(&glob->lru_lock);
			ret = __ttm_bo_reserve(entry, false, false,
					       false, 0);
			spin_lock(&glob->lru_lock);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			spin_unlock(&glob->lru_lock);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	spin_unlock(&glob->lru_lock);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

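/*
 * Delayed-destroy work item: retry the cleanup pass and re-arm
 * itself while busy buffers remain on the ddestroy list.
 */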
static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to expire sync object before buffer eviction\n");
		}
		goto out;
	}

	lockdep_assert_held(&bo->resv->lock.base);

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
				no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

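/*
 * Evict the first buffer on @mem_type's LRU list that can be
 * reserved without blocking, to make room for a new allocation.
 */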
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
				uint32_t mem_type,
				bool interruptible,
				bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY, put_count;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &man->lru, lru) {
		ret = __ttm_bo_reserve(bo, false, true, false, 0);
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
					uint32_t mem_type,
					struct ttm_placement *placement,
					struct ttm_mem_reg *mem,
					bool interruptible,
					bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, placement, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}

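/*
 * Choose caching flags for a new placement: keep the current caching
 * if allowed, then fall back to the manager's default, then to
 * cached, write-combined and finally uncached.
 */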
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 uint32_t proposed_placement,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((proposed_placement & man->available_caching) == 0)
		return false;

	cur_flags |= (proposed_placement & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
			bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
						mem_type,
						placement->placement[i],
						&cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			ret = (*man->func->get_node)(man, bo, placement, mem);
			if (unlikely(ret))
				return ret;
		}
		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	for (i = 0; i < placement->num_busy_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type)
			continue;
		if (!ttm_bo_mt_compatible(man,
						mem_type,
						placement->busy_placement[i],
						&cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
				~TTM_PL_MASK_MEMTYPE);


		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
						interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}
	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;
	struct ttm_bo_device *bdev = bo->bdev;

	lockdep_assert_held(&bo->resv->lock.base);

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

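/*
 * Check whether the buffer's current memory region already satisfies
 * one of the requested placements; on success the matching placement
 * flags are returned in @new_flags.
 */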
static bool ttm_bo_mem_compat(struct ttm_placement *placement,
			      struct ttm_mem_reg *mem,
			      uint32_t *new_flags)
{
	int i;

	if (mem->mm_node && placement->lpfn != 0 &&
	    (mem->start < placement->fpfn ||
	     mem->start + mem->num_pages > placement->lpfn))
		return false;

	for (i = 0; i < placement->num_placement; i++) {
		*new_flags = placement->placement[i];
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	for (i = 0; i < placement->num_busy_placement; i++) {
		*new_flags = placement->busy_placement[i];
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	return false;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret;
	uint32_t new_flags;

	lockdep_assert_held(&bo->resv->lock.base);
	/* Check that range is valid */
	if (placement->lpfn || placement->fpfn)
		if (placement->fpfn > placement->lpfn ||
			(placement->lpfn - placement->fpfn) < bo->num_pages)
			return -EINVAL;
	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	BUG_ON((placement->fpfn || placement->lpfn) &&
	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));

	return 0;
}

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct file *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	bool locked;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	mutex_init(&bo->wu_mutex);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	bo->resv = &bo->ttm_resv;
	reservation_object_init(bo->resv);
	atomic_inc(&bo->glob->bo_count);
	drm_vma_node_reset(&bo->vma_node);

	ret = ttm_bo_check_placement(bo, placement);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (likely(!ret) &&
	    (bo->type == ttm_bo_type_device ||
	     bo->type == ttm_bo_type_sg))
		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
					 bo->mem.num_pages);

	locked = ww_mutex_trylock(&bo->resv->lock);
	WARN_ON(!locked);

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, interruptible, false);

	ttm_bo_unreserve(bo);

	if (unlikely(ret))
		ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

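/*
 * Worst-case kernel memory accounted for a buffer object backed by a
 * plain struct ttm_tt: the driver's bo structure, the page pointer
 * array and the ttm_tt itself.
 */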
size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			bool interruptible,
			struct file *persistent_swap_storage,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);

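/*
 * Evict every buffer on a memory type's LRU list, e.g. when taking
 * the memory manager down; eviction errors are either returned or
 * merely logged, depending on @allow_errors.
 */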
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
					unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				pr_err("Cleanup eviction failed\n");
			}
		}
		spin_lock(&glob->lru_lock);
	}
	spin_unlock(&glob->lru_lock);
	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		ret = (*man->func->takedown)(man);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		pr_err("Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);


int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&glob->lru_lock);

	drm_vma_offset_manager_destroy(&bdev->vma_manager);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
				    0x10000000);
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = mapping;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	spin_lock_init(&bdev->fence_lock);
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);


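/*
 * Wait for the buffer's pending sync object to signal, or return
 * -EBUSY if @no_wait is set.  Called and returns with
 * bdev->fence_lock held; the lock is dropped across actual waits.
 */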
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	struct ttm_bo_device *bdev = bo->bdev;
	void *sync_obj;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bdev->fence_lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		spin_unlock(&bdev->fence_lock);
		ret = driver->sync_obj_wait(sync_obj,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bdev->fence_lock);
			return ret;
		}
		spin_lock(&bdev->fence_lock);
		if (likely(bo->sync_obj == sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bdev->fence_lock);
		} else {
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bdev->fence_lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bdev->fence_lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &glob->swap_lru, swap) {
		ret = __ttm_bo_reserve(bo, false, true, false, 0);
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->bdev->fence_lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 *
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	__ttm_bo_unreserve(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);

/**
 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
 * unreserved
 *
 * @bo: Pointer to buffer
 */
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * In the absence of a wait_unlocked API,
	 * use the bo::wu_mutex to avoid triggering livelocks due to
	 * concurrent use of this function. Note that this use of
	 * bo::wu_mutex can go away if we change locking order to
	 * mmap_sem -> bo::reserve.
	 */
	ret = mutex_lock_interruptible(&bo->wu_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;
	if (!ww_mutex_is_locked(&bo->resv->lock))
		goto out_unlock;
	ret = __ttm_bo_reserve(bo, true, false, false, NULL);
	if (unlikely(ret != 0))
		goto out_unlock;
	__ttm_bo_unreserve(bo);

out_unlock:
	mutex_unlock(&bo->wu_mutex);
	return ret;
}