/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/reservation.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

static inline int ttm_mem_type_from_place(const struct ttm_place *place,
					  uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (place->flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08llX\n", man->gpu_offset);
	pr_err("    size: %llu\n", man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_place(&placement->placement[i],
						&mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i].flags, mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type  = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->resv == &bo->ttm_resv)
		reservation_object_fini(&bo->ttm_resv);
	mutex_destroy(&bo->wu_mutex);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		kfree(bo);
	}
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	lockdep_assert_held(&bo->resv->lock.base);

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		list_add(&bo->lru, bdev->driver->lru_tail(bo));
		kref_get(&bo->list_kref);

		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
			list_add(&bo->swap, bdev->driver->swap_lru_tail(bo));
			kref_get(&bo->list_kref);
		}
	}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);
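/*
 * A buffer object holds one list_kref reference for each LRU list it is
 * on (the memory type LRU and the global swap LRU).  ttm_bo_del_from_lru()
 * below returns how many of those references were dropped; the caller
 * releases them with ttm_bo_list_ref_sub() once the lru_lock is released.
 */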

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int put_count = 0;

	if (bdev->driver->lru_removal)
		bdev->driver->lru_removal(bo);

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	return put_count;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
	kref_sub(&bo->list_kref, count,
		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
	int put_count;

	spin_lock(&bo->glob->lru_lock);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bo->glob->lru_lock);
	ttm_bo_list_ref_sub(bo, put_count, true);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int put_count = 0;

	lockdep_assert_held(&bo->resv->lock.base);

	if (bdev->driver->lru_removal)
		bdev->driver->lru_removal(bo);

	put_count = ttm_bo_del_from_lru(bo);
	ttm_bo_list_ref_sub(bo, put_count, true);
	ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);

struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo)
{
	return bo->bdev->man[bo->mem.mem_type].lru.prev;
}
EXPORT_SYMBOL(ttm_bo_default_lru_tail);

struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo)
{
	return bo->glob->swap_lru.prev;
}
EXPORT_SYMBOL(ttm_bo_default_swap_lru_tail);
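/*
 * Illustrative sketch (assumed driver code, not part of this file): a
 * driver that does not need a custom LRU ordering simply wires the
 * default helpers above into its struct ttm_bo_driver:
 *
 *	static struct ttm_bo_driver my_bo_driver = {
 *		...
 *		.lru_tail = &ttm_bo_default_lru_tail,
 *		.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
 *	};
 */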

/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		if (bdev->driver->invalidate_caches) {
			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			if (ret)
				pr_err("Can not flush read caches\n");
		}
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Must be called with bo::reserved held.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);

	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);

	ww_mutex_unlock(&bo->resv->lock);
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i;

	fobj = reservation_object_get_list(bo->resv);
	fence = reservation_object_get_excl(bo->resv);
	if (fence && !fence->ops->signaled)
		fence_enable_sw_signaling(fence);

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference_protected(fobj->shared[i],
					reservation_object_held(bo->resv));

		if (!fence->ops->signaled)
			fence_enable_sw_signaling(fence);
	}
}

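/*
 * If all fences on the buffer have signaled, release its memory and
 * backing storage right away.  Otherwise enable fence signaling and queue
 * the buffer on the device's delayed-destroy list so that
 * ttm_bo_delayed_delete() can retire it later.
 */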
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = __ttm_bo_reserve(bo, false, true, NULL);

	if (!ret) {
		if (!ttm_bo_wait(bo, false, true)) {
			put_count = ttm_bo_del_from_lru(bo);

			spin_unlock(&glob->lru_lock);
			ttm_bo_cleanup_memtype_use(bo);

			ttm_bo_list_ref_sub(bo, put_count, true);

			return;
		} else
			ttm_bo_flush_all_fences(bo);

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		__ttm_bo_unreserve(bo);
	}

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * ttm_bo_cleanup_refs_and_unlock
 * If the bo is idle, remove it from the delayed-destroy and lru lists
 * and drop its list references.  If it is not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held; this function
 * will drop both before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	ret = ttm_bo_wait(bo, false, true);

	if (ret && !no_wait_gpu) {
		long lret;
		ww_mutex_unlock(&bo->resv->lock);
		spin_unlock(&glob->lru_lock);

		lret = reservation_object_wait_timeout_rcu(bo->resv,
							   true,
							   interruptible,
							   30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&glob->lru_lock);
		ret = __ttm_bo_reserve(bo, false, true, NULL);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			spin_unlock(&glob->lru_lock);
			return 0;
		}

		/*
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		 */
		ret = ttm_bo_wait(bo, false, true);
		WARN_ON(ret);
	}

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		__ttm_bo_unreserve(bo);
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = __ttm_bo_reserve(entry, false, true, NULL);
		if (remove_all && ret) {
			spin_unlock(&glob->lru_lock);
			ret = __ttm_bo_reserve(entry, false, false, NULL);
			spin_lock(&glob->lru_lock);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			spin_unlock(&glob->lru_lock);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	spin_unlock(&glob->lru_lock);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);
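/*
 * The two helpers below let drivers pause delayed destruction, e.g. around
 * suspend: ttm_bo_lock_delayed_workqueue() cancels the pending work and
 * ttm_bo_unlock_delayed_workqueue() reschedules it when @resched is set.
 */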

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

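/*
 * Move a reserved buffer object out of its current placement, using the
 * placement mask the driver reports through its evict_flags() callback.
 */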
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to expire sync object before buffer eviction\n");
		}
		goto out;
	}

	lockdep_assert_held(&bo->resv->lock.base);

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
				no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
				uint32_t mem_type,
				const struct ttm_place *place,
				bool interruptible,
				bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY, put_count;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &man->lru, lru) {
		ret = __ttm_bo_reserve(bo, false, true, NULL);
		if (!ret) {
			if (place && (place->fpfn || place->lpfn)) {
				/* Don't evict this BO if it's outside of the
				 * requested placement range
				 */
				if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
				    (place->lpfn && place->lpfn <= bo->mem.start)) {
					__ttm_bo_unreserve(bo);
					ret = -EBUSY;
					continue;
				}
			}

			break;
		}
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
					uint32_t mem_type,
					const struct ttm_place *place,
					struct ttm_mem_reg *mem,
					bool interruptible,
					bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type, place,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 const struct ttm_place *place,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((place->flags & man->available_caching) == 0)
		return false;

	cur_flags |= (place->flags & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
			bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;

		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
						&cur_flags);

		if (!type_ok)
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret))
			return ret;
		
		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;
		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
						interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}

	if (!type_found) {
		pr_err("No compatible memory type found.\n");
		return -EINVAL;
	}

	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
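/*
 * Illustrative sketch (assumed driver-side usage, not part of this file):
 * a placement that prefers VRAM but falls back to GTT under pressure.
 *
 *	static const struct ttm_place places[] = {
 *		{ .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC },
 *		{ .flags = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED },
 *	};
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &places[0],
 *		.num_busy_placement = ARRAY_SIZE(places),
 *		.busy_placement = places,
 *	};
 *
 * ttm_bo_mem_space() tries the preferred entries first and only walks
 * busy_placement, evicting as needed, when that fails.
 */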

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;

	lockdep_assert_held(&bo->resv->lock.base);

	/*
	 * Don't wait for the BO on initial allocation. This is important when
	 * the BO has an imported reservation object.
	 */
	if (bo->mem.mem_type != TTM_PL_SYSTEM || bo->ttm != NULL) {
		/*
		 * FIXME: It's possible to pipeline buffer moves.
		 * Have the driver move function wait for idle when necessary,
		 * instead of doing it here.
		 */
		ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
		if (ret)
			return ret;
	}
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

static bool ttm_bo_mem_compat(struct ttm_placement *placement,
			      struct ttm_mem_reg *mem,
			      uint32_t *new_flags)
{
	int i;

	for (i = 0; i < placement->num_placement; i++) {
		const struct ttm_place *heap = &placement->placement[i];
		if (mem->mm_node &&
		    (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	for (i = 0; i < placement->num_busy_placement; i++) {
		const struct ttm_place *heap = &placement->busy_placement[i];
		if (mem->mm_node &&
		    (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	return false;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret;
	uint32_t new_flags;

	lockdep_assert_held(&bo->resv->lock.base);
	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
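/*
 * Illustrative sketch (assumed driver code, not part of this file): a
 * typical pin path reserves the buffer, validates it against a placement
 * like the one sketched above and unreserves it again:
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (!ret) {
 *		ret = ttm_bo_validate(bo, &placement, true, false);
 *		ttm_bo_unreserve(bo);
 *	}
 */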

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct file *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		struct reservation_object *resv,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	bool locked;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	mutex_init(&bo->wu_mutex);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	if (resv) {
		bo->resv = resv;
		lockdep_assert_held(&bo->resv->lock.base);
	} else {
		bo->resv = &bo->ttm_resv;
		reservation_object_init(&bo->ttm_resv);
	}
	atomic_inc(&bo->glob->bo_count);
	drm_vma_node_reset(&bo->vma_node);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
					 bo->mem.num_pages);

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv) {
		locked = ww_mutex_trylock(&bo->resv->lock);
		WARN_ON(!locked);
	}

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, interruptible, false);

	if (!resv) {
		ttm_bo_unreserve(bo);

	} else if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		spin_lock(&bo->glob->lru_lock);
		ttm_bo_add_to_lru(bo);
		spin_unlock(&bo->glob->lru_lock);
	}

	if (unlikely(ret))
		ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

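/*
 * Worst-case kernel memory that a buffer object can pin, accounted against
 * the memory glob when the object is initialized: the (driver specific)
 * object struct itself, the page pointer array of the TTM and the
 * struct ttm_tt.
 */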
size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			bool interruptible,
			struct file *persistent_swap_storage,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
					unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				pr_err("Cleanup eviction failed\n");
			}
		}
		spin_lock(&glob->lru_lock);
	}
	spin_unlock(&glob->lru_lock);
	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		ret = (*man->func->takedown)(man);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
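/*
 * Illustrative sketch (assumed driver code, not part of this file): a
 * driver typically sets up its VRAM manager right after ttm_bo_device_init(),
 * passing the managed size, in pages for the default range manager:
 *
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
 */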

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
J
1430 1431 1432 1433 1434
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);


int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&glob->lru_lock);

	drm_vma_offset_manager_destroy(&bdev->vma_manager);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
				    0x10000000);
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = mapping;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
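/*
 * Illustrative sketch (assumed driver code, not part of this file;
 * my_glob, my_bo_driver, mapping and MY_FILE_PAGE_OFFSET are placeholders;
 * mapping is the address_space backing userspace mmaps of the device):
 *
 *	ret = ttm_bo_device_init(&priv->bdev, my_glob, &my_bo_driver,
 *				 mapping, MY_FILE_PAGE_OFFSET,
 *				 priv->need_dma32);
 */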

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}


EXPORT_SYMBOL(ttm_bo_unmap_virtual);
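/*
 * ttm_bo_wait - wait for all fences on the buffer's reservation object to
 * signal, or return -EBUSY immediately when @no_wait is set.  On success
 * the signaled exclusive fence is dropped and the MOVING flag is cleared.
 */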

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool interruptible, bool no_wait)
{
	struct reservation_object_list *fobj;
	struct reservation_object *resv;
	struct fence *excl;
	long timeout = 15 * HZ;
	int i;

	resv = bo->resv;
	fobj = reservation_object_get_list(resv);
	excl = reservation_object_get_excl(resv);
	if (excl) {
		if (!fence_is_signaled(excl)) {
			if (no_wait)
				return -EBUSY;

			timeout = fence_wait_timeout(excl,
						     interruptible, timeout);
		}
	}

	for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
		struct fence *fence;
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(resv));

		if (!fence_is_signaled(fence)) {
			if (no_wait)
				return -EBUSY;

			timeout = fence_wait_timeout(fence,
						     interruptible, timeout);
		}
	}

	if (timeout < 0)
		return timeout;

	if (timeout == 0)
		return -EBUSY;

	reservation_object_add_excl_fence(resv, NULL);
	clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
	if (unlikely(ret != 0))
		return ret;
	ret = ttm_bo_wait(bo, true, no_wait);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
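/*
 * Illustrative pairing (assumed driver code, not part of this file): grab
 * before the CPU writes to the buffer, release when the writes are done,
 * typically from a driver "synccpu" ioctl:
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, no_wait);
 *	if (!ret) {
 *		... CPU writes to the mapped buffer ...
 *		ttm_bo_synccpu_write_release(bo);
 *	}
 */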

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &glob->swap_lru, swap) {
		ret = __ttm_bo_reserve(bo, false, true, NULL);
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	ret = ttm_bo_wait(bo, false, false);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 *
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	__ttm_bo_unreserve(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);

/**
 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
 * unreserved
 *
 * @bo: Pointer to buffer
 */
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * In the absense of a wait_unlocked API,
	 * Use the bo::wu_mutex to avoid triggering livelocks due to
	 * concurrent use of this function. Note that this use of
	 * bo::wu_mutex can go away if we change locking order to
	 * mmap_sem -> bo::reserve.
	 */
	ret = mutex_lock_interruptible(&bo->wu_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;
	if (!ww_mutex_is_locked(&bo->resv->lock))
		goto out_unlock;
	ret = __ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0))
		goto out_unlock;
	__ttm_bo_unreserve(bo);

out_unlock:
	mutex_unlock(&bo->wu_mutex);
	return ret;
}