/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

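/*
 * Translate a placement flag mask into the index of the first memory type it
 * names (TTM_PL_SYSTEM .. TTM_PL_PRIV5).
 */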
static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08lX\n", man->gpu_offset);
	pr_err("    size: %llu\n", man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i], mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type  = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		kfree(bo);
	}
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
				  bool interruptible)
{
	if (interruptible) {
		return wait_event_interruptible(bo->event_queue,
					       !ttm_bo_is_reserved(bo));
	} else {
		wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
		return 0;
	}
}

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	BUG_ON(!ttm_bo_is_reserved(bo));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence)
{
	int ret;

	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
		/**
		 * Deadlock avoidance for multi-bo reserving.
		 */
		if (use_sequence && bo->seq_valid) {
			/**
			 * We've already reserved this one.
			 */
			if (unlikely(sequence == bo->val_seq))
				return -EDEADLK;
			/**
			 * Already reserved by a thread that will not back
			 * off for us. We need to back off.
			 */
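			/* Note: the unsigned subtraction is wrap-around safe. */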
			if (unlikely(sequence - bo->val_seq < (1 << 31)))
				return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		ret = ttm_bo_wait_unreserved(bo, interruptible);

		if (unlikely(ret))
			return ret;
	}

	if (use_sequence) {
		bool wake_up = false;
		/**
		 * Wake up waiters that may need to recheck for deadlock,
		 * if we decreased the sequence number.
		 */
		if (unlikely((bo->val_seq - sequence < (1 << 31))
			     || !bo->seq_valid))
			wake_up = true;

		/*
		 * In the worst case with memory ordering these values can be
		 * seen in the wrong order. However since we call wake_up_all
		 * in that case, this will hopefully not pose a problem,
		 * and the worst case would only cause someone to accidentally
		 * hit -EAGAIN in ttm_bo_reserve when they see old value of
		 * val_seq. However this would only happen if seq_valid was
		 * written before val_seq was, and just means some slightly
		 * increased cpu usage
		 */
		bo->val_seq = sequence;
		bo->seq_valid = true;
		if (wake_up)
			wake_up_all(&bo->event_queue);
	} else {
		bo->seq_valid = false;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_reserve);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

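/*
 * Drop @count list references in one go. With @never_free the caller asserts
 * that these cannot be the last references, so hitting zero is a bug.
 */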
void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
	kref_sub(&bo->list_kref, count,
		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count = 0;
	int ret;

	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
				   sequence);
	if (likely(ret == 0)) {
		spin_lock(&glob->lru_lock);
		put_count = ttm_bo_del_from_lru(bo);
		spin_unlock(&glob->lru_lock);
		ttm_bo_list_ref_sub(bo, put_count, true);
	}

	return ret;
}

int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
				  bool interruptible, uint32_t sequence)
{
	bool wake_up = false;
	int ret;

	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
		WARN_ON(bo->seq_valid && sequence == bo->val_seq);

		ret = ttm_bo_wait_unreserved(bo, interruptible);

		if (unlikely(ret))
			return ret;
	}

	if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
		wake_up = true;

	/**
	 * Wake up waiters that may need to recheck for deadlock,
	 * if we decreased the sequence number.
	 */
	bo->val_seq = sequence;
	bo->seq_valid = true;
	if (wake_up)
		wake_up_all(&bo->event_queue);

	return 0;
}

int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
			    bool interruptible, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count, ret;

	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
	if (likely(!ret)) {
		spin_lock(&glob->lru_lock);
		put_count = ttm_bo_del_from_lru(bo);
		spin_unlock(&glob->lru_lock);
		ttm_bo_list_ref_sub(bo, put_count, true);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_bo_reserve_slowpath);

void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
{
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;

	spin_lock(&glob->lru_lock);
	ttm_bo_unreserve_locked(bo);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);

/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
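		/* fall through */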
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

	if (ret) {
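		/*
		 * The move failed: swap the old placement back in so that
		 * move_notify() can undo any driver-side bookkeeping, then
		 * restore *mem for the error path below.
		 */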
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			pr_err("Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Called with the bo::reserved lock held.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);

	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);

	/*
	 * Since the final reference to this bo may not be dropped by
	 * the current task we have to put a memory barrier here to make
	 * sure the changes done in this function are always visible.
	 *
	 * This function only needs protection against the final kref_put.
	 */
	smp_mb__before_atomic_dec();
}

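/*
 * If the buffer is idle and can be reserved without blocking, release its
 * backing storage right away. Otherwise take a list reference and queue it on
 * the delayed-destroy list so the workqueue can retry once the fence signals.
 */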
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	void *sync_obj = NULL;
	int put_count;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);

	spin_lock(&bdev->fence_lock);
	(void) ttm_bo_wait(bo, false, false, true);
	if (!ret && !bo->sync_obj) {
		spin_unlock(&bdev->fence_lock);
		put_count = ttm_bo_del_from_lru(bo);

		spin_unlock(&glob->lru_lock);
		ttm_bo_cleanup_memtype_use(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);

		return;
	}
	if (bo->sync_obj)
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
	spin_unlock(&bdev->fence_lock);

	if (!ret) {
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
	}

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	if (sync_obj) {
		driver->sync_obj_flush(sync_obj);
		driver->sync_obj_unref(&sync_obj);
	}
	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * function ttm_bo_cleanup_refs_and_unlock
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop both before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, true);

	if (ret && !no_wait_gpu) {
		void *sync_obj;

		/*
		 * Take a reference to the fence and unreserve,
		 * at this point the buffer should be dead, so
		 * no new sync objects can be attached.
		 */
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		spin_unlock(&bdev->fence_lock);

		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
		spin_unlock(&glob->lru_lock);

		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
		driver->sync_obj_unref(&sync_obj);
		if (ret)
			return ret;

		/*
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		 */
		spin_lock(&bdev->fence_lock);
		ret = ttm_bo_wait(bo, false, false, true);
		WARN_ON(ret);
		spin_unlock(&bdev->fence_lock);
		if (ret)
			return ret;

		spin_lock(&glob->lru_lock);
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			spin_unlock(&glob->lru_lock);
			return 0;
		}
	} else
		spin_unlock(&bdev->fence_lock);

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
		if (remove_all && ret) {
			spin_unlock(&glob->lru_lock);
			ret = ttm_bo_reserve_nolru(entry, false, false,
						   false, 0);
			spin_lock(&glob->lru_lock);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			spin_unlock(&glob->lru_lock);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	spin_unlock(&glob->lru_lock);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

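/*
 * Delayed-destroy work item: keeps rescheduling itself while buffers remain
 * on the delayed-destroy list.
 */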
static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	write_lock(&bdev->vm_lock);
	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	write_unlock(&bdev->vm_lock);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to expire sync object before buffer eviction\n");
		}
		goto out;
	}

	BUG_ON(!ttm_bo_is_reserved(bo));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
				no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

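/*
 * Evict the first buffer on @mem_type's LRU list that can be reserved without
 * blocking. Buffers already queued for delayed destruction are cleaned up
 * instead of being moved.
 */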
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
				uint32_t mem_type,
				bool interruptible,
				bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY, put_count;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &man->lru, lru) {
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
					uint32_t mem_type,
					struct ttm_placement *placement,
					struct ttm_mem_reg *mem,
					bool interruptible,
					bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, placement, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}

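/*
 * Choose caching flags for the new placement: keep the buffer's current
 * caching when the manager allows it, then fall back to the manager default,
 * then cached, write-combined and finally uncached.
 */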
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

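/*
 * Return true if @mem_type can satisfy @proposed_placement. On success,
 * *masked_placement holds the memory type flag plus the caching flags
 * supported by both the request and the manager.
 */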
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 uint32_t proposed_placement,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((proposed_placement & man->available_caching) == 0)
		return false;

	cur_flags |= (proposed_placement & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
			bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
						mem_type,
						placement->placement[i],
						&cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			ret = (*man->func->get_node)(man, bo, placement, mem);
			if (unlikely(ret))
				return ret;
		}
		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	for (i = 0; i < placement->num_busy_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type)
			continue;
		if (!ttm_bo_mt_compatible(man,
						mem_type,
						placement->busy_placement[i],
						&cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
				~TTM_PL_MASK_MEMTYPE);


		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
						interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}
	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(!ttm_bo_is_reserved(bo));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

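/*
 * Return the index of the first requested placement that the current memory
 * region already satisfies, or -1 if the region lies outside the allowed page
 * range or matches none of the placements.
 */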
static int ttm_bo_mem_compat(struct ttm_placement *placement,
			     struct ttm_mem_reg *mem)
{
	int i;

	if (mem->mm_node && placement->lpfn != 0 &&
	    (mem->start < placement->fpfn ||
	     mem->start + mem->num_pages > placement->lpfn))
		return -1;

	for (i = 0; i < placement->num_placement; i++) {
		if ((placement->placement[i] & mem->placement &
			TTM_PL_MASK_CACHING) &&
			(placement->placement[i] & mem->placement &
			TTM_PL_MASK_MEM))
			return i;
	}
	return -1;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret;

	BUG_ON(!ttm_bo_is_reserved(bo));
	/* Check that range is valid */
	if (placement->lpfn || placement->fpfn)
		if (placement->fpfn > placement->lpfn ||
			(placement->lpfn - placement->fpfn) < bo->num_pages)
			return -EINVAL;
	/*
	 * Check whether we need to move buffer.
	 */
	ret = ttm_bo_mem_compat(placement, &bo->mem);
	if (ret < 0) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	BUG_ON((placement->fpfn || placement->lpfn) &&
	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));

	return 0;
}

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct file *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	atomic_inc(&bo->glob->bo_count);

	ret = ttm_bo_check_placement(bo, placement);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_bo_validate(bo, placement, interruptible, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			bool interruptible,
			struct file *persistent_swap_storage,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
					unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				pr_err("Cleanup eviction failed\n");
			}
		}
		spin_lock(&glob->lru_lock);
	}
	spin_unlock(&glob->lru_lock);
	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		ret = (*man->func->takedown)(man);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		pr_err("Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);


int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&glob->lru_lock);

	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	write_lock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	write_unlock(&bdev->vm_lock);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	rwlock_init(&bdev->vm_lock);
	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_no_addr_mm;

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	spin_lock_init(&bdev->fence_lock);
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_addr_mm:
	ttm_bo_clean_mm(bdev, 0);
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	loff_t offset = (loff_t) bo->addr_space_offset;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	if (!bdev->dev_mapping)
		return;
	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}


EXPORT_SYMBOL(ttm_bo_unmap_virtual);

static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct ttm_buffer_object *cur_bo;
	unsigned long offset = bo->vm_node->start;
	unsigned long cur_offset;

	while (*cur) {
		parent = *cur;
		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
		cur_offset = cur_bo->vm_node->start;
		if (offset < cur_offset)
			cur = &parent->rb_left;
		else if (offset > cur_offset)
			cur = &parent->rb_right;
		else
			BUG();
	}

	rb_link_node(&bo->vm_rb, parent, cur);
	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	write_lock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		write_unlock(&bdev->vm_lock);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	write_unlock(&bdev->vm_lock);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	write_unlock(&bdev->vm_lock);
	return ret;
}

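/*
 * Wait for the buffer's fence (sync_obj) to signal. Called with the fence
 * lock held; the lock is dropped around the driver wait, so sync_obj is
 * re-checked before it is cleared.
 */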
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	struct ttm_bo_device *bdev = bo->bdev;
	void *sync_obj;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bdev->fence_lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		spin_unlock(&bdev->fence_lock);
		ret = driver->sync_obj_wait(sync_obj,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bdev->fence_lock);
			return ret;
		}
		spin_lock(&bdev->fence_lock);
		if (likely(bo->sync_obj == sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bdev->fence_lock);
		} else {
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bdev->fence_lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bdev->fence_lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &glob->swap_lru, swap) {
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->bdev->fence_lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 *
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);