/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

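/*
 * Map a TTM_PL_FLAG_* placement mask to the index of the lowest
 * memory type set in it.
 */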
static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08lX\n", man->gpu_offset);
	pr_err("    size: %llu\n", man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i], mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type  = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->resv == &bo->ttm_resv)
		reservation_object_fini(&bo->ttm_resv);

	if (bo->destroy)
		bo->destroy(bo);
	else {
		kfree(bo);
	}
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

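/*
 * Put @bo back on its memory manager's LRU list, and on the global swap
 * LRU as well if it is backed by a ttm, taking one list_kref reference
 * per list. Pinned (NO_EVICT) buffers are left off the lists. The bo
 * must be reserved.
 */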
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	lockdep_assert_held(&bo->resv->lock.base);

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

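/*
 * Take @bo off the LRU and swap lists. Returns the number of list
 * references removed; the caller drops them via ttm_bo_list_ref_sub()
 * once the lru_lock has been released.
 */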
int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
	kref_sub(&bo->list_kref, count,
		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
	int put_count;

	spin_lock(&bo->glob->lru_lock);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bo->glob->lru_lock);
	ttm_bo_list_ref_sub(bo, put_count, true);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
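		/* fall through */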
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

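/*
 * Move the buffer to @mem: kill CPU mappings when the caching or PCI
 * status changes, create and bind a ttm if the new memory type needs
 * one, and hand the actual copy to the cheapest available path
 * (ttm move, driver move hook, or memcpy).
 */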
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			pr_err("Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Must be called with bo::reserved held.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);

	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);

	ww_mutex_unlock(&bo->resv->lock);
}

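/*
 * If the bo is idle and can be reserved without blocking, release its
 * resources right away; otherwise grab a reference to its fence and
 * queue it on the delayed destroy list, to be retried from the
 * workqueue.
 */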
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	void *sync_obj = NULL;
	int put_count;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);

	spin_lock(&bdev->fence_lock);
	(void) ttm_bo_wait(bo, false, false, true);
	if (!ret && !bo->sync_obj) {
		spin_unlock(&bdev->fence_lock);
		put_count = ttm_bo_del_from_lru(bo);

		spin_unlock(&glob->lru_lock);
		ttm_bo_cleanup_memtype_use(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);

		return;
	}
	if (bo->sync_obj)
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
	spin_unlock(&bdev->fence_lock);

	if (!ret)
		ww_mutex_unlock(&bo->resv->lock);

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	if (sync_obj) {
		driver->sync_obj_flush(sync_obj);
		driver->sync_obj_unref(&sync_obj);
	}
	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * function ttm_bo_cleanup_refs_and_unlock
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop both before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, true);

	if (ret && !no_wait_gpu) {
		void *sync_obj;

		/*
		 * Take a reference to the fence and unreserve,
		 * at this point the buffer should be dead, so
		 * no new sync objects can be attached.
		 */
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		spin_unlock(&bdev->fence_lock);

		ww_mutex_unlock(&bo->resv->lock);
		spin_unlock(&glob->lru_lock);

		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
		driver->sync_obj_unref(&sync_obj);
		if (ret)
			return ret;

		/*
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		 */
		spin_lock(&bdev->fence_lock);
		ret = ttm_bo_wait(bo, false, false, true);
		WARN_ON(ret);
		spin_unlock(&bdev->fence_lock);
		if (ret)
			return ret;

		spin_lock(&glob->lru_lock);
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			spin_unlock(&glob->lru_lock);
			return 0;
		}
	} else
		spin_unlock(&bdev->fence_lock);

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		ww_mutex_unlock(&bo->resv->lock);
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
		if (remove_all && ret) {
			spin_unlock(&glob->lru_lock);
			ret = ttm_bo_reserve_nolru(entry, false, false,
						   false, 0);
			spin_lock(&glob->lru_lock);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			spin_unlock(&glob->lru_lock);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	spin_unlock(&glob->lru_lock);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to expire sync object before buffer eviction\n");
		}
		goto out;
	}

	lockdep_assert_held(&bo->resv->lock.base);

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
				no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

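/*
 * Evict the first reservable buffer on @mem_type's LRU list. Shared by
 * ttm_bo_mem_force_space() and the memory type takedown paths.
 */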
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
				uint32_t mem_type,
				bool interruptible,
				bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY, put_count;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &man->lru, lru) {
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
					uint32_t mem_type,
					struct ttm_placement *placement,
					struct ttm_mem_reg *mem,
					bool interruptible,
					bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, placement, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

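/*
 * Returns true if @mem_type can satisfy @proposed_placement, i.e. the
 * type is allowed by the placement mask and offers at least one of the
 * requested caching modes. On success, *masked_placement holds the
 * flags narrowed to what the manager actually provides.
 */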
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 uint32_t proposed_placement,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((proposed_placement & man->available_caching) == 0)
		return false;

	cur_flags |= (proposed_placement & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
			bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
						mem_type,
						placement->placement[i],
						&cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			ret = (*man->func->get_node)(man, bo, placement, mem);
			if (unlikely(ret))
				return ret;
		}
		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	for (i = 0; i < placement->num_busy_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type)
			continue;
		if (!ttm_bo_mt_compatible(man,
						mem_type,
						placement->busy_placement[i],
						&cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
						interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}
	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      bool interruptible,
			      bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;
	struct ttm_bo_device *bdev = bo->bdev;

	lockdep_assert_held(&bo->resv->lock.base);

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

static int ttm_bo_mem_compat(struct ttm_placement *placement,
			     struct ttm_mem_reg *mem)
{
	int i;

	if (mem->mm_node && placement->lpfn != 0 &&
	    (mem->start < placement->fpfn ||
	     mem->start + mem->num_pages > placement->lpfn))
		return -1;

	for (i = 0; i < placement->num_placement; i++) {
		if ((placement->placement[i] & mem->placement &
			TTM_PL_MASK_CACHING) &&
			(placement->placement[i] & mem->placement &
			TTM_PL_MASK_MEM))
			return i;
	}
	return -1;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret;

	lockdep_assert_held(&bo->resv->lock.base);
	/* Check that range is valid */
	if (placement->lpfn || placement->fpfn)
		if (placement->fpfn > placement->lpfn ||
			(placement->lpfn - placement->fpfn) < bo->num_pages)
			return -EINVAL;
	/*
	 * Check whether we need to move buffer.
	 */
	ret = ttm_bo_mem_compat(placement, &bo->mem);
	if (ret < 0) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
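/*
 * Illustrative caller sketch (not part of this file): the bo must be
 * reserved across validation, e.g.
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (!ret) {
 *		ret = ttm_bo_validate(bo, &placement, true, false);
 *		ttm_bo_unreserve(bo);
 *	}
 */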

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	BUG_ON((placement->fpfn || placement->lpfn) &&
	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));

	return 0;
}

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct file *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	bool locked;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	bo->resv = &bo->ttm_resv;
	reservation_object_init(bo->resv);
	atomic_inc(&bo->glob->bo_count);
	drm_vma_node_reset(&bo->vma_node);

	ret = ttm_bo_check_placement(bo, placement);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (likely(!ret) &&
	    (bo->type == ttm_bo_type_device ||
	     bo->type == ttm_bo_type_sg))
		ret = ttm_bo_setup_vm(bo);

	locked = ww_mutex_trylock(&bo->resv->lock);
	WARN_ON(!locked);

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, interruptible, false);

	ttm_bo_unreserve(bo);

	if (unlikely(ret))
		ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);
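/*
 * Drivers feed the result straight into ttm_bo_init(); a hypothetical
 * driver wrapping the bo in its own structure would do e.g.
 *
 *	acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(struct my_bo));
 *	ret = ttm_bo_init(bdev, &my_bo->base, size, ttm_bo_type_device,
 *			  &placement, 0, false, NULL, acc_size, NULL,
 *			  &my_bo_destroy);
 */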

int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			bool interruptible,
			struct file *persistent_swap_storage,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);
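/*
 * Illustrative use (hypothetical flags, kernel-internal bo):
 *
 *	static uint32_t flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
 *	struct ttm_placement placement = {
 *		.num_placement = 1, .placement = &flags,
 *		.num_busy_placement = 1, .busy_placement = &flags,
 *	};
 *	struct ttm_buffer_object *bo;
 *
 *	ret = ttm_bo_create(bdev, PAGE_SIZE, ttm_bo_type_kernel,
 *			    &placement, 0, false, NULL, &bo);
 */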

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
					unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				pr_err("Cleanup eviction failed\n");
			}
		}
		spin_lock(&glob->lru_lock);
	}
	spin_unlock(&glob->lru_lock);
	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		ret = (*man->func->takedown)(man);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
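/*
 * Typically called once per memory type at driver load, e.g.
 * (illustrative, size in pages):
 *
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
 */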

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		pr_err("Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);


int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&glob->lru_lock);

	drm_vma_offset_manager_destroy(&bdev->vma_manager);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
				    0x10000000);
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	spin_lock_init(&bdev->fence_lock);
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
1504
}
1505 1506


1507
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	return drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
				  bo->mem.num_pages);
}

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	struct ttm_bo_device *bdev = bo->bdev;
	void *sync_obj;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bdev->fence_lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		spin_unlock(&bdev->fence_lock);
		ret = driver->sync_obj_wait(sync_obj,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bdev->fence_lock);
			return ret;
		}
		spin_lock(&bdev->fence_lock);
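		/*
		 * The fence may have been replaced while the fence_lock was
		 * dropped; only clear bo->sync_obj if it is still the object
		 * we waited on.
		 */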
		if (likely(bo->sync_obj == sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bdev->fence_lock);
		} else {
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bdev->fence_lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bdev->fence_lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &glob->swap_lru, swap) {
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->bdev->fence_lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 *
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	ww_mutex_unlock(&bo->resv->lock);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);