/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/reservation.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

static inline int ttm_mem_type_from_place(const struct ttm_place *place,
					  uint32_t *mem_type)
{
	int pos;

	pos = ffs(place->flags & TTM_PL_MASK_MEM);
	if (unlikely(!pos))
		return -EINVAL;

	*mem_type = pos - 1;
	return 0;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct drm_printer p = drm_debug_printer(TTM_PFX);

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08llX\n", man->gpu_offset);
	pr_err("    size: %llu\n", man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, &p);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_place(&placement->placement[i],
						&mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i].flags, mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%d\n",
				atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type  = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(kref_read(&bo->list_kref));
	BUG_ON(kref_read(&bo->kref));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));
	ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	dma_fence_put(bo->moving);
	reservation_object_fini(&bo->ttm_resv);
	mutex_destroy(&bo->wu_mutex);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		kfree(bo);
	}
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	lockdep_assert_held(&bo->resv->lock.base);

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru[bo->priority]);
		kref_get(&bo->list_kref);

		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
			list_add_tail(&bo->swap,
				      &bo->glob->swap_lru[bo->priority]);
			kref_get(&bo->list_kref);
		}
	}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
	spin_lock(&bo->glob->lru_lock);
	ttm_bo_del_from_lru(bo);
	spin_unlock(&bo->glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	lockdep_assert_held(&bo->resv->lock.base);

	ttm_bo_del_from_lru(bo);
	ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);

/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, evict, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, evict, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, false, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		if (bdev->driver->invalidate_caches) {
			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			if (ret)
				pr_err("Can not flush read caches\n");
		}
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Call bo::reserved.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, false, NULL);

	ttm_tt_destroy(bo->ttm);
	bo->ttm = NULL;
	ttm_bo_mem_put(bo, &bo->mem);
}

static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	if (bo->resv == &bo->ttm_resv)
		return 0;

	BUG_ON(!reservation_object_trylock(&bo->ttm_resv));

	r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
	if (r)
		reservation_object_unlock(&bo->ttm_resv);

	return r;
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i;

	fobj = reservation_object_get_list(&bo->ttm_resv);
	fence = reservation_object_get_excl(&bo->ttm_resv);
	if (fence && !fence->ops->signaled)
		dma_fence_enable_sw_signaling(fence);

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference_protected(fobj->shared[i],
					reservation_object_held(bo->resv));

		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
}

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	ret = ttm_bo_individualize_resv(bo);
	if (ret) {
		/* Last resort, if we fail to allocate memory for the
		 * fences block for the BO to become idle
		 */
		reservation_object_wait_timeout_rcu(bo->resv, true, false,
						    30 * HZ);
		spin_lock(&glob->lru_lock);
		goto error;
	}

	spin_lock(&glob->lru_lock);
	ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY;
	if (!ret) {
		if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
			ttm_bo_del_from_lru(bo);
			spin_unlock(&glob->lru_lock);
			if (bo->resv != &bo->ttm_resv)
				reservation_object_unlock(&bo->ttm_resv);

			ttm_bo_cleanup_memtype_use(bo);
			reservation_object_unlock(bo->resv);
			return;
		}

		ttm_bo_flush_all_fences(bo);

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		reservation_object_unlock(bo->resv);
	}
	if (bo->resv != &bo->ttm_resv)
		reservation_object_unlock(&bo->ttm_resv);

error:
	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * function ttm_bo_cleanup_refs
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop the lru lock and optionally the reservation lock before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 * @unlock_resv           Unlock the reservation lock as well.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
			       bool interruptible, bool no_wait_gpu,
			       bool unlock_resv)
{
	struct ttm_bo_global *glob = bo->glob;
	struct reservation_object *resv;
	int ret;

	if (unlikely(list_empty(&bo->ddestroy)))
		resv = bo->resv;
	else
		resv = &bo->ttm_resv;

	if (reservation_object_test_signaled_rcu(resv, true))
		ret = 0;
	else
		ret = -EBUSY;

	if (ret && !no_wait_gpu) {
		long lret;

		if (unlock_resv)
			reservation_object_unlock(bo->resv);
		spin_unlock(&glob->lru_lock);

		lret = reservation_object_wait_timeout_rcu(resv, true,
							   interruptible,
							   30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&glob->lru_lock);
		if (unlock_resv && !reservation_object_trylock(bo->resv)) {
			/*
			 * We raced, and lost, someone else holds the reservation now,
			 * and is probably busy in ttm_bo_cleanup_memtype_use.
			 *
			 * Even if it's not the case, because we finished waiting any
			 * delayed destruction would succeed, so just return success
			 * here.
			 */
			spin_unlock(&glob->lru_lock);
			return 0;
		}
		ret = 0;
	}

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		if (unlock_resv)
			reservation_object_unlock(bo->resv);
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	kref_put(&bo->list_kref, ttm_bo_ref_bug);

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	if (unlock_resv)
		reservation_object_unlock(bo->resv);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */
static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct list_head removed;
	bool empty;

	INIT_LIST_HEAD(&removed);

	spin_lock(&glob->lru_lock);
	while (!list_empty(&bdev->ddestroy)) {
		struct ttm_buffer_object *bo;

		bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
				      ddestroy);
		kref_get(&bo->list_kref);
		list_move_tail(&bo->ddestroy, &removed);
		spin_unlock(&glob->lru_lock);

		reservation_object_lock(bo->resv, NULL);

		spin_lock(&glob->lru_lock);
		ttm_bo_cleanup_refs(bo, false, !remove_all, true);

		kref_put(&bo->list_kref, ttm_bo_release_list);
		spin_lock(&glob->lru_lock);
	}
	list_splice_tail(&removed, &bdev->ddestroy);
	empty = list_empty(&bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	return empty;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (!ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
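/*
 * Illustrative sketch (not part of the original file): drivers typically
 * bracket device suspend or reset with the two helpers above so the delayed
 * destroy worker cannot run concurrently.  The "my_dev" wrapper below is a
 * hypothetical example, assuming the driver embeds a struct ttm_bo_device.
 *
 *	int resched = ttm_bo_lock_delayed_workqueue(&my_dev->bdev);
 *	... suspend or reset the hardware ...
 *	ttm_bo_unlock_delayed_workqueue(&my_dev->bdev, resched);
 */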

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	lockdep_assert_held(&bo->resv->lock.base);

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
				no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place)
{
	/* Don't evict this BO if it's outside of the
	 * requested placement range
	 */
	if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
	    (place->lpfn && place->lpfn <= bo->mem.start))
		return false;

	return true;
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
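/*
 * Added commentary: this is only the default test wired up through
 * ttm_bo_driver::eviction_valuable; a driver may install its own callback to
 * keep, for example, pinned buffers out of eviction.  Sketch of a
 * hypothetical driver hook, assuming a my_bo_is_pinned() helper:
 *
 *	static bool my_eviction_valuable(struct ttm_buffer_object *bo,
 *					 const struct ttm_place *place)
 *	{
 *		if (my_bo_is_pinned(bo))
 *			return false;
 *		return ttm_bo_eviction_valuable(bo, place);
 *	}
 */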

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			       struct reservation_object *resv,
			       uint32_t mem_type,
			       const struct ttm_place *place,
			       bool interruptible,
			       bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo = NULL;
	bool locked = false;
	unsigned i;
	int ret;

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &man->lru[i], lru) {
			if (bo->resv == resv) {
				if (list_empty(&bo->ddestroy))
					continue;
			} else {
				locked = reservation_object_trylock(bo->resv);
				if (!locked)
					continue;
			}

			if (place && !bdev->driver->eviction_valuable(bo,
								      place)) {
				if (locked)
					reservation_object_unlock(bo->resv);
				continue;
			}
			break;
		}

		/* If the inner loop terminated early, we have our candidate */
		if (&bo->lru != &man->lru[i])
			break;

		bo = NULL;
	}

	if (!bo) {
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs(bo, interruptible, no_wait_gpu,
					  locked);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	if (locked) {
		ttm_bo_unreserve(bo);
	} else {
		spin_lock(&glob->lru_lock);
		ttm_bo_add_to_lru(bo);
		spin_unlock(&glob->lru_lock);
	}

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Add the last move fence to the BO and reserve a new shared slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
				 struct ttm_mem_type_manager *man,
				 struct ttm_mem_reg *mem)
{
	struct dma_fence *fence;
	int ret;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		reservation_object_add_shared_fence(bo->resv, fence);

		ret = reservation_object_reserve_shared(bo->resv);
		if (unlikely(ret))
			return ret;

		dma_fence_put(bo->moving);
		bo->moving = fence;
	}

	return 0;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
					uint32_t mem_type,
					const struct ttm_place *place,
					struct ttm_mem_reg *mem,
					bool interruptible,
					bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, bo->resv, mem_type, place,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	mem->mem_type = mem_type;
	return ttm_bo_add_move_fence(bo, man, mem);
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 const struct ttm_place *place,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((place->flags & man->available_caching) == 0)
		return false;

	cur_flags |= (place->flags & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
			bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	ret = reservation_object_reserve_shared(bo->resv);
	if (unlikely(ret))
		return ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;

		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
						&cur_flags);

		if (!type_ok)
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret))
			return ret;

		if (mem->mm_node) {
			ret = ttm_bo_add_move_fence(bo, man, mem);
			if (unlikely(ret)) {
				(*man->func->put_node)(man, mem);
				return ret;
			}
			break;
		}
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;
		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
						interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}

	if (!type_found) {
		pr_err(TTM_PFX "No compatible memory type found\n");
		return -EINVAL;
	}

	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;

	lockdep_assert_held(&bo->resv->lock.base);

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

static bool ttm_bo_places_compat(const struct ttm_place *places,
				 unsigned num_placement,
				 struct ttm_mem_reg *mem,
				 uint32_t *new_flags)
{
	unsigned i;

	for (i = 0; i < num_placement; i++) {
		const struct ttm_place *heap = &places[i];

		if (mem->mm_node && (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
		    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
		     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
			return true;
	}
	return false;
}

bool ttm_bo_mem_compat(struct ttm_placement *placement,
		       struct ttm_mem_reg *mem,
		       uint32_t *new_flags)
{
	if (ttm_bo_places_compat(placement->placement, placement->num_placement,
				 mem, new_flags))
		return true;

	if ((placement->busy_placement != placement->placement ||
	     placement->num_busy_placement > placement->num_placement) &&
	    ttm_bo_places_compat(placement->busy_placement,
				 placement->num_busy_placement,
				 mem, new_flags))
		return true;

	return false;
}
EXPORT_SYMBOL(ttm_bo_mem_compat);

int ttm_bo_validate(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible,
			bool no_wait_gpu)
{
	int ret;
	uint32_t new_flags;

	lockdep_assert_held(&bo->resv->lock.base);
	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
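/*
 * Illustrative usage sketch (not part of the original file): callers must
 * hold the reservation while validating a buffer into a new placement.  The
 * placement flags below are an assumed driver choice; only the calling
 * pattern is taken from this file.
 *
 *	struct ttm_place place = { .fpfn = 0, .lpfn = 0,
 *				   .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED };
 *	struct ttm_placement placement = {
 *		.num_placement = 1, .placement = &place,
 *		.num_busy_placement = 1, .busy_placement = &place,
 *	};
 *	int ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (!ret) {
 *		ret = ttm_bo_validate(bo, &placement, true, false);
 *		ttm_bo_unreserve(bo);
 *	}
 */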

int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
			 struct ttm_buffer_object *bo,
			 unsigned long size,
			 enum ttm_bo_type type,
			 struct ttm_placement *placement,
			 uint32_t page_alignment,
			 bool interruptible,
			 struct file *persistent_swap_storage,
			 size_t acc_size,
			 struct sg_table *sg,
			 struct reservation_object *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	bool locked;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	mutex_init(&bo->wu_mutex);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->moving = NULL;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	if (resv) {
		bo->resv = resv;
		lockdep_assert_held(&bo->resv->lock.base);
	} else {
		bo->resv = &bo->ttm_resv;
	}
	reservation_object_init(&bo->ttm_resv);
	atomic_inc(&bo->glob->bo_count);
	drm_vma_node_reset(&bo->vma_node);
	bo->priority = 0;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
					 bo->mem.num_pages);

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv) {
		locked = ww_mutex_trylock(&bo->resv->lock);
		WARN_ON(!locked);
	}

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, interruptible, false);

	if (unlikely(ret)) {
		if (!resv)
			ttm_bo_unreserve(bo);

		ttm_bo_unref(&bo);
		return ret;
	}

	if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		spin_lock(&bo->glob->lru_lock);
		ttm_bo_add_to_lru(bo);
		spin_unlock(&bo->glob->lru_lock);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct file *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		struct reservation_object *resv,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret;

	ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
				   page_alignment, interruptible,
				   persistent_swap_storage, acc_size,
				   sg, resv, destroy);
	if (ret)
		return ret;

	if (!resv)
		ttm_bo_unreserve(bo);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init);
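/*
 * Illustrative usage sketch (not part of the original file): a driver
 * normally sizes the accounting with ttm_bo_acc_size() (defined below) and
 * passes it to ttm_bo_init().  Names such as "bdev", "size" and "placement"
 * are assumed to come from the caller; error handling is omitted.
 *
 *	struct ttm_buffer_object *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *	size_t acc_size = ttm_bo_acc_size(bdev, size, sizeof(*bo));
 *	int ret = ttm_bo_init(bdev, bo, size, ttm_bo_type_device, placement,
 *			      0 /* page_alignment *\/, true, NULL, acc_size,
 *			      NULL, NULL, NULL);
 */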

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			bool interruptible,
			struct file *persistent_swap_storage,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	struct dma_fence *fence;
	int ret;
	unsigned i;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		while (!list_empty(&man->lru[i])) {
			spin_unlock(&glob->lru_lock);
			ret = ttm_mem_evict_first(bdev, NULL, mem_type, NULL,
						  false, false);
			if (ret)
				return ret;
			spin_lock(&glob->lru_lock);
		}
	}
	spin_unlock(&glob->lru_lock);

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ret = ttm_bo_force_list_clean(bdev, mem_type);
		if (ret) {
			pr_err("Cleanup eviction failed\n");
			return ret;
		}

		ret = (*man->func->takedown)(man);
	}

	dma_fence_put(man->move);
	man->move = NULL;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret;
	struct ttm_mem_type_manager *man;
	unsigned i;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	spin_lock_init(&man->move_lock);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	man->move = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
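/*
 * Illustrative sketch (not part of the original file): after
 * ttm_bo_device_init() has set up TTM_PL_SYSTEM, a driver registers its own
 * memory types during device initialization.  TTM_PL_VRAM and the size
 * below are assumptions about a typical driver, not something defined here.
 *
 *	ret = ttm_bo_init_mm(&my_dev->bdev, TTM_PL_VRAM,
 *			     vram_size >> PAGE_SHIFT);
 */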

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;
	unsigned i;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&glob->swap_lru[i]);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		pr_err("Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);


int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	if (ttm_bo_delayed_delete(bdev, true))
		TTM_DEBUG("Delayed destroy list was clean\n");

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		if (list_empty(&bdev->man[0].lru[0]))
			TTM_DEBUG("Swap list %d was clean\n", i);
	spin_unlock(&glob->lru_lock);

	drm_vma_offset_manager_destroy(&bdev->vma_manager);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
				    0x10000000);
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = mapping;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}


EXPORT_SYMBOL(ttm_bo_unmap_virtual);

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool interruptible, bool no_wait)
{
	long timeout = 15 * HZ;

	if (no_wait) {
		if (reservation_object_test_signaled_rcu(bo->resv, true))
			return 0;
		else
			return -EBUSY;
	}

	timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
						      interruptible, timeout);
	if (timeout < 0)
		return timeout;

	if (timeout == 0)
		return -EBUSY;

	reservation_object_add_excl_fence(bo->resv, NULL);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
	if (unlikely(ret != 0))
		return ret;
	ret = ttm_bo_wait(bo, true, no_wait);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
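/*
 * Illustrative usage sketch (not part of the original file): CPU writes are
 * bracketed by the grab/release pair above so eviction waits for the writer;
 * the mapping step in between is driver specific and only hinted at here.
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (!ret) {
 *		... map the buffer and write to it ...
 *		ttm_bo_synccpu_write_release(bo);
 *	}
 */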

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	unsigned i;

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &glob->swap_lru[i], swap) {
			ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY;
			if (!ret)
				break;
		}
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs(bo, false, false, true);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	/**
	 * Move to system cached
	 */

	if (bo->mem.mem_type != TTM_PL_SYSTEM ||
	    bo->ttm->caching_state != tt_cached) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	/**
	 * Make sure BO is idle.
	 */

	ret = ttm_bo_wait(bo, false, false);
	if (unlikely(ret != 0))
		goto out;

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 *
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	reservation_object_unlock(bo->resv);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);

/**
 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
 * unreserved
 *
 * @bo: Pointer to buffer
 */
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * In the absence of a wait_unlocked API,
	 * use the bo::wu_mutex to avoid triggering livelocks due to
	 * concurrent use of this function. Note that this use of
	 * bo::wu_mutex can go away if we change locking order to
	 * mmap_sem -> bo::reserve.
	 */
	ret = mutex_lock_interruptible(&bo->wu_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;
	if (!ww_mutex_is_locked(&bo->resv->lock))
		goto out_unlock;
	ret = reservation_object_lock_interruptible(bo->resv, NULL);
	if (ret == -EINTR)
		ret = -ERESTARTSYS;
	if (unlikely(ret != 0))
		goto out_unlock;
	reservation_object_unlock(bo->resv);

out_unlock:
	mutex_unlock(&bo->wu_mutex);
	return ret;
}