/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/reservation.h>

static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

/* default destructor */
static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

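/*
 * Translate the memory-type bits of a ttm_place into a mem_type index:
 * the position of the lowest bit set in (place->flags & TTM_PL_MASK_MEM).
 */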
static inline int ttm_mem_type_from_place(const struct ttm_place *place,
					  uint32_t *mem_type)
{
	int pos;

	pos = ffs(place->flags & TTM_PL_MASK_MEM);
	if (unlikely(!pos))
		return -EINVAL;

	*mem_type = pos - 1;
	return 0;
}

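/*
 * Dump the state of a memory type manager through pr_err, used when a
 * placement attempt has failed.
 */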
static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct drm_printer p = drm_debug_printer(TTM_PFX);

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08llX\n", man->gpu_offset);
	pr_err("    size: %llu\n", man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, &p);
}

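/*
 * Report why no space could be found for @bo: dump each requested
 * placement together with the state of its memory type manager.
 */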
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_place(&placement->placement[i],
						&mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i].flags, mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%d\n",
				atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type  = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

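/*
 * Final release of a buffer object, called when the last list
 * reference is dropped: destroy the TTM, drop the move fence and
 * reservation object, call the destroy callback and return the
 * accounted size to the memory global.
 */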
static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(kref_read(&bo->list_kref));
	BUG_ON(kref_read(&bo->kref));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));
	ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->bdev->glob->bo_count);
	dma_fence_put(bo->moving);
	reservation_object_fini(&bo->ttm_resv);
	mutex_destroy(&bo->wu_mutex);
	bo->destroy(bo);
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

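/*
 * Put a reserved, evictable BO back on its memory type manager's LRU
 * list and, if it has regular unswapped backing pages, on the global
 * swap LRU as well.
 */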
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	reservation_object_assert_held(bo->resv);

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru[bo->priority]);
		kref_get(&bo->list_kref);

		if (bo->ttm && !(bo->ttm->page_flags &
				 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
			list_add_tail(&bo->swap,
				      &bdev->glob->swap_lru[bo->priority]);
			kref_get(&bo->list_kref);
		}
	}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->bdev->glob;

	spin_lock(&glob->lru_lock);
	ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	reservation_object_assert_held(bo->resv);

	ttm_bo_del_from_lru(bo);
	ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);

/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;
	uint32_t page_flags = 0;

	reservation_object_assert_held(bo->resv);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	if (bdev->no_retry)
		page_flags |= TTM_PAGE_FLAG_NO_RETRY;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
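		/* fall through */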
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

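/*
 * Move the contents of @bo to @mem: unmap CPU mappings when the
 * caching or PCI status changes, create and bind a TTM if the new
 * memory type needs one, then let the driver move hook (or the
 * ttm_bo_move_ttm/memcpy fallbacks) do the actual transfer.
 */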
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem, bool evict,
				  struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem, ctx);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, evict, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, evict, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, ctx, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, ctx, mem);
	else
		ret = ttm_bo_move_memcpy(bo, ctx, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, false, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		if (bdev->driver->invalidate_caches) {
			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			if (ret)
				pr_err("Can not flush read caches\n");
		}
		bo->evicted = false;
	}

	if (bo->mem.mm_node)
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
	else
		bo->offset = 0;

	ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Called with bo::reserved held.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, false, NULL);

	ttm_tt_destroy(bo->ttm);
	bo->ttm = NULL;
	ttm_bo_mem_put(bo, &bo->mem);
}

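/*
 * Copy all fences from the BO's current reservation object into its
 * embedded ttm_resv (which is left locked on success), so that delayed
 * destruction no longer depends on a reservation object shared with
 * other BOs.
 */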
static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	if (bo->resv == &bo->ttm_resv)
		return 0;

	BUG_ON(!reservation_object_trylock(&bo->ttm_resv));

	r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
	if (r)
		reservation_object_unlock(&bo->ttm_resv);

	return r;
}

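/*
 * Enable software signaling on the exclusive fence and all shared
 * fences of the BO, so the delayed-destroy path can detect idleness.
 */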
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i;

	fobj = reservation_object_get_list(&bo->ttm_resv);
	fence = reservation_object_get_excl(&bo->ttm_resv);
	if (fence && !fence->ops->signaled)
		dma_fence_enable_sw_signaling(fence);

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference_protected(fobj->shared[i],
					reservation_object_held(bo->resv));

		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
}

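/*
 * If the BO is idle, release its memory type resources immediately;
 * otherwise enable signaling on its fences and queue it on the
 * device's delayed-destroy list for later cleanup.
 */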
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	ret = ttm_bo_individualize_resv(bo);
	if (ret) {
		/* Last resort, if we fail to allocate memory for the
		 * fences block for the BO to become idle
		 */
		reservation_object_wait_timeout_rcu(bo->resv, true, false,
						    30 * HZ);
		spin_lock(&glob->lru_lock);
		goto error;
	}

	spin_lock(&glob->lru_lock);
	ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY;
	if (!ret) {
		if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
			ttm_bo_del_from_lru(bo);
			spin_unlock(&glob->lru_lock);
			if (bo->resv != &bo->ttm_resv)
				reservation_object_unlock(&bo->ttm_resv);

			ttm_bo_cleanup_memtype_use(bo);
			reservation_object_unlock(bo->resv);
			return;
		}

		ttm_bo_flush_all_fences(bo);

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		reservation_object_unlock(bo->resv);
	}
	if (bo->resv != &bo->ttm_resv)
		reservation_object_unlock(&bo->ttm_resv);

error:
	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * function ttm_bo_cleanup_refs
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop the lru lock and optionally the reservation lock before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 * @unlock_resv           Unlock the reservation lock as well.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
			       bool interruptible, bool no_wait_gpu,
			       bool unlock_resv)
{
	struct ttm_bo_global *glob = bo->bdev->glob;
	struct reservation_object *resv;
	int ret;

	if (unlikely(list_empty(&bo->ddestroy)))
		resv = bo->resv;
	else
		resv = &bo->ttm_resv;

	if (reservation_object_test_signaled_rcu(resv, true))
		ret = 0;
	else
		ret = -EBUSY;

	if (ret && !no_wait_gpu) {
		long lret;

		if (unlock_resv)
			reservation_object_unlock(bo->resv);
		spin_unlock(&glob->lru_lock);

		lret = reservation_object_wait_timeout_rcu(resv, true,
							   interruptible,
							   30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&glob->lru_lock);
		if (unlock_resv && !reservation_object_trylock(bo->resv)) {
			/*
			 * We raced, and lost, someone else holds the reservation now,
			 * and is probably busy in ttm_bo_cleanup_memtype_use.
			 *
			 * Even if it's not the case, because we finished waiting any
			 * delayed destruction would succeed, so just return success
			 * here.
			 */
			spin_unlock(&glob->lru_lock);
			return 0;
		}
		ret = 0;
	}

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		if (unlock_resv)
			reservation_object_unlock(bo->resv);
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	kref_put(&bo->list_kref, ttm_bo_ref_bug);

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	if (unlock_resv)
		reservation_object_unlock(bo->resv);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */
static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct list_head removed;
	bool empty;

	INIT_LIST_HEAD(&removed);

	spin_lock(&glob->lru_lock);
	while (!list_empty(&bdev->ddestroy)) {
		struct ttm_buffer_object *bo;

		bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
				      ddestroy);
		kref_get(&bo->list_kref);
		list_move_tail(&bo->ddestroy, &removed);

		if (remove_all || bo->resv != &bo->ttm_resv) {
			spin_unlock(&glob->lru_lock);
			reservation_object_lock(bo->resv, NULL);

			spin_lock(&glob->lru_lock);
			ttm_bo_cleanup_refs(bo, false, !remove_all, true);

		} else if (reservation_object_trylock(bo->resv)) {
			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
		} else {
			spin_unlock(&glob->lru_lock);
		}

		kref_put(&bo->list_kref, ttm_bo_release_list);
		spin_lock(&glob->lru_lock);
	}
	list_splice_tail(&removed, &bdev->ddestroy);
	empty = list_empty(&bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	return empty;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (!ttm_bo_delayed_delete(bdev, false))
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

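/*
 * Evict a buffer from its current placement: ask the driver for
 * acceptable eviction placements, find space there and move the
 * buffer's contents.
 */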
static int ttm_bo_evict(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	reservation_object_assert_held(bo->resv);

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place)
{
	/* Don't evict this BO if it's outside of the
	 * requested placement range
	 */
	if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
	    (place->lpfn && place->lpfn <= bo->mem.start))
		return false;

	return true;
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);

/**
 * Check whether the target bo is allowed to be evicted or swapped out,
 * covering two cases:
 *
 * a. if the bo shares a reservation object with ctx->resv, the
 * reservation object is assumed to be locked already, so it is not
 * locked again; return true directly when either the operation allows
 * reserved eviction or the target bo is already on the delayed-free
 * list;
 *
 * b. otherwise, trylock it.
 */
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx, bool *locked)
{
	bool ret = false;

	*locked = false;
	if (bo->resv == ctx->resv) {
		reservation_object_assert_held(bo->resv);
		if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
		    || !list_empty(&bo->ddestroy))
			ret = true;
	} else {
		*locked = reservation_object_trylock(bo->resv);
		ret = *locked;
	}

	return ret;
}

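/*
 * Walk the LRU lists of a memory type in priority order and evict the
 * first buffer that is allowed and valuable to evict, cleaning it up
 * directly if it is already queued for delayed destruction.
 */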
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			       uint32_t mem_type,
			       const struct ttm_place *place,
			       struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo = NULL;
	bool locked = false;
	unsigned i;
	int ret;

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &man->lru[i], lru) {
			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked))
				continue;

			if (place && !bdev->driver->eviction_valuable(bo,
								      place)) {
				if (locked)
					reservation_object_unlock(bo->resv);
				continue;
			}
			break;
		}

		/* If the inner loop terminated early, we have our candidate */
		if (&bo->lru != &man->lru[i])
			break;

		bo = NULL;
	}

	if (!bo) {
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
					  ctx->no_wait_gpu, locked);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ret = ttm_bo_evict(bo, ctx);
	if (locked) {
		ttm_bo_unreserve(bo);
	} else {
		spin_lock(&glob->lru_lock);
		ttm_bo_add_to_lru(bo);
		spin_unlock(&glob->lru_lock);
	}

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Add the last move fence to the BO and reserve a new shared slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
				 struct ttm_mem_type_manager *man,
				 struct ttm_mem_reg *mem)
{
	struct dma_fence *fence;
	int ret;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		reservation_object_add_shared_fence(bo->resv, fence);

		ret = reservation_object_reserve_shared(bo->resv);
		if (unlikely(ret))
			return ret;

		dma_fence_put(bo->moving);
		bo->moving = fence;
	}

	return 0;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
					uint32_t mem_type,
					const struct ttm_place *place,
					struct ttm_mem_reg *mem,
					struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	mem->mem_type = mem_type;
	return ttm_bo_add_move_fence(bo, man, mem);
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 const struct ttm_place *place,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((place->flags & man->available_caching) == 0)
		return false;

	cur_flags |= (place->flags & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	ret = reservation_object_reserve_shared(bo->resv);
	if (unlikely(ret))
		return ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;

		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
						&cur_flags);

		if (!type_ok)
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret))
			return ret;

		if (mem->mm_node) {
			ret = ttm_bo_add_move_fence(bo, man, mem);
			if (unlikely(ret)) {
				(*man->func->put_node)(man, mem);
				return ret;
			}
			break;
		}
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;
		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, ctx);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}

	if (!type_found) {
		pr_err(TTM_PFX "No compatible memory type found\n");
		return -EINVAL;
	}

	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

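/*
 * Find space matching @placement and move the buffer there, releasing
 * the freshly allocated node again if the move fails.
 */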
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      struct ttm_operation_ctx *ctx)
{
	int ret = 0;
	struct ttm_mem_reg mem;

	reservation_object_assert_held(bo->resv);

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

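/*
 * Check whether the buffer's current memory region lies inside one of
 * the given places and matches its caching, memory type and contiguity
 * flags; on success *new_flags returns the matching place's flags.
 */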
static bool ttm_bo_places_compat(const struct ttm_place *places,
				 unsigned num_placement,
				 struct ttm_mem_reg *mem,
				 uint32_t *new_flags)
{
	unsigned i;

	for (i = 0; i < num_placement; i++) {
		const struct ttm_place *heap = &places[i];

		if (mem->mm_node && (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
		    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
		     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
			return true;
	}
	return false;
}

bool ttm_bo_mem_compat(struct ttm_placement *placement,
		       struct ttm_mem_reg *mem,
		       uint32_t *new_flags)
{
	if (ttm_bo_places_compat(placement->placement, placement->num_placement,
				 mem, new_flags))
		return true;

	if ((placement->busy_placement != placement->placement ||
	     placement->num_busy_placement > placement->num_placement) &&
	    ttm_bo_places_compat(placement->busy_placement,
				 placement->num_busy_placement,
				 mem, new_flags))
		return true;

	return false;
}
EXPORT_SYMBOL(ttm_bo_mem_compat);

int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx)
{
	int ret;
	uint32_t new_flags;

	reservation_object_assert_held(bo->resv);
	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, ctx);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
			 struct ttm_buffer_object *bo,
			 unsigned long size,
			 enum ttm_bo_type type,
			 struct ttm_placement *placement,
			 uint32_t page_alignment,
			 struct ttm_operation_ctx *ctx,
			 size_t acc_size,
			 struct sg_table *sg,
			 struct reservation_object *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	bool locked;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy ? destroy : ttm_bo_default_destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	mutex_init(&bo->wu_mutex);
	bo->bdev = bdev;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->moving = NULL;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->acc_size = acc_size;
	bo->sg = sg;
	if (resv) {
		bo->resv = resv;
		reservation_object_assert_held(bo->resv);
	} else {
		bo->resv = &bo->ttm_resv;
	}
	reservation_object_init(&bo->ttm_resv);
	atomic_inc(&bo->bdev->glob->bo_count);
	drm_vma_node_reset(&bo->vma_node);
	bo->priority = 0;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
					 bo->mem.num_pages);

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv) {
		locked = reservation_object_trylock(bo->resv);
		WARN_ON(!locked);
	}

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, ctx);

	if (unlikely(ret)) {
		if (!resv)
			ttm_bo_unreserve(bo);

		ttm_bo_unref(&bo);
		return ret;
	}

	if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		spin_lock(&bdev->glob->lru_lock);
		ttm_bo_add_to_lru(bo);
		spin_unlock(&bdev->glob->lru_lock);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		size_t acc_size,
		struct sg_table *sg,
		struct reservation_object *resv,
		void (*destroy) (struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	int ret;

	ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
				   page_alignment, &ctx, acc_size,
				   sg, resv, destroy);
	if (ret)
		return ret;

	if (!resv)
		ttm_bo_unreserve(bo);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init);

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			bool interruptible,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, acc_size,
			  NULL, NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);

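/*
 * Evict every buffer on the LRU lists of a memory type and then wait
 * for the manager's last move fence, leaving the type empty.
 */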
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   unsigned mem_type)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.flags = TTM_OPT_FLAG_FORCE_ALLOC
	};
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	struct dma_fence *fence;
	int ret;
	unsigned i;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		while (!list_empty(&man->lru[i])) {
			spin_unlock(&glob->lru_lock);
			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
			if (ret)
				return ret;
			spin_lock(&glob->lru_lock);
		}
	}
	spin_unlock(&glob->lru_lock);

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ret = ttm_bo_force_list_clean(bdev, mem_type);
		if (ret) {
			pr_err("Cleanup eviction failed\n");
			return ret;
		}

		ret = (*man->func->takedown)(man);
	}

	dma_fence_put(man->move);
	man->move = NULL;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret;
	struct ttm_mem_type_manager *man;
	unsigned i;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	spin_lock_init(&man->move_lock);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&man->lru[i]);
	man->move = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;
	unsigned i;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->mem_glob->bo_glob = glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&glob->swap_lru[i]);
	INIT_LIST_HEAD(&glob->device_list);
	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);


int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	if (ttm_bo_delayed_delete(bdev, true))
		pr_debug("Delayed destroy list was clean\n");

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		if (list_empty(&bdev->man[0].lru[0]))
			pr_debug("Swap list %d was clean\n", i);
	spin_unlock(&glob->lru_lock);

	drm_vma_offset_manager_destroy(&bdev->vma_manager);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
				    0x10000000);
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = mapping;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
1646
}
1647 1648


1649
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1650 1651

int ttm_bo_wait(struct ttm_buffer_object *bo,
1652
		bool interruptible, bool no_wait)
1653
{
C

	if (no_wait) {
		if (reservation_object_test_signaled_rcu(bo->resv, true))
			return 0;
		else
			return -EBUSY;
	}
1662

C
						      interruptible, timeout);
1665 1666 1667 1668 1669 1670
	if (timeout < 0)
		return timeout;

	if (timeout == 0)
		return -EBUSY;

C
1672
	return 0;
1673 1674 1675 1676 1677 1678 1679 1680
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
1681
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
1682 1683
	 */

1684
	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
1685 1686
	if (unlikely(ret != 0))
		return ret;
1687
	ret = ttm_bo_wait(bo, true, no_wait);
1688 1689 1690 1691 1692
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
1693
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1694 1695 1696

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
1697
	atomic_dec(&bo->cpu_writers);
1698
}
1699
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
1700 1701 1702 1703 1704

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */
1705
int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
1706 1707 1708
{
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
1709
	bool locked;
1710
	unsigned i;
1711

1712
	spin_lock(&glob->lru_lock);
1713 1714
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &glob->swap_lru[i], swap) {
1715 1716
			if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked)) {
				ret = 0;
1717
				break;
1718
			}
1719
		}
1720 1721 1722
		if (!ret)
			break;
	}
1723

1724 1725 1726 1727
	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}
1728

1729
	kref_get(&bo->list_kref);
1730

1731
	if (!list_empty(&bo->ddestroy)) {
1732
		ret = ttm_bo_cleanup_refs(bo, false, false, locked);
1733 1734
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
1735 1736
	}

1737
	ttm_bo_del_from_lru(bo);
1738
	spin_unlock(&glob->lru_lock);
1739 1740

	/**
1741
	 * Move to system cached
1742 1743
	 */

1744 1745
	if (bo->mem.mem_type != TTM_PL_SYSTEM ||
	    bo->ttm->caching_state != tt_cached) {
1746
		struct ttm_operation_ctx ctx = { false, false };
1747 1748 1749 1750 1751 1752 1753
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

1754
		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
1755 1756 1757 1758
		if (unlikely(ret != 0))
			goto out;
	}

	/**
	 * Make sure BO is idle.
	 */

	ret = ttm_bo_wait(bo, false, false);
	if (unlikely(ret != 0))
		goto out;

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */
	if (locked)
		reservation_object_unlock(bo->resv);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_swapout);

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};

	while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);

/**
 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
 * unreserved
 *
 * @bo: Pointer to buffer
 */
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * In the absence of a wait_unlocked API,
	 * use the bo::wu_mutex to avoid triggering livelocks due to
	 * concurrent use of this function. Note that this use of
	 * bo::wu_mutex can go away if we change locking order to
	 * mmap_sem -> bo::reserve.
	 */
	ret = mutex_lock_interruptible(&bo->wu_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;
	if (!ww_mutex_is_locked(&bo->resv->lock))
		goto out_unlock;
	ret = reservation_object_lock_interruptible(bo->resv, NULL);
	if (ret == -EINTR)
		ret = -ERESTARTSYS;
	if (unlikely(ret != 0))
		goto out_unlock;
	reservation_object_unlock(bo->resv);

out_unlock:
	mutex_unlock(&bo->wu_mutex);
	return ret;
}