/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

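/**
 * ttm_bo_move_ttm - move a buffer object by rebinding its TTM
 *
 * @bo: the buffer object to move
 * @evict: unused by this helper
 * @no_wait_gpu: unused by this helper
 * @new_mem: the destination memory region
 *
 * Frees the old memory node if the object is not already in system
 * memory, switches the TTM pages to the caching mode requested by
 * @new_mem and, unless the destination itself is system memory, binds
 * the TTM to @new_mem. On success bo->mem is updated and @new_mem gives
 * up ownership of its mm_node.
 */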
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

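/*
 * Serialize access to a memory type's I/O aperture bookkeeping. Managers
 * that can always reserve aperture space set io_reserve_fastpath and
 * skip the mutex entirely.
 */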
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}


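/*
 * Ask the driver to reserve I/O (aperture) space for @mem. Without the
 * fastpath the reservation is refcounted and, on -EAGAIN, buffers are
 * evicted from the io_reserve LRU until the reservation succeeds or no
 * candidates remain.
 */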
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

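/*
 * Reserve I/O space for a buffer object that is about to be mapped and,
 * if the manager uses the io_reserve LRU, track the object there so the
 * mapping can be torn down again when aperture space must be reclaimed.
 * Callers are expected to hold the manager's io_reserve lock.
 */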
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

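/*
 * Map a memory region for a CPU copy. I/O memory is reserved and
 * ioremapped (write-combined when the placement asks for it); system
 * memory regions return with *virtual == NULL and are copied page by
 * page by the helpers below.
 */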
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

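/*
 * Per-page copy helpers for ttm_bo_move_memcpy(): io -> io, io -> TTM
 * page and TTM page -> io. TTM pages are mapped with the requested
 * protection, using kmap_atomic_prot() on x86 and vmap()/kmap()
 * elsewhere.
 */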
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

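/**
 * ttm_bo_move_memcpy - fallback move done with a CPU copy
 *
 * @bo: the buffer object to move
 * @evict: unused by this helper
 * @interruptible: sleep interruptibly while waiting for the GPU
 * @no_wait_gpu: fail instead of waiting for pending GPU work
 * @new_mem: the destination memory region
 *
 * Waits for outstanding GPU work, maps source and destination and copies
 * the contents page by page with the CPU, honouring the caching
 * attributes of both placements. The old memory node is released only
 * when the copy succeeded.
 */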
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool interruptible,
		       bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
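/*
 * Illustrative sketch only (the mydrv_* names are made up): a driver's
 * ->move callback would typically try an accelerated copy first and fall
 * back to ttm_bo_move_memcpy() when that is not possible:
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 bool interruptible, bool no_wait_gpu,
 *				 struct ttm_mem_reg *new_mem)
 *	{
 *		int ret;
 *
 *		ret = mydrv_move_blit(bo, evict, no_wait_gpu, new_mem);
 *		if (ret)
 *			ret = ttm_bo_move_memcpy(bo, evict, interruptible,
 *						 no_wait_gpu, new_mem);
 *		return ret;
 *	}
 */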

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->moving = NULL;
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

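/*
 * ttm_io_prot - derive page protection bits from TTM placement flags.
 * Returns @tmp unchanged for cached mappings; otherwise the
 * write-combined or uncached variant the architecture supports.
 */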
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

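/**
 * ttm_bo_kmap - map part of a buffer object into kernel address space
 *
 * @bo: the buffer object; the caller must keep it resident (reserved or
 * pinned) for the lifetime of the mapping
 * @start_page: first page of the range to map
 * @num_pages: number of pages to map
 * @map: tracks the mapping; pass it to ttm_bo_kunmap() to undo it
 *
 * Uses ioremap for io memory and kmap()/vmap() for system memory,
 * picking a page protection that matches the buffer's placement.
 */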
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
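/*
 * Illustrative sketch only (bo, size and error handling are assumed to
 * be provided by the caller, which must also keep the buffer resident):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virtual;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret)
 *		return ret;
 *	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	if (is_iomem)
 *		memset_io((void __iomem *)virtual, 0, size);
 *	else
 *		memset(virtual, 0, size);
 *	ttm_bo_kunmap(&map);
 */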

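/**
 * ttm_bo_move_accel_cleanup - finish an accelerated (fenced) move
 *
 * @bo: the buffer object that was moved
 * @fence: the fence signalling completion of the copy
 * @evict: this move is an eviction
 * @new_mem: the destination memory region
 *
 * For evictions, waits for the copy and frees the old node immediately.
 * Otherwise the old memory and, for fixed memory types, the TTM are
 * handed over to a "ghost" buffer object that is released once @fence
 * signals, so the caller does not have to stall.
 */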
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct fence *fence,
			      bool evict,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		fence_put(bo->moving);
		bo->moving = fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

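/**
 * ttm_bo_pipeline_move - finish a pipelined (fenced) move
 *
 * @bo: the buffer object that was moved
 * @fence: the fence signalling completion of the move
 * @evict: this move is an eviction
 * @new_mem: the destination memory region
 *
 * Like ttm_bo_move_accel_cleanup(), but evictions from fixed memory are
 * pipelined too: instead of waiting, @fence is recorded as the source
 * manager's move fence so the freed space is not handed out again before
 * the move has actually finished.
 */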
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *old_mem = &bo->mem;

	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

	int ret;

	reservation_object_add_excl_fence(bo->resv, fence);

	if (!evict) {
		struct ttm_buffer_object *ghost_obj;

		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		fence_put(bo->moving);
		bo->moving = fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);

	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {

		/**
		 * BO doesn't have a TTM we need to bind/unbind. Just remember
		 * this eviction and free up the allocation
		 */

		spin_lock(&from->move_lock);
		if (!from->move || fence_is_later(fence, from->move)) {
			fence_put(from->move);
			from->move = fence_get(fence);
		}
		spin_unlock(&from->move_lock);

		ttm_bo_free_old_node(bo);

		fence_put(bo->moving);
		bo->moving = fence_get(fence);

	} else {
		/**
		 * Last resort, wait for the move to be completed.
		 *
		 * Should never happen in practice.
		 */

		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);