/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

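/*
 * Move a buffer object between two placements that are both reachable
 * through its TTM: unbind and free the old aperture node if the buffer
 * is not already in system memory, switch the TTM caching state to what
 * the new placement requires, and bind to the new placement unless it
 * is plain system memory.
 */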
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

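/*
 * Give up the io reservation of the buffer object at the head of the
 * io_reserve LRU so its aperture space can be reused. Returns -EAGAIN
 * when there is nothing left to evict.
 */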
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}


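/*
 * Ask the driver to reserve an io region for @mem. Unless the memory
 * type uses the io_reserve fast path, reservations are reference
 * counted, and a driver running out of aperture space (-EAGAIN) makes
 * us evict reservations from the io_reserve LRU and retry.
 */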
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

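/*
 * Drop one reference to the io reservation of @mem and let the driver
 * release the region when the last reference is gone.
 */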
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

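/*
 * Reserve the io region backing a CPU mapping of the buffer and, when
 * the memory type uses the io_reserve LRU, put the buffer on it so the
 * reservation can be reclaimed under aperture pressure.
 */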
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

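/*
 * Map a memory region for CPU access during a memcpy move: reuse a
 * pre-mapped kernel address supplied by the driver if there is one,
 * otherwise ioremap the region, write-combined when the placement asks
 * for it. *virtual is left NULL for regions that are not iomem.
 */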
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

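/* Undo ttm_mem_reg_ioremap() and drop the io reservation it took. */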
static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

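/* Copy one page between two ioremapped regions, 32 bits at a time. */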
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

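/*
 * Copy one page from an ioremapped source into the TTM page at index
 * @page, mapping the destination with the requested page protection
 * (kmap_atomic_prot() on x86, vmap()/kmap() elsewhere).
 */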
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

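/*
 * Copy the TTM page at index @page into an ioremapped destination,
 * mapping the source with the requested page protection.
 */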
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

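/*
 * Fallback move done by the CPU: map the old and new placements, copy
 * page by page (back to front when the two ranges may overlap within
 * the same memory type), then adopt the new placement. A buffer moved
 * into a FIXED memory type no longer needs its TTM, which is unbound
 * and destroyed.
 */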
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

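/*
 * Derive the page protection for a mapping from the TTM caching flags:
 * cached placements keep the protection they were given, write-combined
 * and uncached placements get the matching pgprot bits where the
 * architecture supports them.
 */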
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

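/*
 * Kmap helper for iomem placements: use the driver-supplied kernel
 * address when one exists, otherwise ioremap the requested range.
 */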
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

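/*
 * Kmap helper for placements backed by TTM pages: populate the TTM if
 * necessary, then either kmap() a single cached page directly or vmap()
 * the range with the page protection the placement requires.
 */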
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

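/*
 * Map part of a buffer object into kernel address space, taking the
 * iomem or TTM path depending on where the buffer currently lives.
 *
 * A typical caller looks roughly like this (sketch only; error handling
 * trimmed, bo assumed reserved):
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *
 *	if (!ttm_bo_kmap(bo, 0, bo->num_pages, &map)) {
 *		void *virt = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		... access the buffer through virt ...
 *		ttm_bo_kunmap(&map);
 *	}
 */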
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

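/* Tear down a mapping made by ttm_bo_kmap() and drop its io reservation. */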
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

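/*
 * Finish an accelerated (GPU copy) move: attach @fence to the buffer
 * and either wait for it and tear down the old placement (eviction), or
 * hang the old placement on a ghost buffer object that is released once
 * the fence signals, so that ordinary moves can be pipelined.
 */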
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct fence *fence,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);