ttm_bo_util.c
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

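/*
 * Move a buffer whose backing store is a struct ttm_tt by rebinding the
 * page array: unbind from the old (non-system) placement, switch the
 * caching state of the pages, then bind to the new placement unless it
 * is plain system memory.
 */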
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

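/*
 * ttm_mem_io_lock()/ttm_mem_io_unlock() serialize the driver's
 * io_mem_reserve() and io_mem_free() callbacks.  When the memory type
 * does not use the io reserve LRU (io_reserve_fastpath), the mutex is
 * skipped entirely.
 */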
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

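/*
 * Free up io space by unmapping the first buffer object on the
 * io reserve LRU, so that the caller can retry its io_mem_reserve()
 * call.
 */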
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

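/*
 * Reserve the io space needed to map @mem.  Reservations are refcounted
 * through bus.io_reserved_count; when the driver reports -EAGAIN, an
 * entry is evicted from the io reserve LRU and the reservation is
 * retried.
 */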
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

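/*
 * Map a memory region into kernel address space.  A mapping already
 * provided by the driver (bus.addr) is reused; otherwise io memory is
 * ioremapped with the caching mode implied by the placement flags.
 * For regions that are not io memory, *virtual is left NULL.
 */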
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

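/*
 * Copy a single page between two io mappings, one 32-bit word at a
 * time, using ioread32()/iowrite32() so that the accesses are safe on
 * mapped io memory.
 */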
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

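/*
 * Copy one page from an io mapping into the TTM page array.  The
 * destination page is mapped with the requested page protection, via
 * kmap_atomic_prot() on x86 and vmap()/kmap() elsewhere.
 */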
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

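/*
 * Copy one page from the TTM page array into an io mapping; the mirror
 * image of ttm_copy_io_ttm_page().
 */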
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

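/*
 * Fallback move path: copy the buffer contents page by page with the
 * CPU.  Both the old and the new region are mapped when they are io
 * memory, the pages are copied in whichever direction avoids
 * overwriting data that has not been copied yet, and the old node is
 * released once the copy has finished.
 */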
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret) {
			/* if we fail here don't nuke the mm node
			 * as the bo still owns it */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}

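	/*
	 * If the two regions are in the same memory type and may overlap,
	 * copy backwards (last page first) so that pages are not
	 * clobbered before they have been read, as memmove() does.
	 */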
	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret) {
			/* failing here, means keep old copy as-is */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	spin_unlock(&bdev->fence_lock);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

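/*
 * Translate TTM caching flags into the page protection used for a CPU
 * mapping of the buffer, applying the architecture-specific
 * write-combined or uncached attributes.
 */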
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

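/*
 * Map part of a buffer object into kernel address space, either by
 * kmap()/vmap() of its TTM pages or by ioremapping the underlying io
 * region, and record in @map how the mapping can be undone.
 */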
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

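/*
 * Finish an accelerated (GPU copy) move that the driver has queued.  On
 * eviction the buffer is waited upon and the old node freed right away;
 * otherwise the old memory and fence are handed to a ghost buffer
 * object so that they are released only once the GPU copy has
 * completed.
 */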
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);