/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
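
/*
 * Example (illustrative sketch, not part of this file): a driver's
 * ->move() callback typically handles TT <-> SYSTEM transitions with
 * ttm_bo_move_ttm(), tries an accelerated blit, and falls back to
 * ttm_bo_move_memcpy() when no copy engine is available.  The names
 * mydrv_can_blit() and mydrv_move_blit() are hypothetical driver hooks;
 * a real blit path would finish through ttm_bo_move_accel_cleanup()
 * (see the sketch at the end of this file).
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 bool interruptible, bool no_wait_gpu,
 *				 struct ttm_mem_reg *new_mem)
 *	{
 *		struct ttm_mem_reg *old_mem = &bo->mem;
 *
 *		if (old_mem->mem_type == TTM_PL_TT &&
 *		    new_mem->mem_type == TTM_PL_SYSTEM)
 *			return ttm_bo_move_ttm(bo, evict, no_wait_gpu, new_mem);
 *
 *		if (mydrv_can_blit(bo, new_mem))
 *			return mydrv_move_blit(bo, evict, no_wait_gpu, new_mem);
 *
 *		return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 *	}
 */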

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	if (bo->sync_obj)
		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	else
		fbo->sync_obj = NULL;
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__) || defined(__arm__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
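
/*
 * Example (illustrative sketch): a CPU-mapping path, such as a driver
 * fault handler, can derive the page protection for the mapping from the
 * buffer's placement with ttm_io_prot().  The exact condition below is an
 * assumption modelled on TTM's own fault handling; uncached or
 * write-combined placements must not be mapped with cached protection.
 *
 *	if (bo->mem.bus.is_iomem ||
 *	    !(bo->mem.placement & TTM_PL_FLAG_CACHED))
 *		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
 *						vma->vm_page_prot);
 */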

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);
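
/*
 * Example (illustrative sketch): mapping a whole, already reserved buffer
 * object for CPU access.  "data" and "size" are hypothetical; the helper
 * ttm_kmap_obj_virtual() reports whether the returned pointer is an
 * ioremapped range or ordinary kernel memory.
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virtual;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (ret)
 *		return ret;
 *	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	if (is_iomem)
 *		memcpy_toio((void __iomem *)virtual, data, size);
 *	else
 *		memcpy(virtual, data, size);
 *	ttm_bo_kunmap(&map);
 */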

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
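
/*
 * Example (illustrative sketch): after scheduling a GPU copy, a driver
 * hands the resulting sync object to ttm_bo_move_accel_cleanup(), which
 * either waits (on eviction) or hides the old storage behind a ghost
 * buffer object so the move can be pipelined.  mydrv_copy_pages() and
 * mydrv_fence_unref() are hypothetical driver hooks.
 *
 *	static int mydrv_move_blit(struct ttm_buffer_object *bo, bool evict,
 *				   bool no_wait_gpu,
 *				   struct ttm_mem_reg *new_mem)
 *	{
 *		void *sync_obj;
 *		int ret;
 *
 *		ret = mydrv_copy_pages(bo, &bo->mem, new_mem, &sync_obj);
 *		if (ret)
 *			return ret;
 *
 *		ret = ttm_bo_move_accel_cleanup(bo, sync_obj, evict,
 *						no_wait_gpu, new_mem);
 *		mydrv_fence_unref(&sync_obj);
 *		return ret;
 *	}
 */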