/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

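/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: This is an eviction rather than an ordinary move.
 * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Move a buffer backed by a TTM page array: unbind the TTM and free
 * the old node when leaving a non-system placement, set the caching
 * mode of the pages, and rebind the TTM unless the destination is
 * system memory.
 */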
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait_reserve,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

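/**
 * ttm_mem_io_reserve
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: The memory region to reserve I/O space for.
 *
 * Ask the driver to reserve I/O (aperture) space for @mem unless a
 * reservation is already recorded in mem->bus.io_reserved.
 */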
int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	int ret;

	if (!mem->bus.io_reserved) {
		mem->bus.io_reserved = true;
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

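/**
 * ttm_mem_io_free
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: The memory region whose I/O space to release.
 *
 * Release an I/O space reservation made by ttm_mem_io_reserve(), if any.
 */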
void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	if (bdev->driver->io_mem_reserve) {
		if (mem->bus.io_reserved) {
			mem->bus.io_reserved = false;
			bdev->driver->io_mem_free(bdev, mem);
		}
	}
}

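/**
 * ttm_mem_reg_ioremap
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: The memory region to map.
 * @virtual: Returns the kernel virtual address of the mapping, or
 * NULL if @mem is not an I/O memory region.
 *
 * Map a whole I/O memory region into kernel address space, using a
 * write-combined mapping when the placement asks for one.
 */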
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	int ret;
	void *addr;

	*virtual = NULL;
	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			ttm_mem_io_free(bdev, mem);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

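/**
 * ttm_mem_reg_iounmap
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: The memory region that was mapped.
 * @virtual: The virtual address returned by ttm_mem_reg_ioremap().
 *
 * Tear down a mapping set up by ttm_mem_reg_ioremap() and release the
 * I/O space reservation.
 */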
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	ttm_mem_io_free(bdev, mem);
}

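/*
 * Copy page number @page between two I/O mappings, 32 bits at a time.
 */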
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

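/*
 * Copy one page from an I/O mapping into the TTM page array, mapping
 * the destination page with the page protection requested in @prot.
 */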
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm_tt_get_page(ttm, page);
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

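	/*
	 * kmap_atomic_prot() is only available on x86; elsewhere, fall
	 * back to vmap() when a non-default page protection is needed,
	 * and to plain kmap() otherwise.
	 */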
#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

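/*
 * Copy one page from the TTM page array into an I/O mapping, mapping
 * the source page with the page protection requested in @prot.
 */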
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm_tt_get_page(ttm, page);
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

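/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: This is an eviction rather than an ordinary move.
 * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function: map the old and the new placement and copy
 * the contents page by page with the CPU. When moving to a fixed
 * memory type (one not backed by system pages), the TTM is unbound
 * and destroyed afterwards.
 */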
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	add = 0;
	dir = 1;

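	/*
	 * If source and destination are in the same memory type and the
	 * ranges may overlap, copy backwards (highest page first) so
	 * that no page is overwritten before it has been read.
	 */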
	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	ttm_bo_free_old_node(bo);

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;

	*new_obj = fbo;
	return 0;
}

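/**
 * ttm_io_prot
 *
 * @caching_flags: The TTM_PL_FLAG_* caching flags of the placement.
 * @tmp: The page protection to start from.
 *
 * Return a page protection adjusted, in an architecture-specific way,
 * to the caching mode requested by the placement flags.
 */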
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	struct page *d;
	int i;

	BUG_ON(!ttm);
	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm_tt_get_page(ttm, start_page);
		map->virtual = kmap(map->page);
	} else {
		/*
		 * Populate the part we're mapping.
		 */
		for (i = start_page; i < start_page + num_pages; ++i) {
			d = ttm_tt_get_page(ttm, i);
			if (!d)
				return -ENOMEM;
		}

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

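/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: Returns the mapping state.
 *
 * Set up a kernel virtual mapping of part of a buffer object, using
 * ioremap, vmap or kmap as appropriate. A typical calling sequence
 * (a sketch only) pairs it with ttm_kmap_obj_virtual() and
 * ttm_bo_kunmap():
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *
 *	if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *		void *virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		... access the buffer through virtual ...
 *		ttm_bo_kunmap(&map);
 *	}
 *
 * Returns:
 * -EINVAL: Invalid range.
 * -ENOMEM: Out of memory or failure to map.
 */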
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

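/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the mapping to tear down.
 *
 * Unmap a mapping set up by ttm_bo_kmap(), undoing whichever of
 * ioremap, vmap or kmap was used to establish it.
 */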
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

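/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when the move is complete.
 * @sync_obj_arg: An argument to pass to the sync object idle / wait
 * functions.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Utility function to be called after an accelerated move has been
 * scheduled: fence the buffer with @sync_obj and, for ordinary moves,
 * hang the old placement on a ghost buffer object that releases it
 * once the GPU operation has completed.
 */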
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait_reserve,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		ttm_bo_free_old_node(bo);
		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);