/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>

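/* TTM "destroy" callback: warn if the bo is still bound to a GEM object,
 * release its tile region and channel VM area, then free the nouveau_bo.
 */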
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	if (nvbo->vma.node) {
		nouveau_vm_unmap(&nvbo->vma);
		nouveau_vm_put(&nvbo->vma);
	}
	kfree(nvbo);
}

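/* Fix up the requested size/alignment: pre-NV50 tiled buffers need
 * chipset-specific alignment and pitch rounding, while NV50+ rounds to
 * the channel VM's small or large page size.
 */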
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size, int *page_shift)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		if (likely(dev_priv->chan_vm)) {
			if (!(flags & TTM_PL_FLAG_TT) &&  *size > 256 * 1024)
				*page_shift = dev_priv->chan_vm->lpg_shift;
			else
				*page_shift = dev_priv->chan_vm->spg_shift;
		} else {
			*page_shift = 12;
		}

		*size = roundup(*size, (1 << *page_shift));
		*align = max((1 << *page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

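/* Allocate and initialise a nouveau_bo: fix up size/alignment, carve out a
 * channel VM region when one exists, set the initial placement, and hand
 * the object to TTM (which calls nouveau_bo_del_ttm on failure).
 */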
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret = 0, page_shift = 0;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
	align >>= PAGE_SHIFT;

	if (dev_priv->chan_vm) {
		ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
				     NV_MEM_ACCESS_RW, &nvbo->vma);
		if (ret) {
			kfree(nvbo);
			return ret;
		}
	}

	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}
	nvbo->channel = NULL;

	if (nvbo->vma.node)
		nvbo->bo.offset = nvbo->vma.offset;
	*pnvbo = nvbo;
	return 0;
}

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 2) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

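/* Pin a buffer into the requested memory type: refuse if it is already
 * pinned somewhere else, otherwise validate with TTM_PL_FLAG_NO_EVICT set
 * and update the aperture accounting.  nouveau_bo_unpin() reverses this.
 */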
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

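/* 16/32-bit accessors for a kmapped bo: use the io* helpers when the
 * mapping is I/O memory, plain loads/stores otherwise.
 */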
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_PDMA:
	case NOUVEAU_GART_HW:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

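/* Describe each TTM memory type: system memory is cacheable, VRAM uses the
 * nouveau VRAM manager on NV50+ (the generic range manager otherwise), and
 * TT is backed by AGP or the in-house page tables depending on GART type.
 */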
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (dev_priv->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (dev_priv->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
			man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case NOUVEAU_GART_PDMA:
		case NOUVEAU_GART_HW:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			man->gpu_offset = dev_priv->gart_info.aper_base;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}


/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

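/* Emit a fence on the channel that performed the copy and hand it to TTM so
 * the move (and destruction of the old node) can be pipelined behind it.
 */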
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

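/* Copy between the old and new backing store with the NVC0 M2MF engine,
 * PAGE_SIZE-pitch linear transfers of up to 2047 lines per submission.
 */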
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *old_node = old_mem->mm_node;
	struct nouveau_mem *new_node = new_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u32 page_count = new_mem->num_pages;
	u64 src_offset, dst_offset;
	int ret;

	src_offset = old_node->tmp_vma.offset;
	if (new_node->tmp_vma.node)
		dst_offset = new_node->tmp_vma.offset;
	else
		dst_offset = nvbo->vma.offset;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

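/* NV50 M2MF copy: program linear or tiled source/destination layouts
 * depending on whether the memory is tiled VRAM, moving up to 4MiB of
 * data per loop iteration.
 */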
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *old_node = old_mem->mm_node;
	struct nouveau_mem *new_node = new_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset, dst_offset;
	int ret;

	src_offset = old_node->tmp_vma.offset;
	if (new_node->tmp_vma.node)
		dst_offset = new_node->tmp_vma.offset;
	else
		dst_offset = nvbo->vma.offset;

	while (length) {
		u32 amount, stride, height;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

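/* Pre-NV50 M2MF copy using the channel's VRAM/GART ctxdma objects,
 * PAGE_SIZE-pitch transfers of up to 2047 lines per submission.
 */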
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF,
				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

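/* Hardware-assisted move: pick a channel (the bo's own, or the kernel
 * channel), map the old backing store into the channel VM on NV50+, then
 * dispatch to the per-generation copy routine and fence the result.
 */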
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan) {
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}

	/* create temporary vma for old memory, this will get cleaned
	 * up after ttm destroys the ttm_mem_reg
	 */
	if (dev_priv->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;
		if (!node->tmp_vma.node) {
			u32 page_shift = nvbo->vma.node->type;
			if (old_mem->mem_type == TTM_PL_TT)
				page_shift = nvbo->vma.vm->spg_shift;

			ret = nouveau_vm_get(chan->vm,
					     old_mem->num_pages << PAGE_SHIFT,
					     page_shift, NV_MEM_ACCESS_RO,
					     &node->tmp_vma);
			if (ret)
				goto out;
		}

		if (old_mem->mem_type == TTM_PL_VRAM)
			nouveau_vm_map(&node->tmp_vma, node);
		else {
			nouveau_vm_map_sg(&node->tmp_vma, 0,
					  old_mem->num_pages << PAGE_SHIFT,
					  node, node->pages);
		}
	}

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
	if (dev_priv->card_type < NV_C0)
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

out:
	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);
	return ret;
}

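/* Move towards system memory: blit into a GART-placed bounce buffer with
 * the copy engine first, then let ttm_bo_move_ttm() finish the move.
 */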
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	if (dev_priv->card_type >= NV_50) {
		struct nouveau_bo *nvbo = nouveau_bo(bo);
		struct nouveau_mem *node = tmp_mem.mm_node;
		struct nouveau_vma *vma = &nvbo->vma;
		if (vma->node->type != vma->vm->spg_shift)
			vma = &node->tmp_vma;
		nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT,
				  node, node->pages);
	}

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);

	if (dev_priv->card_type >= NV_50) {
		struct nouveau_bo *nvbo = nouveau_bo(bo);
		nouveau_vm_unmap(&nvbo->vma);
	}

	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

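/* Move out of system memory: bind the pages into a GART-placed bounce
 * buffer first, then use the copy engine to reach the final placement.
 */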
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

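/* TTM move notification: re-point the bo's VM mapping at the new VRAM node
 * or GART pages, or drop the mapping when neither applies.
 */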
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = new_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma = &nvbo->vma;

	if (!vma->vm)
		return;

	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		nouveau_vm_map(vma, node);
		break;
	case TTM_PL_TT:
		if (vma->node->type != vma->vm->spg_shift) {
			nouveau_vm_unmap(vma);
			vma = &node->tmp_vma;
		}
		nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
				  node, node->pages);
		break;
	default:
		nouveau_vm_unmap(&nvbo->vma);
		break;
	}
}

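/* Set up (and below, tear down) the tile region describing a pre-NV50 bo's
 * tiling state; only VRAM placements on NV10+ get a region.
 */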
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}

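/* Main TTM move hook: handle the "fake" copy for unpopulated system bos,
 * fall back to memcpy when no channel is up or the hardware copy fails,
 * and maintain pre-NV50 tile regions across the move.
 */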
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (dev_priv->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

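/* Work out the bus address TTM should map for a region: the AGP aperture
 * for AGP GART, BAR1 directly for VRAM on cards without a BAR1 VM, and a
 * fresh BAR1 VM mapping otherwise.
 */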
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
	{
		struct nouveau_mem *node = mem->mm_node;
		u8 page_shift;

		if (!dev_priv->bar1_vm) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = pci_resource_start(dev->pdev, 1);
			mem->bus.is_iomem = true;
			break;
		}

		if (dev_priv->card_type == NV_C0)
			page_shift = node->page_shift;
		else
			page_shift = 12;

		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
				     page_shift, NV_MEM_ACCESS_RW,
				     &node->bar_vma);
		if (ret)
			return ret;

		nouveau_vm_map(&node->bar_vma, node);
		if (ret) {
			nouveau_vm_put(&node->bar_vma);
			return ret;
		}

		mem->bus.offset = node->bar_vma.offset;
		if (dev_priv->card_type == NV_50) /*XXX*/
			mem->bus.offset -= 0x0020000000ULL;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
	}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;

	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
		return;

	if (!node->bar_vma.node)
		return;

	nouveau_vm_unmap(&node->bar_vma);
	nouveau_vm_put(&node->bar_vma);
}

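/* CPU fault notification: bos that are neither in VRAM nor NV50+ tiled need
 * nothing; anything else must lie within the CPU-mappable part of VRAM and
 * is migrated there if necessary.
 */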
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;


	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}

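/* Attach a new fence as the bo's sync object under the fence lock, dropping
 * the reference to whichever fence was there before.
 */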
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
	.sync_obj_wait = __nouveau_fence_wait,
	.sync_obj_flush = __nouveau_fence_flush,
	.sync_obj_unref = __nouveau_fence_unref,
	.sync_obj_ref = __nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};