/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nvkm_fb *fb = device->fb;
	struct nvkm_fb_tile *tile = &fb->tile.region[i];

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		nvkm_fb_tile_fini(fb, i, tile);

	if (pitch)
		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);

	nvkm_fb_tile_prog(fb, i, tile);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct dma_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 zeta)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < fb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && fb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
	return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem.filp))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	WARN_ON(nvbo->pin_refcnt > 0);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

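/* Round x up to a multiple of y for 64-bit sizes; do_div() is used so the
 * division also works on 32-bit platforms.  For example,
 * roundup_64(0x1234, 0x1000) yields 0x2000.
 */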
static inline u64
roundup_64(u64 x, u32 y)
{
	x += y - 1;
	do_div(x, y);
	return x * y;
}

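/* Pre-Tesla chips need the backing store of tiled buffers padded to the
 * tile-mode granularity, with a chipset-dependent base alignment; Tesla
 * and newer only round to the bo's GPU page size.  The size is always
 * rounded up to a whole number of CPU pages at the end.
 */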
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, u64 *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->client.device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup_64(*size, 32 * nvbo->mode);
			}
		}
	} else {
		*size = roundup_64(*size, (1 << nvbo->page));
		*align = max((1 << nvbo->page), *align);
	}

	*size = roundup_64(*size, PAGE_SIZE);
}

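/* Allocate and initialise a nouveau_bo.  tile_flags is decoded per GPU
 * family: Fermi and newer carry the storage-type "kind" (with compression
 * derived from the PTE storage-type map), Tesla carries kind/comp
 * bitfields, and earlier chips only a zeta (depth) selector.
 */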
int
nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg, struct reservation_object *robj,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;

	if (!size) {
		NV_WARN(drm, "skipped size %016llx\n", size);
		return -EINVAL;
	}

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->bo.bdev = &drm->ttm.bdev;
	nvbo->cli = cli;

	if (!nvxx_device(&drm->client.device)->func->cpu_coherent)
		nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;

	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
		nvbo->comp = gf100_pte_storage_type_map[nvbo->kind] != nvbo->kind;
	} else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
		nvbo->comp = (tile_flags & 0x00030000) >> 16;
	} else {
		nvbo->zeta = (tile_flags & 0x00000007);
	}
	nvbo->mode = tile_mode;
	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);

	nvbo->page = 12;
	if (drm->client.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page = drm->client.vm->mmu->lpg_shift;
		else {
			if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
				nvbo->kind = gf100_pte_storage_type_map[nvbo->kind];
			nvbo->comp = 0;
		}
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
			  robj, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
	unsigned i, fpfn, lpfn;

	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->zeta) {
			fpfn = vram_pages / 2;
			lpfn = ~0;
		} else {
			fpfn = 0;
			lpfn = vram_pages / 2;
		}
		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = fpfn;
			nvbo->placements[i].lpfn = lpfn;
		}
		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = fpfn;
			nvbo->busy_placements[i].lpfn = lpfn;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
						 TTM_PL_MASK_CACHING) |
			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	bool force = false, evict = false;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
	    memtype == TTM_PL_FLAG_VRAM && contig) {
		if (!nvbo->contig) {
			nvbo->contig = true;
			force = true;
			evict = true;
		}
	}

	if (nvbo->pin_refcnt) {
		if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
			NV_ERROR(drm, "bo %p pinned elsewhere: "
				      "0x%08x vs 0x%08x\n", bo,
				 1 << bo->mem.mem_type, memtype);
			ret = -EBUSY;
		}
		nvbo->pin_refcnt++;
		goto out;
	}

	if (evict) {
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
		ret = nouveau_bo_validate(nvbo, false, false);
		if (ret)
			goto out;
	}

	nvbo->pin_refcnt++;
	nouveau_bo_placement_set(nvbo, memtype, 0);

	/* drop pin_refcnt temporarily, so we don't trip the assertion
	 * in nouveau_bo_move() that makes sure we're not trying to
	 * move a pinned buffer
	 */
	nvbo->pin_refcnt--;
	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret)
		goto out;
	nvbo->pin_refcnt++;

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		drm->gem.vram_available -= bo->mem.size;
		break;
	case TTM_PL_TT:
		drm->gem.gart_available -= bo->mem.size;
		break;
	default:
		break;
	}

out:
	if (force && ret)
		nvbo->contig = false;
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, ref;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);
	if (ref)
		goto out;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

out:
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);

	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (!nvbo)
		return;

	ttm_bo_kunmap(&nvbo->kmap);
}

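/* On platforms without coherent DMA, CPU writes must be flushed to the
 * device (and device writes made visible to the CPU) around each access;
 * these helpers sync every page of the bo's DMA-mapped ttm individually,
 * and are no-ops for coherent or unpopulated objects.
 */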
void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
	int i;

	if (!ttm_dma)
		return;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
					   PAGE_SIZE, DMA_TO_DEVICE);
}

void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
	int i;

	if (!ttm_dma)
		return;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
					PAGE_SIZE, DMA_FROM_DEVICE);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			      interruptible, no_wait_gpu);
	if (ret)
		return ret;

	nouveau_bo_sync_for_device(nvbo);

	return 0;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	if (drm->agp.bridge) {
		return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
					 page_flags, dummy_read);
	}
#endif

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			/* Some BARs do not support being ioremapped WC */
			if (nvxx_bar(&drm->client.device)->iomap_uncached) {
				man->available_caching = TTM_PL_FLAG_UNCACHED;
				man->default_caching = TTM_PL_FLAG_UNCACHED;
			}

			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		break;
	case TTM_PL_TT:
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
			man->func = &nouveau_gart_manager;
		else
		if (!drm->agp.bridge)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.bridge) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}

		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}


static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle & 0x0000ffff);
		FIRE_RING (chan);
	}
	return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nvkm_mem *mem = old_reg->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING  (chan, upper_32_bits(mem->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(mem->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(mem->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(mem->vma[1].offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, new_reg->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
	}
	return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nvkm_mem *mem = old_reg->mm_node;
	u64 src_offset = mem->vma[0].offset;
	u64 dst_offset = mem->vma[1].offset;
	u32 page_count = new_reg->num_pages;
	int ret;

	page_count = new_reg->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nvkm_mem *mem = old_reg->mm_node;
	u64 src_offset = mem->vma[0].offset;
	u64 dst_offset = mem->vma[1].offset;
	u32 page_count = new_reg->num_pages;
	int ret;

	page_count = new_reg->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nvkm_mem *mem = old_reg->mm_node;
	u64 src_offset = mem->vma[0].offset;
	u64 dst_offset = mem->vma[1].offset;
	u32 page_count = new_reg->num_pages;
	int ret;

	page_count = new_reg->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nvkm_mem *mem = old_reg->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING  (chan, upper_32_bits(mem->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(mem->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(mem->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(mem->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* COPY */);
		OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
	}
	return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nvkm_mem *mem = old_reg->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
		OUT_RING  (chan, upper_32_bits(mem->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(mem->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(mem->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(mem->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING  (chan, chan->drm->ntfy.handle);
		OUT_RING  (chan, chan->vram.handle);
		OUT_RING  (chan, chan->vram.handle);
	}

	return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	struct nvkm_mem *mem = old_reg->mm_node;
	u64 length = (new_reg->num_pages << PAGE_SHIFT);
	u64 src_offset = mem->vma[0].offset;
	u64 dst_offset = mem->vma[1].offset;
	int src_tiled = !!mem->memtype;
	int dst_tiled = !!((struct nvkm_mem *)new_reg->mm_node)->memtype;
	int ret;

	while (length) {
		u32 amount, stride, height;

		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
		if (ret)
			return ret;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (src_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (dst_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING  (chan, chan->drm->ntfy.handle);
	}

	return ret;
}

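/* Select the DMA context object covering a memory region on NV04-class
 * hardware: the GART ctxdma for TT placements, the VRAM ctxdma otherwise.
 */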
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *reg)
{
	if (reg->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return chan->vram.handle;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
	u32 src_offset = old_reg->start << PAGE_SHIFT;
	u32 dst_offset = new_reg->start << PAGE_SHIFT;
	u32 page_count = new_reg->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg));

	page_count = new_reg->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_mem_reg *reg)
{
	struct nvkm_mem *old_mem = bo->mem.mm_node;
	struct nvkm_mem *new_mem = reg->mm_node;
	u64 size = (u64)reg->num_pages << PAGE_SHIFT;
	int ret;

	ret = nvkm_vm_get(drm->client.vm, size, old_mem->page_shift,
			  NV_MEM_ACCESS_RW, &old_mem->vma[0]);
	if (ret)
		return ret;

	ret = nvkm_vm_get(drm->client.vm, size, new_mem->page_shift,
			  NV_MEM_ACCESS_RW, &old_mem->vma[1]);
	if (ret) {
		nvkm_vm_put(&old_mem->vma[0]);
		return ret;
	}

	nvkm_vm_map(&old_mem->vma[0], old_mem);
	nvkm_vm_map(&old_mem->vma[1], new_mem);
	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)chan->user.client;
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nvkm_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_reg);
		if (ret)
			return ret;
	}

	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo,
								&fence->base,
								evict,
								new_reg);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&cli->mutex);
	return ret;
}

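/* Probe, in order of preference, for an engine/class usable for buffer
 * copies, falling back to CPU (memcpy) moves if none initialises.  Note
 * the empty sentinel entry in the table: the loop terminates on a NULL
 * ->exec, so the 0x88b4 CRYPT entry below it is never reached (it appears
 * to be deliberately disabled).
 */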
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		s32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_channel *chan;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nvif_object_init(&chan->user,
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
				       &drm->ttm.copy);
		if (ret == 0) {
			ret = mthd->init(chan, drm->ttm.copy.handle);
			if (ret) {
				nvif_object_fini(&drm->ttm.copy);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

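/* The copy engines can only address memory mapped into the GPU's VM, so
 * moves to or from plain system memory are staged through a GART (TT)
 * temporary: flipd copies into a TT placement before flipping the bo out
 * to system memory, flips does the reverse.
 */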
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_reg;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_reg = *new_reg;
	tmp_reg.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_reg);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_reg);
out:
	ttm_bo_mem_put(bo, &tmp_reg);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_reg;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_reg = *new_reg;
	tmp_reg.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_reg);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_reg);
	return ret;
}

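/* Keep GPU virtual mappings in step with a bo's backing store: remap every
 * tracked vma when the new placement is GPU-visible with a matching page
 * size, otherwise wait for idle and unmap them.
 */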
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
		     struct ttm_mem_reg *new_reg)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *mem = new_reg ? new_reg->mm_node : NULL;
	struct nvkm_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
	    mem->page_shift == nvbo->page) {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			nvkm_vm_map(vma, mem);
		}
	} else {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			WARN_ON(ttm_bo_wait(bo, false, false));
			nvkm_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_reg->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_reg->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
					       nvbo->mode, nvbo->zeta);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct dma_fence *fence = reservation_object_get_excl(bo->resv);

	nv10_bo_put_tile_region(dev, *old_tile, fence);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_reg = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	ret = ttm_bo_wait(bo, intr, no_wait_gpu);
	if (ret)
		return ret;

	if (nvbo->pin_refcnt)
		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_reg;
		new_reg->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if (new_reg->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flipd(bo, evict, intr,
						    no_wait_gpu, new_reg);
		else if (old_reg->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flips(bo, evict, intr,
						    no_wait_gpu, new_reg);
		else
			ret = nouveau_bo_move_m2mf(bo, evict, intr,
						   no_wait_gpu, new_reg);
		if (!ret)
			goto out;
	}

	/* Fallback to software copy. */
	ret = ttm_bo_wait(bo, intr, no_wait_gpu);
	if (ret == 0)
		ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_reg);

out:
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->gem.vma_node,
					  filp->private_data);
}

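/* Tell TTM how to CPU-map a region: system memory needs nothing, AGP has
 * a fixed aperture, and VRAM is exposed through BAR1.  On Tesla and newer
 * a BAR1 VM mapping (mem->bar_vma) is created so tiled memory can be
 * accessed through the aperture.
 */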
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
{
	struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nvkm_mem *mem = reg->mm_node;
	int ret;

	reg->bus.addr = NULL;
	reg->bus.offset = 0;
	reg->bus.size = reg->num_pages << PAGE_SHIFT;
	reg->bus.base = 0;
	reg->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (reg->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (drm->agp.bridge) {
			reg->bus.offset = reg->start << PAGE_SHIFT;
			reg->bus.base = drm->agp.base;
			reg->bus.is_iomem = !drm->agp.cma;
		}
#endif
		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->memtype)
			/* untiled */
			break;
		/* fallthrough, tiled memory */
	case TTM_PL_VRAM:
		reg->bus.offset = reg->start << PAGE_SHIFT;
		reg->bus.base = device->func->resource_addr(device, 1);
		reg->bus.is_iomem = true;
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
			int page_shift = 12;
			if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
				page_shift = mem->page_shift;

			ret = nvkm_vm_get(bar, mem->size << 12, page_shift,
					  NV_MEM_ACCESS_RW, &mem->bar_vma);
			if (ret)
				return ret;

			nvkm_vm_map(&mem->bar_vma, mem);
			reg->bus.offset = mem->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
{
	struct nvkm_mem *mem = reg->mm_node;

	if (!mem->bar_vma.node)
		return;

	nvkm_vm_unmap(&mem->bar_vma);
	nvkm_vm_put(&mem->bar_vma);
}

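/* Called on CPU faults: pre-Tesla chips can only CPU-map the part of VRAM
 * covered by BAR1, so buffers outside the mappable window are constrained
 * to it and revalidated; tiled bos (kind != 0) sitting in system memory
 * are migrated to TT so their tiling layout can take effect.
 */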
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
	int i, ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nvbo->kind)
			return 0;

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);

			ret = nouveau_bo_validate(nvbo, false, false);
			if (ret)
				return ret;
		}
		return 0;
	}

	/* make sure bo is in mappable vram */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
	    bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	for (i = 0; i < nvbo->placement.num_placement; ++i) {
		nvbo->placements[i].fpfn = 0;
		nvbo->placements[i].lpfn = mappable;
	}

	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
		nvbo->busy_placements[i].fpfn = 0;
		nvbo->busy_placements[i].lpfn = mappable;
	}

	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}

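/* Populate a ttm's backing pages and DMA mappings.  SG "slave" ttms just
 * mirror the importer's page array; otherwise the AGP or swiotlb-backed
 * paths are used when available, and the fallback maps each pool page
 * with streaming DMA, unwinding on any mapping failure.
 */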
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nvkm_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	device = nvxx_device(&drm->client.device);
	dev = drm->dev;
	pdev = device->dev;

#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		dma_addr_t addr;

		addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
				    DMA_BIDIRECTIONAL);

		if (dma_mapping_error(pdev, addr)) {
			while (i--) {
				dma_unmap_page(pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}

		ttm_dma->dma_address[i] = addr;
	}
	return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nvkm_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	device = nvxx_device(&drm->client.device);
	dev = drm->dev;
	pdev = device->dev;

#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
	struct reservation_object *resv = nvbo->bo.resv;

	if (exclusive)
		reservation_object_add_excl_fence(resv, &fence->base);
	else if (fence)
		reservation_object_add_shared_fence(resv, &fence->base);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
	.io_mem_pfn = ttm_bo_default_io_mem_pfn,
};

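/* Each bo tracks one nvkm_vma per client address space in vma_list; the
 * helpers below look up, create (mapping immediately when the bo is
 * resident with a matching page size), and destroy those per-VM mappings.
 */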
struct nvkm_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm)
{
	struct nvkm_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
		   struct nvkm_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	struct nvkm_mem *mem = nvbo->bo.mem.mm_node;
	int ret;

	ret = nvkm_vm_get(vm, size, nvbo->page, NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
	    mem->page_shift == nvbo->page)
		nvkm_vm_map(vma, nvbo->bo.mem.mm_node);

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
			nvkm_vm_unmap(vma);
		nvkm_vm_put(vma);
		list_del(&vma->head);
	}
}