nouveau_bo.c
/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
			       struct ttm_resource *reg);

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nvkm_fb_tile *tile = &fb->tile.region[i];

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		nvkm_fb_tile_fini(fb, i, tile);

	if (pitch)
		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);

	nvkm_fb_tile_prog(fb, i, tile);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct dma_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 zeta)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < fb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && fb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
	return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	WARN_ON(nvbo->pin_refcnt > 0);
	nouveau_bo_del_io_reserve_lru(bo);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);

	/*
	 * If nouveau_bo_new() allocated this buffer, the GEM object was never
	 * initialized, so don't attempt to release it.
	 */
	if (bo->base.dev)
		drm_gem_object_release(&bo->base);

	kfree(nvbo);
}

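/* Round x up to the next multiple of y.  do_div() divides the u64 in
 * place (required for 64-bit division on 32-bit kernels), so the
 * quotient is multiplied back to produce the rounded value.
 */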
static inline u64
roundup_64(u64 x, u32 y)
{
	x += y - 1;
	do_div(x, y);
	return x * y;
}

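/* Fix up the requested size/alignment to satisfy hardware constraints:
 * tiled buffers on pre-Tesla chips need chipset-specific alignment and
 * tile-row-sized multiples, newer chips round to the bo's GPU page
 * size.  The size is always rounded up to the CPU page size last.
 */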
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->client.device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup_64(*size, 32 * nvbo->mode);
			}
		}
	} else {
		*size = roundup_64(*size, (1 << nvbo->page));
		*align = max((1 <<  nvbo->page), *align);
	}

	*size = roundup_64(*size, PAGE_SIZE);
}

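/* Allocate the driver-side bo structure and derive its memory kind,
 * compression setting and GPU page size from the tile flags and the
 * requested domains.  *size and *align may be adjusted to fit.
 */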
struct nouveau_bo *
nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
		 u32 tile_mode, u32 tile_flags)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	struct nvif_mmu *mmu = &cli->mmu;
	struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
	int i, pi = -1;

	if (!*size) {
		NV_WARN(drm, "skipped size %016llx\n", *size);
		return ERR_PTR(-EINVAL);
	}

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->bo.bdev = &drm->ttm.bdev;

	/* This is confusing, and doesn't actually mean we want an uncached
	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
	 * into in nouveau_gem_new().
	 */
	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) {
		/* Determine if we can get a cache-coherent map, forcing
		 * uncached mapping if we can't.
		 */
		if (!nouveau_drm_use_coherent_gpu_mapping(drm))
			nvbo->force_coherent = true;
	}

	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return ERR_PTR(-EINVAL);
		}

		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
	} else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
		nvbo->comp = (tile_flags & 0x00030000) >> 16;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return ERR_PTR(-EINVAL);
		}
	} else {
		nvbo->zeta = (tile_flags & 0x00000007);
	}
	nvbo->mode = tile_mode;
	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);

	/* Determine the desirable target GPU page size for the buffer. */
	for (i = 0; i < vmm->page_nr; i++) {
		/* Because we cannot currently allow VMM maps to fail
		 * during buffer migration, we need to determine page
		 * size for the buffer up-front, and pre-allocate its
		 * page tables.
		 *
		 * Skip page sizes that can't support needed domains.
		 */
		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
		    (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
			continue;
		if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
		    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
			continue;

		/* Select this page size if it's the first that supports
		 * the potential memory domains, or when it's compatible
		 * with the requested compression settings.
		 */
		if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
			pi = i;

		/* Stop once the buffer is larger than the current page size. */
		if (*size >= 1ULL << vmm->page[i].shift)
			break;
	}

	if (WARN_ON(pi < 0)) {
		kfree(nvbo);
		return ERR_PTR(-EINVAL);
	}

	/* Disable compression if suitable settings couldn't be found. */
	if (nvbo->comp && !vmm->page[pi].comp) {
		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
			nvbo->kind = mmu->kind[nvbo->kind];
		nvbo->comp = 0;
	}
	nvbo->page = vmm->page[pi].shift;

	nouveau_bo_fixup_align(nvbo, align, size);

	return nvbo;
}

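/* Hand the bo over to TTM.  If ttm_bo_init() fails it has already
 * called nouveau_bo_del_ttm(), so the caller must not free nvbo again.
 */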
int
nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
		struct sg_table *sg, struct dma_resv *robj)
{
	int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
	size_t acc_size;
	int ret;

	acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));

	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, domain, 0);
	INIT_LIST_HEAD(&nvbo->io_reserve_lru);

	ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
			  &nvbo->placement, align >> PAGE_SHIFT, false,
			  acc_size, sg, robj, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	return 0;
}

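/* Convenience wrapper around nouveau_bo_alloc() + nouveau_bo_init(). */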
int
nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
	       uint32_t domain, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg, struct dma_resv *robj,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
				tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
	if (ret)
		return ret;

	*pnvbo = nvbo;
	return 0;
}

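/* Translate NOUVEAU_GEM_DOMAIN_* bits into a TTM placement list,
 * dropping caching flags that the VRAM BAR or AGP cannot support.
 */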
static void
set_placement_list(struct nouveau_drm *drm, struct ttm_place *pl, unsigned *n,
		   uint32_t domain, uint32_t flags)
{
	*n = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
		struct nvif_mmu *mmu = &drm->client.mmu;
		const u8 type = mmu->type[drm->ttm.type_vram].type;

		pl[*n].mem_type = TTM_PL_VRAM;
		pl[*n].flags = flags & ~TTM_PL_FLAG_CACHED;

		/* Some BARs do not support being ioremapped WC */
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
		    type & NVIF_MEM_UNCACHED)
			pl[*n].flags &= ~TTM_PL_FLAG_WC;

		(*n)++;
	}
	if (domain & NOUVEAU_GEM_DOMAIN_GART) {
		pl[*n].mem_type = TTM_PL_TT;
		pl[*n].flags = flags;

		if (drm->agp.bridge)
			pl[*n].flags &= ~TTM_PL_FLAG_CACHED;

		(*n)++;
	}
	if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
		pl[*n].mem_type = TTM_PL_SYSTEM;
		pl[(*n)++].flags = flags;
	}
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
	unsigned i, fpfn, lpfn;

	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->zeta) {
			fpfn = vram_pages / 2;
			lpfn = ~0;
		} else {
			fpfn = 0;
			lpfn = vram_pages / 2;
		}
		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = fpfn;
			nvbo->placements[i].lpfn = lpfn;
		}
		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = fpfn;
			nvbo->busy_placements[i].lpfn = lpfn;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
			 uint32_t busy)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
						 TTM_PL_MASK_CACHING) |
			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(drm, nvbo->placements, &pl->num_placement,
			   domain, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(drm, nvbo->busy_placements, &pl->num_busy_placement,
			   domain | busy, flags);

	set_placement_range(nvbo, domain);
}

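/* Pin a bo into the given memory domain.  Pins are refcounted; a bo
 * that is already pinned somewhere incompatible (or would need to be
 * made contiguous first) fails with -EBUSY.
 */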
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	bool force = false, evict = false;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
	    domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
		if (!nvbo->contig) {
			nvbo->contig = true;
			force = true;
			evict = true;
		}
	}

	if (nvbo->pin_refcnt) {
		bool error = evict;

		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
			break;
		case TTM_PL_TT:
			error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
			break;
		default:
			break;
		}

		if (error) {
			NV_ERROR(drm, "bo %p pinned elsewhere: "
				      "0x%08x vs 0x%08x\n", bo,
				 bo->mem.mem_type, domain);
			ret = -EBUSY;
		}
		nvbo->pin_refcnt++;
		goto out;
	}

	if (evict) {
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
		ret = nouveau_bo_validate(nvbo, false, false);
		if (ret)
			goto out;
	}

	nvbo->pin_refcnt++;
	nouveau_bo_placement_set(nvbo, domain, 0);

	/* drop pin_refcnt temporarily, so we don't trip the assertion
	 * in nouveau_bo_move() that makes sure we're not trying to
	 * move a pinned buffer
	 */
	nvbo->pin_refcnt--;
	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret)
		goto out;
	nvbo->pin_refcnt++;

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		drm->gem.vram_available -= bo->mem.size;
		break;
	case TTM_PL_TT:
		drm->gem.gart_available -= bo->mem.size;
		break;
	default:
		break;
	}

out:
	if (force && ret)
		nvbo->contig = false;
	ttm_bo_unreserve(bo);
	return ret;
}

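/* Drop one pin reference.  On the final unpin the bo becomes evictable
 * again and the VRAM/GART accounting is credited back.
 */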
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, ref;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);
	if (ref)
		goto out;

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
		break;
	case TTM_PL_TT:
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
		break;
	default:
		break;
	}

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

out:
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);

	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (!nvbo)
		return;

	ttm_bo_kunmap(&nvbo->kmap);
}

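/* DMA cache maintenance for non-coherent mappings: flush CPU writes
 * before device access, invalidate before CPU reads.  Objects with a
 * coherent mapping skip the per-page sync entirely.
 */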
void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
	int i;

	if (!ttm_dma)
		return;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_device(drm->dev->dev,
					   ttm_dma->dma_address[i],
					   PAGE_SIZE, DMA_TO_DEVICE);
}

void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
	int i;

	if (!ttm_dma)
		return;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
					PAGE_SIZE, DMA_FROM_DEVICE);
}

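/* LRU of bos whose BAR mappings can be reclaimed when the aperture
 * fills up; see the -ENOSPC handling in nouveau_ttm_io_mem_reserve().
 */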
void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	list_del_init(&nvbo->io_reserve_lru);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
	if (ret)
		return ret;

	nouveau_bo_sync_for_device(nvbo);

	return 0;
}

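/* Access helpers for a bo mapped via nouveau_bo_map(); index is in
 * units of the access width.  Both iomem and system-memory kmaps are
 * handled.
 */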
void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);

	if (drm->agp.bridge) {
		return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
	}
#endif

	return nouveau_sgdma_create_ttm(bo, page_flags);
}

static int
nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
		    struct ttm_resource *reg)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
#endif
	if (!reg)
		return -EINVAL;
#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge)
		return ttm_agp_bind(ttm, reg);
#endif
	return nouveau_sgdma_bind(bdev, ttm, reg);
}

static void
nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	if (drm->agp.bridge) {
		ttm_agp_unbind(ttm);
		return;
	}
#endif
	nouveau_sgdma_unbind(bdev, ttm);
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
					 NOUVEAU_GEM_DOMAIN_CPU);
		break;
	default:
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
		break;
	}

	*pl = nvbo->placement;
}

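/* Reserve temporary GPU virtual addresses for both ends of a copy-
 * engine transfer.  Both vmas are parked in the old node's vma[] slots
 * so they are released once TTM destroys that resource.
 */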
static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_resource *reg)
{
	struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
	struct nouveau_mem *new_mem = nouveau_mem(reg);
	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
	int ret;

	ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
			   old_mem->mem.size, &old_mem->vma[0]);
	if (ret)
		return ret;

	ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
			   new_mem->mem.size, &old_mem->vma[1]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
done:
	if (ret) {
		nvif_vmm_put(vmm, &old_mem->vma[1]);
		nvif_vmm_put(vmm, &old_mem->vma[0]);
	}
	/* Propagate failures; returning 0 here would let the caller start
	 * a copy with unmapped vmas.
	 */
	return ret;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_resource *new_reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)chan->user.client;
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nvkm_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_resource
	 */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_reg);
		if (ret)
			return ret;
	}

	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo,
								&fence->base,
								evict,
								new_reg);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&cli->mutex);
	return ret;
}

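/* Probe for a copy method.  The table is ordered newest class first;
 * the first object that can be constructed and initialised wins, and
 * if nothing binds the driver falls back to CPU (memcpy) moves.
 */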
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct _method_table {
		const char *name;
		int engine;
		s32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_resource *, struct ttm_resource *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
	};
	const struct _method_table *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_channel *chan;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nvif_object_ctor(&chan->user, "ttmBoMove",
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
				       &drm->ttm.copy);
		if (ret == 0) {
			ret = mthd->init(chan, drm->ttm.copy.handle);
			if (ret) {
				nvif_object_dtor(&drm->ttm.copy);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

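/* Move into system memory: bounce through a temporary GART placement
 * so the copy engine can reach the pages, then let TTM do the last hop.
 */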
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_resource *new_reg)
{
	struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_TT,
		.flags = TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_resource tmp_reg;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_reg = *new_reg;
	tmp_reg.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
	if (ret)
		return ret;

	ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
	if (ret)
		goto out;

	ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, &ctx, new_reg);
out:
	ttm_resource_free(bo, &tmp_reg);
	return ret;
}

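/* Move out of system memory: the mirror of nouveau_bo_move_flipd(). */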
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_resource *new_reg)
{
	struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_TT,
		.flags = TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_resource tmp_reg;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_reg = *new_reg;
	tmp_reg.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, &ctx, &tmp_reg);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);
out:
	ttm_resource_free(bo, &tmp_reg);
	return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
		     struct ttm_resource *new_reg)
{
	struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	nouveau_bo_del_io_reserve_lru(bo);

	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
	    mem->mem.page == nvbo->page) {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			nouveau_vma_map(vma, mem);
		}
	} else {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			WARN_ON(ttm_bo_wait(bo, false, false));
			nouveau_vma_unmap(vma);
		}
	}

	if (new_reg) {
		if (new_reg->mm_node)
			nvbo->offset = (new_reg->start << PAGE_SHIFT);
		else
			nvbo->offset = 0;
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_reg->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_reg->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
					       nvbo->mode, nvbo->zeta);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);

	nv10_bo_put_tile_region(dev, *old_tile, fence);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
		struct ttm_operation_ctx *ctx,
		struct ttm_resource *new_reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_resource *old_reg = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;

	if (nvbo->pin_refcnt)
		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		ttm_bo_move_null(bo, new_reg);
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if (new_reg->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flipd(bo, evict,
						    ctx->interruptible,
						    ctx->no_wait_gpu, new_reg);
		else if (old_reg->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flips(bo, evict,
						    ctx->interruptible,
						    ctx->no_wait_gpu, new_reg);
		else
			ret = nouveau_bo_move_m2mf(bo, evict,
						   ctx->interruptible,
						   ctx->no_wait_gpu, new_reg);
		if (!ret)
			goto out;
	}

	/* Fallback to software copy. */
	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret == 0)
		ret = ttm_bo_move_memcpy(bo, ctx, new_reg);

out:
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->bo.base.vma_node,
					  filp->private_data);
}

static void
nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
			       struct ttm_resource *reg)
{
	struct nouveau_mem *mem = nouveau_mem(reg);

	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
		switch (reg->mem_type) {
		case TTM_PL_TT:
			if (mem->kind)
				nvif_object_unmap_handle(&mem->mem.object);
			break;
		case TTM_PL_VRAM:
			nvif_object_unmap_handle(&mem->mem.object);
			break;
		default:
			break;
		}
	}
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nouveau_mem *mem = nouveau_mem(reg);
	int ret;

	mutex_lock(&drm->ttm.io_reserve_mutex);
retry:
	switch (reg->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		ret = 0;
		goto out;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (drm->agp.bridge) {
			reg->bus.offset = (reg->start << PAGE_SHIFT) +
				drm->agp.base;
			reg->bus.is_iomem = !drm->agp.cma;
		}
#endif
		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
		    !mem->kind) {
			/* untiled */
			ret = 0;
			break;
		}
		fallthrough;	/* tiled memory */
	case TTM_PL_VRAM:
		reg->bus.offset = (reg->start << PAGE_SHIFT) +
			device->func->resource_addr(device, 1);
		reg->bus.is_iomem = true;
		if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
			union {
				struct nv50_mem_map_v0 nv50;
				struct gf100_mem_map_v0 gf100;
			} args;
			u64 handle, length;
			u32 argc = 0;

			switch (mem->mem.object.oclass) {
			case NVIF_CLASS_MEM_NV50:
				args.nv50.version = 0;
				args.nv50.ro = 0;
				args.nv50.kind = mem->kind;
				args.nv50.comp = mem->comp;
				argc = sizeof(args.nv50);
				break;
			case NVIF_CLASS_MEM_GF100:
				args.gf100.version = 0;
				args.gf100.ro = 0;
				args.gf100.kind = mem->kind;
				argc = sizeof(args.gf100);
				break;
			default:
				WARN_ON(1);
				break;
			}

			ret = nvif_object_map_handle(&mem->mem.object,
						     &args, argc,
						     &handle, &length);
			if (ret != 1) {
				if (WARN_ON(ret == 0))
					ret = -EINVAL;
				goto out;
			}

			reg->bus.offset = handle;
			ret = 0;
		}
		break;
	default:
		ret = -EINVAL;
	}

out:
	if (ret == -ENOSPC) {
		struct nouveau_bo *nvbo;

		nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
						typeof(*nvbo),
						io_reserve_lru);
		if (nvbo) {
			list_del_init(&nvbo->io_reserve_lru);
			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
					   bdev->dev_mapping);
			nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
			goto retry;
		}
	}
	mutex_unlock(&drm->ttm.io_reserve_mutex);
	return ret;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	nouveau_ttm_io_mem_free_locked(drm, reg);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
	int i, ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nvbo->kind)
			return 0;

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
						 0);

			ret = nouveau_bo_validate(nvbo, false, false);
			if (ret)
				return ret;
		}
		return 0;
	}

	/* make sure bo is in mappable vram */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
	    bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	for (i = 0; i < nvbo->placement.num_placement; ++i) {
		nvbo->placements[i].fpfn = 0;
		nvbo->placements[i].lpfn = mappable;
	}

	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
		nvbo->busy_placements[i].fpfn = 0;
		nvbo->busy_placements[i].lpfn = mappable;
	}

	nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}

static int
nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
			struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct device *dev;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm_tt_set_populated(ttm);
		return 0;
	}

	drm = nouveau_bdev(bdev);
	dev = drm->dev->dev;

#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge) {
		return ttm_pool_populate(ttm, ctx);
	}
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev, ctx);
	}
#endif
	return ttm_populate_and_map_pages(dev, ttm_dma, ctx);
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
			  struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct device *dev;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(bdev);
	dev = drm->dev->dev;

#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge) {
		ttm_pool_unpopulate(ttm);
		return;
	}
#endif

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev);
		return;
	}
#endif

	ttm_unmap_and_unpopulate_pages(dev, ttm_dma);
}

static void
nouveau_ttm_tt_destroy(struct ttm_bo_device *bdev,
		       struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	if (drm->agp.bridge) {
		ttm_agp_unbind(ttm);
		ttm_tt_destroy_common(bdev, ttm);
		ttm_agp_destroy(ttm);
		return;
	}
#endif
	nouveau_sgdma_destroy(bdev, ttm);
}

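/* Attach a fence to the bo's reservation object, either as the
 * exclusive (write) fence or as one of the shared (read) fences.
 */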
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
	struct dma_resv *resv = nvbo->bo.base.resv;

	if (exclusive)
		dma_resv_add_excl_fence(resv, &fence->base);
	else if (fence)
		dma_resv_add_shared_fence(resv, &fence->base);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.ttm_tt_bind = &nouveau_ttm_tt_bind,
	.ttm_tt_unbind = &nouveau_ttm_tt_unbind,
	.ttm_tt_destroy = &nouveau_ttm_tt_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};