/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "vmm.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>

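/* A PTP is a 0x1000-byte parent allocation that smaller page tables are
 * sub-allocated from; each bit of 'free' tracks one (1 << shift)-byte slot.
 */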
struct nvkm_mmu_ptp {
	struct nvkm_mmu_pt *pt;
	struct list_head head;
	u8  shift;
	u16 mask;
	u16 free;
};

static void
nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
{
	const int slot = pt->base >> pt->ptp->shift;
	struct nvkm_mmu_ptp *ptp = pt->ptp;

	/* If there were no free slots in the parent allocation before,
	 * there will be now, so return PTP to the cache.
	 */
	if (!ptp->free)
		list_add(&ptp->head, &mmu->ptp.list);
	ptp->free |= BIT(slot);

	/* If there are no more sub-allocations, destroy the PTP. */
	if (ptp->free == ptp->mask) {
		nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
		list_del(&ptp->head);
		kfree(ptp);
	}

	kfree(pt);
}

struct nvkm_mmu_pt *
nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
{
	struct nvkm_mmu_pt *pt;
	struct nvkm_mmu_ptp *ptp;
	int slot;

	if (!(pt = kzalloc(sizeof(*pt), GFP_KERNEL)))
		return NULL;

	ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
	if (!ptp) {
		/* Need to allocate a new parent to sub-allocate from. */
		if (!(ptp = kmalloc(sizeof(*ptp), GFP_KERNEL))) {
			kfree(pt);
			return NULL;
		}

		ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);
		if (!ptp->pt) {
			kfree(ptp);
			kfree(pt);
			return NULL;
		}

		ptp->shift = order_base_2(size);
		slot = nvkm_memory_size(ptp->pt->memory) >> ptp->shift;
		ptp->mask = (1 << slot) - 1;
		ptp->free = ptp->mask;
		list_add(&ptp->head, &mmu->ptp.list);
	}
	pt->ptp = ptp;
	pt->sub = true;

	/* Sub-allocate from parent object, removing PTP from cache
	 * if there are no free slots left.
	 */
	slot = __ffs(ptp->free);
	ptp->free &= ~BIT(slot);
	if (!ptp->free)
		list_del(&ptp->head);

	pt->memory = pt->ptp->pt->memory;
	pt->base = slot << ptp->shift;
	pt->addr = pt->ptp->pt->addr + pt->base;
	return pt;
}

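/* Per-size cache of recently freed page table allocations. */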
struct nvkm_mmu_ptc {
	struct list_head head;
	struct list_head item;
	u32 size;
	u32 refs;
};

static inline struct nvkm_mmu_ptc *
nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
{
	struct nvkm_mmu_ptc *ptc;

	list_for_each_entry(ptc, &mmu->ptc.list, head) {
		if (ptc->size == size)
			return ptc;
	}

	ptc = kmalloc(sizeof(*ptc), GFP_KERNEL);
	if (ptc) {
		INIT_LIST_HEAD(&ptc->item);
		ptc->size = size;
		ptc->refs = 0;
		list_add(&ptc->head, &mmu->ptc.list);
	}

	return ptc;
}

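/* Release a page table: sub-allocations go back to their parent PTP, whole
 * objects are either cached for reuse or freed (always freed when 'force').
 */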
void
nvkm_mmu_ptc_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt **ppt)
{
	struct nvkm_mmu_pt *pt = *ppt;
	if (pt) {
		/* Handle sub-allocated page tables. */
		if (pt->sub) {
			mutex_lock(&mmu->ptp.mutex);
			nvkm_mmu_ptp_put(mmu, force, pt);
			mutex_unlock(&mmu->ptp.mutex);
			return;
		}

		/* Either cache or free the object. */
		mutex_lock(&mmu->ptc.mutex);
		if (pt->ptc->refs < 8 /* Heuristic. */ && !force) {
			list_add_tail(&pt->head, &pt->ptc->item);
			pt->ptc->refs++;
		} else {
			nvkm_memory_unref(&pt->memory);
			kfree(pt);
		}
		mutex_unlock(&mmu->ptc.mutex);
	}
}

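/* Allocate a page table of the given size/alignment, reusing a cached
 * object when possible; alignments below 0x1000 are sub-allocated from a
 * parent PTP instead.
 */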
struct nvkm_mmu_pt *
nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero)
{
	struct nvkm_mmu_ptc *ptc;
	struct nvkm_mmu_pt *pt;
	int ret;

	/* Sub-allocated page table (i.e. GP100 LPT). */
	if (align < 0x1000) {
		mutex_lock(&mmu->ptp.mutex);
		pt = nvkm_mmu_ptp_get(mmu, align, zero);
		mutex_unlock(&mmu->ptp.mutex);
		return pt;
	}

	/* Look up the cache for this page table size. */
	mutex_lock(&mmu->ptc.mutex);
	ptc = nvkm_mmu_ptc_find(mmu, size);
	if (!ptc) {
		mutex_unlock(&mmu->ptc.mutex);
		return NULL;
	}

	/* If there's a free PT in the cache, reuse it. */
	pt = list_first_entry_or_null(&ptc->item, typeof(*pt), head);
	if (pt) {
		if (zero)
			nvkm_fo64(pt->memory, 0, 0, size >> 3);
		list_del(&pt->head);
		ptc->refs--;
		mutex_unlock(&mmu->ptc.mutex);
		return pt;
	}
	mutex_unlock(&mmu->ptc.mutex);

	/* No such luck, we need to allocate. */
	if (!(pt = kmalloc(sizeof(*pt), GFP_KERNEL)))
		return NULL;
	pt->ptc = ptc;
	pt->sub = false;

	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
			      size, align, zero, &pt->memory);
	if (ret) {
		kfree(pt);
		return NULL;
	}

	pt->base = 0;
	pt->addr = nvkm_memory_addr(pt->memory);
	return pt;
}

void
nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
{
	struct nvkm_mmu_ptc *ptc;
	list_for_each_entry(ptc, &mmu->ptc.list, head) {
		struct nvkm_mmu_pt *pt, *tt;
		list_for_each_entry_safe(pt, tt, &ptc->item, head) {
			nvkm_memory_unref(&pt->memory);
			list_del(&pt->head);
			kfree(pt);
		}
	}
}

static void
nvkm_mmu_ptc_fini(struct nvkm_mmu *mmu)
{
	struct nvkm_mmu_ptc *ptc, *ptct;

	list_for_each_entry_safe(ptc, ptct, &mmu->ptc.list, head) {
		WARN_ON(!list_empty(&ptc->item));
		list_del(&ptc->head);
		kfree(ptc);
	}
}

static void
nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
{
	mutex_init(&mmu->ptc.mutex);
	INIT_LIST_HEAD(&mmu->ptc.list);
	mutex_init(&mmu->ptp.mutex);
	INIT_LIST_HEAD(&mmu->ptp.list);
}

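/* Map an nvkm_mem described by a list of nvkm_mm nodes (e.g. VRAM) into
 * 'vma', starting 'delta' bytes into the mapping, then flush the TLBs.
 */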
void
nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_mm_node *r = node->mem;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (mmu->func->pgt_bits - bits);
	u32 end, len;

	delta = 0;
	while (r) {
		u64 phys = (u64)r->offset << 12;
		u32 num  = r->length >> bits;

		while (num) {
			struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			mmu->func->map(vma, pgt, node, pte, len, phys, delta);

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
				phys += len << (bits + 12);
				pde++;
				pte = 0;
			}

			delta += (u64)len << vma->node->type;
		}
		r = r->next;
	}

	mmu->func->flush(vm);
}

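/* As nvkm_vm_map_at(), but for memory described by a scatter-gather table;
 * pages are mapped one DMA address at a time.
 */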
static void
nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
		     struct nvkm_mem *mem)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (mmu->func->pgt_bits - bits);
	unsigned m, sglen;
	u32 end, len;
	int i;
	struct scatterlist *sg;

	for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
		sglen = sg_dma_len(sg) >> PAGE_SHIFT;

		end = pte + sglen;
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

			mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
			num--;
			pte++;

			if (num == 0)
				goto finish;
		}
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
		if (m < sglen) {
			for (; m < sglen; m++) {
				dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

				mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
				num--;
				pte++;
				if (num == 0)
					goto finish;
			}
		}

	}
finish:
	mmu->func->flush(vm);
}

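/* As nvkm_vm_map_at(), but for memory described by a flat array of DMA
 * page addresses (mem->pages).
 */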
static void
nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
	       struct nvkm_mem *mem)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	dma_addr_t *list = mem->pages;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (mmu->func->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		mmu->func->map_sg(vma, pgt, mem, pte, len, list);

		num  -= len;
		pte  += len;
		list += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	mmu->func->flush(vm);
}

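/* Map an entire nvkm_mem, picking the appropriate backend for how the
 * memory is described.
 */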
void
nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
{
	if (node->sg)
		nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
	else
	if (node->pages)
		nvkm_vm_map_sg(vma, 0, node->size << 12, node);
	else
		nvkm_vm_map_at(vma, 0, node);
}

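/* Unmap 'length' bytes of 'vma', starting 'delta' bytes in, and flush the
 * TLBs.
 */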
void
nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (mmu->func->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		mmu->func->unmap(vma, pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	mmu->func->flush(vm);
}

void
nvkm_vm_unmap(struct nvkm_vma *vma)
{
	nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}

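/* Drop references on the page tables backing PDEs fpde..lpde, unmapping
 * and freeing any that reach zero.
 */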
static void
nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_vm_pgd *vpgd;
	struct nvkm_vm_pgt *vpgt;
	struct nvkm_memory *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount[big])
			continue;

		pgt = vpgt->mem[big];
		vpgt->mem[big] = NULL;

		list_for_each_entry(vpgd, &vm->pgd_list, head) {
			mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
		}

		mmu->func->flush(vm);

		nvkm_memory_unref(&pgt);
	}
}

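/* Allocate the page table backing a single PDE, and write the new entry
 * into every page directory attached to this VM.
 */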
static int
nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nvkm_vm_pgd *vpgd;
	int big = (type != mmu->func->spg_shift);
	u32 pgt_size;
	int ret;

	pgt_size  = (1 << (mmu->func->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
			      pgt_size, 0x1000, true, &vpgt->mem[big]);
	if (unlikely(ret))
		return ret;

	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
	}

	vpgt->refcount[big]++;
	return 0;
}

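/* Allocate a block of address space from the VM, creating any page tables
 * needed to back it.
 */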
int
nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
	    struct nvkm_vma *vma)
{
	struct nvkm_mmu *mmu = vm->mmu;
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&vm->mutex);
	ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
			   &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&vm->mutex);
		return ret;
	}

	fpde = (vma->node->offset >> mmu->func->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;

	for (pde = fpde; pde <= lpde; pde++) {
		struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
		int big = (vma->node->type != mmu->func->spg_shift);

		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
			continue;
		}

		ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			if (pde != fpde)
				nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nvkm_mm_free(&vm->mm, &vma->node);
			mutex_unlock(&vm->mutex);
			return ret;
		}
	}
	mutex_unlock(&vm->mutex);

	vma->vm = NULL;
	nvkm_vm_ref(vm, &vma->vm, NULL);
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}

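/* Release address space previously allocated with nvkm_vm_get(). */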
void
nvkm_vm_put(struct nvkm_vma *vma)
{
	struct nvkm_mmu *mmu;
	struct nvkm_vm *vm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
	vm = vma->vm;
	mmu = vm->mmu;

	fpde = (vma->node->offset >> mmu->func->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;

	mutex_lock(&vm->mutex);
	nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
	nvkm_mm_free(&vm->mm, &vma->node);
	mutex_unlock(&vm->mutex);

	nvkm_vm_ref(NULL, &vma->vm, NULL);
}

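/* Pre-allocate and boot the small-page page table covering the first
 * 'size' bytes of the VM.
 */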
int
nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_memory *pgt;
	int ret;

	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
			      (size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt);
	if (ret == 0) {
		vm->pgt[0].refcount[0] = 1;
		vm->pgt[0].mem[0] = pgt;
		nvkm_memory_boot(pgt, vm);
		vm->bootstrapped = true;
	}

	return ret;
}

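/* Common setup for a legacy VM: PDE bounds, the page table array, and the
 * address space allocator.
 */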
static int
nvkm_vm_legacy(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
	       u32 block, struct nvkm_vm *vm)
{
	u64 mm_length = (offset + length) - mm_offset;
	int ret;

	INIT_LIST_HEAD(&vm->pgd_list);
	kref_init(&vm->refcount);
	vm->fpde = offset >> (mmu->func->pgt_bits + 12);
	vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);

	vm->pgt  = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

	if (block > length)
		block = length;

	ret = nvkm_mm_init(&vm->mm, 0, mm_offset >> 12, mm_length >> 12,
			   block >> 12);
	if (ret) {
		vfree(vm->pgt);
		return ret;
	}

	return 0;
}

int
nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
	       u32 block, struct lock_class_key *key, struct nvkm_vm **pvm)
{
	static struct lock_class_key _key;
	struct nvkm_vm *vm;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	__mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key);
	vm->mmu = mmu;

	ret = nvkm_vm_legacy(mmu, offset, length, mm_offset, block, vm);
	if (ret) {
		kfree(vm);
		return ret;
	}

	*pvm = vm;
	return 0;
}

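/* Create a VM for 'device', preferring the MMU's new VMM backend when one
 * is available and falling back to the hardware-specific create() hook.
 */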
int
nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
	    struct lock_class_key *key, struct nvkm_vm **pvm)
{
	struct nvkm_mmu *mmu = device->mmu;

	*pvm = NULL;
	if (mmu->func->vmm.ctor) {
		int ret = mmu->func->vmm.ctor(mmu, mm_offset,
					      offset + length - mm_offset,
					      NULL, 0, key, "legacy", pvm);
		if (ret) {
			nvkm_vm_ref(NULL, pvm, NULL);
			return ret;
		}

		ret = nvkm_vm_legacy(mmu, offset, length, mm_offset,
				     (*pvm)->func->page_block ?
				     (*pvm)->func->page_block : 4096, *pvm);
		if (ret)
			nvkm_vm_ref(NULL, pvm, NULL);

		return ret;
	}

	if (!mmu->func->create)
		return -EINVAL;

	return mmu->func->create(mmu, offset, length, mm_offset, key, pvm);
}

static int
nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_vm_pgd *vpgd;
	int i;

	if (!pgd)
		return 0;

	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
	if (!vpgd)
		return -ENOMEM;

	vpgd->obj = pgd;

	mutex_lock(&vm->mutex);
	for (i = vm->fpde; i <= vm->lpde; i++)
		mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
	list_add(&vpgd->head, &vm->pgd_list);
	mutex_unlock(&vm->mutex);
	return 0;
}

static void
nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
{
	struct nvkm_vm_pgd *vpgd, *tmp;

	if (!mpgd)
		return;

	mutex_lock(&vm->mutex);
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		if (vpgd->obj == mpgd) {
			list_del(&vpgd->head);
			kfree(vpgd);
			break;
		}
	}
	mutex_unlock(&vm->mutex);
}

static void
nvkm_vm_del(struct kref *kref)
{
	struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
	struct nvkm_vm_pgd *vpgd, *tmp;

	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		nvkm_vm_unlink(vm, vpgd->obj);
	}

	nvkm_mm_fini(&vm->mm);
	vfree(vm->pgt);
	if (vm->func)
		nvkm_vmm_dtor(vm);
	kfree(vm);
}

int
nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
{
	if (ref) {
		int ret = nvkm_vm_link(ref, pgd);
		if (ret)
			return ret;

		kref_get(&ref->refcount);
	}

	if (*ptr) {
		if ((*ptr)->bootstrapped && pgd)
			nvkm_memory_unref(&(*ptr)->pgt[0].mem[0]);
		nvkm_vm_unlink(*ptr, pgd);
		kref_put(&(*ptr)->refcount, nvkm_vm_del);
	}

	*ptr = ref;
	return 0;
}

static int
nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);

	if (mmu->func->vmm.global) {
		int ret = nvkm_vm_new(subdev->device, 0, mmu->limit, 0,
				      NULL, &mmu->vmm);
		if (ret)
			return ret;
	}

	if (mmu->func->oneinit)
		return mmu->func->oneinit(mmu);

	return 0;
}

static int
nvkm_mmu_init(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
	if (mmu->func->init)
		mmu->func->init(mmu);
	return 0;
}

static void *
nvkm_mmu_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);

	nvkm_vm_ref(NULL, &mmu->vmm, NULL);

	nvkm_mmu_ptc_fini(mmu);
	return mmu;
}

static const struct nvkm_subdev_func
nvkm_mmu = {
	.dtor = nvkm_mmu_dtor,
	.oneinit = nvkm_mmu_oneinit,
	.init = nvkm_mmu_init,
};

void
nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu *mmu)
{
	nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
	mmu->func = func;
	mmu->limit = func->limit;
	mmu->dma_bits = func->dma_bits;
	mmu->lpg_shift = func->lpg_shift;
	nvkm_mmu_ptc_init(mmu);
}

int
nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu **pmmu)
{
	if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_mmu_ctor(func, device, index, *pmmu);
	return 0;
}