/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>

void
30
nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
B
Ben Skeggs 已提交
31
{
32 33 34
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_mm_node *r;
35
	int big = vma->node->type != mmu->func->spg_shift;
B
Ben Skeggs 已提交
36 37
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
38 39 40
	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (mmu->func->pgt_bits - bits);
B
Ben Skeggs 已提交
41 42
	u32 end, len;

B
Ben Skeggs 已提交
43
	delta = 0;
44
	list_for_each_entry(r, &node->regions, rl_entry) {
B
Ben Skeggs 已提交
45 46 47 48
		u64 phys = (u64)r->offset << 12;
		u32 num  = r->length >> bits;

		while (num) {
49
			struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
B
Ben Skeggs 已提交
50 51 52 53 54 55

			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

56
			mmu->func->map(vma, pgt, node, pte, len, phys, delta);
B
Ben Skeggs 已提交
57 58 59 60

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
61
				phys += len << (bits + 12);
B
Ben Skeggs 已提交
62 63 64
				pde++;
				pte = 0;
			}
B
Ben Skeggs 已提交
65 66

			delta += (u64)len << vma->node->type;
B
Ben Skeggs 已提交
67 68 69
		}
	}

70
	mmu->func->flush(vm);
B
Ben Skeggs 已提交
71 72
}

73
static void
74 75
nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
		     struct nvkm_mem *mem)
D
Dave Airlie 已提交
76
{
77 78
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
79
	int big = vma->node->type != mmu->func->spg_shift;
D
Dave Airlie 已提交
80 81 82
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
83 84 85
	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (mmu->func->pgt_bits - bits);
D
Dave Airlie 已提交
86 87 88 89 90 91
	unsigned m, sglen;
	u32 end, len;
	int i;
	struct scatterlist *sg;

	for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
92
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
D
Dave Airlie 已提交
93 94 95 96 97 98 99 100 101 102
		sglen = sg_dma_len(sg) >> PAGE_SHIFT;

		end = pte + sglen;
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

103
			mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
D
Dave Airlie 已提交
104 105 106 107 108 109 110 111 112 113 114 115 116 117
			num--;
			pte++;

			if (num == 0)
				goto finish;
		}
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
		if (m < sglen) {
			for (; m < sglen; m++) {
				dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

118
				mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
D
Dave Airlie 已提交
119 120 121 122 123 124 125 126 127
				num--;
				pte++;
				if (num == 0)
					goto finish;
			}
		}

	}
finish:
128
	mmu->func->flush(vm);
D
Dave Airlie 已提交
129 130
}

131
static void
132 133
nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
	       struct nvkm_mem *mem)
B
Ben Skeggs 已提交
134
{
135 136
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
137
	dma_addr_t *list = mem->pages;
138
	int big = vma->node->type != mmu->func->spg_shift;
B
Ben Skeggs 已提交
139 140 141
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
142 143 144
	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (mmu->func->pgt_bits - bits);
B
Ben Skeggs 已提交
145 146 147
	u32 end, len;

	while (num) {
148
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
B
Ben Skeggs 已提交
149 150 151 152 153 154

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

155
		mmu->func->map_sg(vma, pgt, mem, pte, len, list);
B
Ben Skeggs 已提交
156 157 158 159 160 161 162 163 164 165

		num  -= len;
		pte  += len;
		list += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

166
	mmu->func->flush(vm);
B
Ben Skeggs 已提交
167 168
}

169
void
170
nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
171 172
{
	if (node->sg)
173
		nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
174 175
	else
	if (node->pages)
176
		nvkm_vm_map_sg(vma, 0, node->size << 12, node);
177
	else
178
		nvkm_vm_map_at(vma, 0, node);
179 180
}

B
Ben Skeggs 已提交
181
void
182
nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
B
Ben Skeggs 已提交
183
{
184 185
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
186
	int big = vma->node->type != mmu->func->spg_shift;
B
Ben Skeggs 已提交
187 188 189
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
190 191 192
	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (mmu->func->pgt_bits - bits);
B
Ben Skeggs 已提交
193 194 195
	u32 end, len;

	while (num) {
196
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
B
Ben Skeggs 已提交
197 198 199 200 201 202

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

203
		mmu->func->unmap(vma, pgt, pte, len);
B
Ben Skeggs 已提交
204 205 206 207 208 209 210 211 212

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

213
	mmu->func->flush(vm);
B
Ben Skeggs 已提交
214 215 216
}

void
217
nvkm_vm_unmap(struct nvkm_vma *vma)
B
Ben Skeggs 已提交
218
{
219
	nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
B
Ben Skeggs 已提交
220 221 222
}

static void
223
nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
B
Ben Skeggs 已提交
224
{
225 226 227
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_vm_pgd *vpgd;
	struct nvkm_vm_pgt *vpgt;
228
	struct nvkm_memory *pgt;
B
Ben Skeggs 已提交
229 230 231 232
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
233
		if (--vpgt->refcount[big])
B
Ben Skeggs 已提交
234 235
			continue;

236 237
		pgt = vpgt->mem[big];
		vpgt->mem[big] = NULL;
238

B
Ben Skeggs 已提交
239
		list_for_each_entry(vpgd, &vm->pgd_list, head) {
240
			mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
B
Ben Skeggs 已提交
241 242
		}

243
		nvkm_memory_del(&pgt);
B
Ben Skeggs 已提交
244 245 246 247
	}
}

static int
248
nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
B
Ben Skeggs 已提交
249
{
250 251 252
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nvkm_vm_pgd *vpgd;
253
	int big = (type != mmu->func->spg_shift);
B
Ben Skeggs 已提交
254 255 256
	u32 pgt_size;
	int ret;

257
	pgt_size  = (1 << (mmu->func->pgt_bits + 12)) >> type;
B
Ben Skeggs 已提交
258 259
	pgt_size *= 8;

260 261
	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
			      pgt_size, 0x1000, true, &vpgt->mem[big]);
B
Ben Skeggs 已提交
262 263 264 265
	if (unlikely(ret))
		return ret;

	list_for_each_entry(vpgd, &vm->pgd_list, head) {
266
		mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
B
Ben Skeggs 已提交
267 268
	}

269
	vpgt->refcount[big]++;
B
Ben Skeggs 已提交
270 271 272 273
	return 0;
}

int
274 275
nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
	    struct nvkm_vma *vma)
B
Ben Skeggs 已提交
276
{
277
	struct nvkm_mmu *mmu = vm->mmu;
B
Ben Skeggs 已提交
278 279 280 281 282
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	u32 fpde, lpde, pde;
	int ret;

283
	mutex_lock(&vm->mutex);
284 285
	ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
			   &vma->node);
B
Ben Skeggs 已提交
286
	if (unlikely(ret != 0)) {
287
		mutex_unlock(&vm->mutex);
B
Ben Skeggs 已提交
288 289 290
		return ret;
	}

291 292
	fpde = (vma->node->offset >> mmu->func->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
293

B
Ben Skeggs 已提交
294
	for (pde = fpde; pde <= lpde; pde++) {
295
		struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
296
		int big = (vma->node->type != mmu->func->spg_shift);
B
Ben Skeggs 已提交
297

298 299
		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
B
Ben Skeggs 已提交
300 301 302
			continue;
		}

303
		ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
B
Ben Skeggs 已提交
304 305
		if (ret) {
			if (pde != fpde)
306 307
				nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nvkm_mm_free(&vm->mm, &vma->node);
308
			mutex_unlock(&vm->mutex);
B
Ben Skeggs 已提交
309 310 311
			return ret;
		}
	}
312
	mutex_unlock(&vm->mutex);
B
Ben Skeggs 已提交
313

314
	vma->vm = NULL;
315
	nvkm_vm_ref(vm, &vma->vm, NULL);
B
Ben Skeggs 已提交
316 317 318 319 320 321
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}

void
322
nvkm_vm_put(struct nvkm_vma *vma)
B
Ben Skeggs 已提交
323
{
324 325
	struct nvkm_mmu *mmu;
	struct nvkm_vm *vm;
B
Ben Skeggs 已提交
326 327 328 329
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
330 331 332
	vm = vma->vm;
	mmu = vm->mmu;

333 334
	fpde = (vma->node->offset >> mmu->func->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
B
Ben Skeggs 已提交
335

336
	mutex_lock(&vm->mutex);
337
	nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
338
	nvkm_mm_free(&vm->mm, &vma->node);
339
	mutex_unlock(&vm->mutex);
340

341
	nvkm_vm_ref(NULL, &vma->vm, NULL);
B
Ben Skeggs 已提交
342 343
}

344 345 346 347
int
nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
{
	struct nvkm_mmu *mmu = vm->mmu;
348
	struct nvkm_memory *pgt;
349 350
	int ret;

351
	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
352
			      (size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt);
353 354
	if (ret == 0) {
		vm->pgt[0].refcount[0] = 1;
355 356
		vm->pgt[0].mem[0] = pgt;
		nvkm_memory_boot(pgt, vm);
357 358 359 360 361
	}

	return ret;
}

B
Ben Skeggs 已提交
362
int
363
nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
364
	       u32 block, struct lock_class_key *key, struct nvkm_vm **pvm)
B
Ben Skeggs 已提交
365
{
366
	static struct lock_class_key _key;
367
	struct nvkm_vm *vm;
B
Ben Skeggs 已提交
368 369 370
	u64 mm_length = (offset + length) - mm_offset;
	int ret;

371
	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
B
Ben Skeggs 已提交
372 373 374
	if (!vm)
		return -ENOMEM;

375
	__mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key);
376
	INIT_LIST_HEAD(&vm->pgd_list);
377
	vm->mmu = mmu;
378
	kref_init(&vm->refcount);
379 380
	vm->fpde = offset >> (mmu->func->pgt_bits + 12);
	vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);
B
Ben Skeggs 已提交
381

382
	vm->pgt  = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
B
Ben Skeggs 已提交
383 384 385 386 387
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

388 389
	ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
			   block >> 12);
B
Ben Skeggs 已提交
390
	if (ret) {
391
		vfree(vm->pgt);
B
Ben Skeggs 已提交
392 393 394 395
		kfree(vm);
		return ret;
	}

396 397
	*pvm = vm;

B
Ben Skeggs 已提交
398 399 400
	return 0;
}

401
int
402
nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
403
	    struct lock_class_key *key, struct nvkm_vm **pvm)
404
{
405 406 407 408
	struct nvkm_mmu *mmu = device->mmu;
	if (!mmu->func->create)
		return -EINVAL;
	return mmu->func->create(mmu, offset, length, mm_offset, key, pvm);
409 410
}

B
Ben Skeggs 已提交
411
static int
412
nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
B
Ben Skeggs 已提交
413
{
414 415
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_vm_pgd *vpgd;
B
Ben Skeggs 已提交
416 417 418 419 420 421 422 423 424
	int i;

	if (!pgd)
		return 0;

	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
	if (!vpgd)
		return -ENOMEM;

425
	vpgd->obj = pgd;
B
Ben Skeggs 已提交
426

427
	mutex_lock(&vm->mutex);
428
	for (i = vm->fpde; i <= vm->lpde; i++)
429
		mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
B
Ben Skeggs 已提交
430
	list_add(&vpgd->head, &vm->pgd_list);
431
	mutex_unlock(&vm->mutex);
B
Ben Skeggs 已提交
432 433 434 435
	return 0;
}

static void
436
nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
B
Ben Skeggs 已提交
437
{
438
	struct nvkm_vm_pgd *vpgd, *tmp;
B
Ben Skeggs 已提交
439

440
	if (!mpgd)
B
Ben Skeggs 已提交
441 442
		return;

443
	mutex_lock(&vm->mutex);
B
Ben Skeggs 已提交
444
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
445 446 447 448 449
		if (vpgd->obj == mpgd) {
			list_del(&vpgd->head);
			kfree(vpgd);
			break;
		}
B
Ben Skeggs 已提交
450
	}
451
	mutex_unlock(&vm->mutex);
B
Ben Skeggs 已提交
452 453 454
}

static void
455
nvkm_vm_del(struct kref *kref)
B
Ben Skeggs 已提交
456
{
457 458
	struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
	struct nvkm_vm_pgd *vpgd, *tmp;
B
Ben Skeggs 已提交
459 460

	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
461
		nvkm_vm_unlink(vm, vpgd->obj);
B
Ben Skeggs 已提交
462 463
	}

464
	nvkm_mm_fini(&vm->mm);
465
	vfree(vm->pgt);
B
Ben Skeggs 已提交
466 467 468 469
	kfree(vm);
}

int
470
nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
B
Ben Skeggs 已提交
471
{
472
	if (ref) {
473
		int ret = nvkm_vm_link(ref, pgd);
B
Ben Skeggs 已提交
474 475 476
		if (ret)
			return ret;

477
		kref_get(&ref->refcount);
B
Ben Skeggs 已提交
478 479
	}

480
	if (*ptr) {
481 482
		nvkm_vm_unlink(*ptr, pgd);
		kref_put(&(*ptr)->refcount, nvkm_vm_del);
B
Ben Skeggs 已提交
483 484
	}

485
	*ptr = ref;
B
Ben Skeggs 已提交
486 487
	return 0;
}

/* Subdev one-time init: forwarded to the hw backend when it provides one. */
static int
nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);

	if (!mmu->func->oneinit)
		return 0;
	return mmu->func->oneinit(mmu);
}

static int
nvkm_mmu_init(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
	if (mmu->func->init)
		mmu->func->init(mmu);
	return 0;
}

/* Subdev destructor: let the hw backend tear down and choose the pointer
 * to kfree(); fall back to freeing the mmu itself. */
static void *
nvkm_mmu_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);

	if (!mmu->func->dtor)
		return mmu;
	return mmu->func->dtor(mmu);
}

/* Generic subdev hooks; each dispatches to the hw-specific mmu->func. */
static const struct nvkm_subdev_func
nvkm_mmu = {
	.dtor = nvkm_mmu_dtor,
	.oneinit = nvkm_mmu_oneinit,
	.init = nvkm_mmu_init,
};

/* Initialise an embedded nvkm_mmu: register the subdev and cache the
 * backend's limit/dma_bits/lpg_shift parameters on the mmu object. */
void
nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu *mmu)
{
	nvkm_subdev_ctor(&nvkm_mmu, device, index, 0, &mmu->subdev);
	mmu->func = func;
	mmu->limit = func->limit;
	mmu->dma_bits = func->dma_bits;
	mmu->lpg_shift = func->lpg_shift;
}

/* Allocate and construct an nvkm_mmu, storing it in *pmmu.
 * Returns 0, or -ENOMEM (with *pmmu set to NULL) on allocation failure. */
int
nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu **pmmu)
{
	struct nvkm_mmu *mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);

	*pmmu = mmu;
	if (!mmu)
		return -ENOMEM;
	nvkm_mmu_ctor(func, device, index, mmu);
	return 0;
}