/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
	return container_of(bo, struct tegra_bo, base);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_unreference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);
}

static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr + page * PAGE_SIZE;
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_reference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};

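/*
 * Reserve a region of I/O virtual address space from the Tegra DRM
 * allocator and map the buffer's scatter-gather table into the shared
 * IOMMU domain. On success, bo->paddr holds the IOVA of the mapping and
 * bo->size the mapped size.
 */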
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
					 PAGE_SIZE, 0, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
			err);
		goto free;
	}

	bo->paddr = bo->mm->start;

	err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
			   bo->sgt->nents, prot);
	if (err < 0) {
		dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
		goto remove;
	}

	bo->size = err;

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
free:
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	iommu_unmap(tegra->domain, bo->paddr, bo->size);
	drm_mm_remove_node(bo->mm);
	kfree(bo->mm);

	return 0;
}

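/*
 * Allocate and initialize the generic part of a buffer object: the
 * structure itself, the host1x_bo operations, the GEM object of the given
 * (page-aligned) size and its mmap offset. Backing memory is allocated
 * separately.
 */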
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

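/*
 * Release the backing memory: shmem pages if the buffer was allocated
 * page by page for the IOMMU case, or the contiguous write-combined DMA
 * allocation otherwise.
 */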
static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr,
				      bo->paddr);
	}
}

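/*
 * Allocate non-contiguous backing pages for a buffer that will be mapped
 * through the IOMMU, build an SG table for them and flush them out of the
 * CPU caches.
 */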
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	struct scatterlist *s;
	struct sg_table *sgt;
	unsigned int i;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(sgt))
		goto put_pages;

	/*
	 * Fake up the SG table so that dma_map_sg() can be used to flush the
	 * pages associated with it. Note that this relies on the fact that
	 * the DMA API doesn't hook into IOMMU on Tegra, therefore mapping is
	 * only cache maintenance.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(sgt->sgl, s, sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0)
		goto release_sgt;

	bo->sgt = sgt;

	return 0;

release_sgt:
	sg_free_table(sgt);
	kfree(sgt);
	sgt = ERR_PTR(-ENOMEM);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return PTR_ERR(sgt);
}

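/*
 * Allocate backing storage: scattered pages mapped through the IOMMU if a
 * domain is available, contiguous write-combined memory otherwise.
 */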
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
						   GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

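/* Create a buffer object and apply the tiling/layout creation flags. */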
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

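/*
 * Create a buffer object and expose it to userspace through a GEM handle.
 * The handle keeps the object alive, so the reference taken during
 * creation is dropped before returning.
 */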
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_unreference_unlocked(&bo->gem);

	return bo;
}

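/*
 * Import a foreign dma-buf: attach to it, map it and either translate the
 * resulting SG table through the IOMMU or, without an IOMMU, require a
 * single contiguous chunk whose DMA address can be used directly.
 */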
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

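/*
 * Final teardown of a buffer object: undo the IOMMU mapping (if any) and
 * release either the imported dma-buf attachment or the local allocation.
 */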
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

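/*
 * Dumb buffer creation: the pitch is rounded up to the hardware pitch
 * alignment before the backing object and its handle are created.
 */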
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

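/* Look up a GEM handle and report the fake mmap offset of its buffer. */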
int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
			     u32 handle, u64 *offset)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	mutex_lock(&drm->struct_mutex);

	gem = drm_gem_object_lookup(drm, file, handle);
	if (!gem) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		mutex_unlock(&drm->struct_mutex);
		return -EINVAL;
	}

	bo = to_tegra_bo(gem);

	*offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_unreference(gem);

	mutex_unlock(&drm->struct_mutex);

	return 0;
}

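/*
 * Fault handler for buffers backed by individual pages: insert the page
 * covering the faulting address into the user's mapping. Contiguous
 * buffers never fault because they are mapped upfront in tegra_drm_mmap().
 */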
static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;
	int err;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (err) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		return VM_FAULT_NOPAGE;

	case -ENOMEM:
		return VM_FAULT_OOM;
	}

	return VM_FAULT_SIGBUS;
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

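/*
 * mmap() entry point: contiguous buffers are mapped in one go via the DMA
 * API, while page-backed buffers get a write-combined mapping that is
 * populated page by page through the fault handler.
 */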
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;

		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
					    bo->paddr, gem->size);
		if (ret) {
			drm_gem_vm_close(vma);
			return ret;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

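/*
 * PRIME export path: build an SG table for the importer, either from the
 * backing pages or as a single entry describing the contiguous buffer.
 */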
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
					 unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
					  unsigned long page,
					  void *addr)
{
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.kmap_atomic = tegra_gem_prime_kmap_atomic,
	.kunmap_atomic = tegra_gem_prime_kunmap_atomic,
	.kmap = tegra_gem_prime_kmap,
	.kunmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

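/* Wrap a GEM object in a dma-buf for export to other drivers. */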
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
			      flags, NULL);
}

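/*
 * PRIME import: if the dma-buf was exported by this driver for the same
 * device, just take a reference on the underlying GEM object; otherwise
 * import it as a foreign buffer.
 */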
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_reference(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}