// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static void tegra_bo_put(struct host1x_bo *bo)
{
25
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
26

27
	drm_gem_object_put(&obj->gem);
28 29
}

30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52
/* XXX move this into lib/scatterlist.c? */
static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
				  unsigned int nents, gfp_t gfp_mask)
{
	struct scatterlist *dst;
	unsigned int i;
	int err;

	err = sg_alloc_table(sgt, nents, gfp_mask);
	if (err < 0)
		return err;

	dst = sgt->sgl;

	for (i = 0; i < nents; i++) {
		sg_set_page(dst, sg_page(sg), sg->length, 0);
		dst = sg_next(dst);
		sg = sg_next(sg);
	}

	return 0;
}

/*
 * host1x_bo_ops .pin: make the buffer accessible to @dev. Either returns
 * the buffer's IOVA/physical address through @phys (when @phys is
 * non-NULL) or hands back a freshly built SG table that the caller maps
 * via the DMA API. The returned SG table is owned by the caller and is
 * released through tegra_bo_unpin().
 */
static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
				     dma_addr_t *phys)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct sg_table *sgt;
	int err;

	/*
	 * If we've manually mapped the buffer object through the IOMMU, make
	 * sure to return the IOVA address of our mapping.
	 *
	 * Similarly, for buffers that have been allocated by the DMA API the
	 * physical address can be used for devices that are not attached to
	 * an IOMMU. For these devices, callers must pass a valid pointer via
	 * the @phys argument.
	 *
	 * Imported buffers were also already mapped at import time, so the
	 * existing mapping can be reused.
	 */
	if (phys) {
		*phys = obj->iova;
		return NULL;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
						0, obj->gem.size, GFP_KERNEL);
		if (err < 0)
			goto free;
	} else if (obj->sgt) {
		/*
		 * If the buffer object already has an SG table but no pages
		 * were allocated for it, it means the buffer was imported and
		 * the SG table needs to be copied to avoid overwriting any
		 * other potential users of the original SG table.
		 */
		err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents,
					     GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
				      obj->gem.size);
		if (err < 0)
			goto free;
	}

	return sgt;

free:
	kfree(sgt);
	return ERR_PTR(err);
}

/* host1x_bo_ops .unpin: release an SG table handed out by tegra_bo_pin(). */
static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
	/* tegra_bo_pin() returns NULL when a physical address was used. */
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
134
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
135 136
	struct dma_buf_map map;
	int ret;
137

138
	if (obj->vaddr) {
139
		return obj->vaddr;
140 141 142 143
	} else if (obj->gem.import_attach) {
		ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
		return ret ? NULL : map.vaddr;
	} else {
144 145
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
146
	}
147 148 149 150
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
151 152 153 154 155 156 157 158
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
	else
		vunmap(addr);
159 160 161 162
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
163
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
164

165
	drm_gem_object_get(&obj->gem);
166 167 168 169

	return bo;
}

/* host1x buffer object operations backed by Tegra DRM GEM objects. */
static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

T
Thierry Reding 已提交
179 180 181
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
182
	int err;
T
Thierry Reding 已提交
183 184 185 186 187 188 189 190

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

191 192
	mutex_lock(&tegra->mm_lock);

193 194
	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
T
Thierry Reding 已提交
195
	if (err < 0) {
196
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
T
Thierry Reding 已提交
197
			err);
198
		goto unlock;
T
Thierry Reding 已提交
199 200
	}

201
	bo->iova = bo->mm->start;
T
Thierry Reding 已提交
202

203
	bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
204 205 206 207
				bo->sgt->nents, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
T
Thierry Reding 已提交
208 209 210
		goto remove;
	}

211 212
	mutex_unlock(&tegra->mm_lock);

T
Thierry Reding 已提交
213 214 215 216
	return 0;

remove:
	drm_mm_remove_node(bo->mm);
217 218
unlock:
	mutex_unlock(&tegra->mm_lock);
T
Thierry Reding 已提交
219 220 221 222 223 224 225 226 227
	kfree(bo->mm);
	return err;
}

/*
 * Tear down the IOMMU mapping created by tegra_bo_iommu_map() and return
 * the IOVA range to the shared allocator. No-op if the buffer was never
 * mapped. Always returns 0.
 */
static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	/* mm_lock protects both the IOMMU domain and the drm_mm allocator. */
	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

/* GEM object function table installed on every Tegra buffer object. */
static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
	.free = tegra_bo_free_object,
	.export = tegra_gem_prime_export,
	.vm_ops = &tegra_bo_vm_ops,
};

244 245 246 247 248 249 250 251 252 253
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

254 255
	bo->gem.funcs = &tegra_gem_object_funcs;

256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275
	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

/*
 * Release the backing storage of @bo: unmap and drop shmem pages for
 * IOMMU-backed buffers, or free the write-combined DMA API allocation.
 * Does nothing for imported buffers (neither pages nor vaddr are set).
 */
static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			     DMA_FROM_DEVICE);
		/* Mark pages dirty/accessed so shmem writeback works. */
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

/*
 * Allocate shmem-backed pages for @bo, build an SG table describing them
 * and map that table for DMA on the DRM device.
 *
 * Returns 0 on success or a negative error code; all partially acquired
 * resources are released on failure.
 */
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	/* gem.size was rounded up to a page multiple at allocation time. */
	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	/* dma_map_sg() returns the number of mapped entries; 0 is failure. */
	err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			 DMA_FROM_DEVICE);
	if (err == 0) {
		err = -EFAULT;
		goto free_sgt;
	}

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

/*
 * Allocate backing storage for @bo: shmem pages mapped through the IOMMU
 * when an IOMMU domain is available, otherwise a contiguous write-combined
 * DMA API allocation.
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		/* __GFP_NOWARN: allocation failure is reported below. */
		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

353
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
354
				 unsigned long flags)
355 356 357 358
{
	struct tegra_bo *bo;
	int err;

359 360 361
	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;
362

363
	err = tegra_bo_alloc(drm, bo);
T
Thierry Reding 已提交
364 365
	if (err < 0)
		goto release;
366

367
	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
368
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
369

370 371 372
	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

373 374
	return bo;

T
Thierry Reding 已提交
375 376
release:
	drm_gem_object_release(&bo->gem);
377 378 379 380 381
	kfree(bo);
	return ERR_PTR(err);
}

/*
 * Create a buffer object and register a userspace handle for it in @file.
 *
 * NOTE(review): the creation reference is dropped once the handle exists,
 * so the returned pointer is kept alive only by the handle — callers must
 * not use it after the handle may have been closed.
 */
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	/* Drop the creation reference; the handle now owns the object. */
	drm_gem_object_put(&bo->gem);

	return bo;
}

/*
 * Wrap a foreign dma-buf in a tegra_bo: attach to the buffer, map it for
 * the device and, when an IOMMU domain exists, map it into the shared
 * IOVA space as well.
 *
 * Returns the new object or an ERR_PTR() on failure; all partially
 * acquired resources (attachment, dma-buf reference, SG mapping) are
 * released on the error paths.
 */
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	/* Hold a dma-buf reference for the lifetime of the import. */
	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	/* bo->sgt may hold an ERR_PTR when mapping the attachment failed. */
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

/*
 * drm_gem_object_funcs .free: final teardown once the last reference to
 * the GEM object has been dropped.
 */
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		/* Imported: undo the attachment made in tegra_bo_import(). */
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		/* Locally allocated: release the backing storage. */
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
476
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
477
	struct tegra_drm *tegra = drm->dev_private;
478 479
	struct tegra_bo *bo;

480 481
	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;
482

483
	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
484
					 &args->handle);
485 486 487 488 489 490
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

491
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
T
Thierry Reding 已提交
492
{
493
	struct vm_area_struct *vma = vmf->vma;
T
Thierry Reding 已提交
494 495 496 497 498 499 500 501
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

502
	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
T
Thierry Reding 已提交
503 504
	page = bo->pages[offset];

505
	return vmf_insert_page(vma, vmf->address, page);
T
Thierry Reding 已提交
506 507
}

/* VM operations for mmap'ed buffers; faults are served from bo->pages. */
const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

/*
 * Shared mmap implementation used by both the file-based and dma-buf mmap
 * paths. @vma has already been set up by the generic GEM mmap code; this
 * either maps the DMA API allocation directly or arranges for pages to be
 * faulted in from bo->pages.
 */
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		/* Restore the fake offset saved above. */
		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		/* Pages are inserted on demand by tegra_bo_fault(). */
		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}
T
Thierry Reding 已提交
549

550 551 552 553 554 555 556 557 558 559 560
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

561
	return __tegra_gem_mmap(gem, vma);
562 563
}

/*
 * dma_buf_ops .map_dma_buf: build an SG table describing the buffer and
 * DMA-map it for the attached device. Returns NULL on failure.
 */
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		/* IOMMU-backed: describe the individual shmem pages. */
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		/* DMA API allocation: derive the table from the mapping. */
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	/* dma_map_sg() returns the number of mapped entries; 0 is failure. */
	if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

/* dma_buf_ops .unmap_dma_buf: release an SG table from map_dma_buf. */
static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	/* Only page-backed buffers were DMA-mapped in map_dma_buf. */
	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

/* dma_buf_ops .release: forward to the common GEM dma-buf release. */
static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643
static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				    DMA_FROM_DEVICE);

	return 0;
}

/*
 * dma_buf_ops .end_cpu_access: sync the buffer back for device access
 * after the CPU is done with it. Only page-backed buffers need this.
 */
static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages)
		return 0;

	dma_sync_sg_for_device(gem->dev->dev, bo->sgt->sgl, bo->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;
}

T
Thierry Reding 已提交
644 645
static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
646 647 648 649 650 651 652
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

653
	return __tegra_gem_mmap(gem, vma);
T
Thierry Reding 已提交
654 655
}

/*
 * dma_buf_ops .vmap: expose the kernel virtual address of the buffer.
 *
 * NOTE(review): bo->vaddr is only set for DMA API allocations; for IOMMU
 * page-backed buffers this hands out NULL — confirm that callers only
 * vmap DMA-backed exports.
 */
static int tegra_gem_prime_vmap(struct dma_buf *buf, struct dma_buf_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	dma_buf_map_set_vaddr(map, bo->vaddr);

	return 0;
}

/* dma_buf_ops .vunmap: no-op — vmap hands out a pre-existing mapping. */
static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

/* dma-buf operations for buffers exported by this driver. */
static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

/*
 * Export @gem as a dma-buf using this driver's dma_buf_ops. @flags are
 * the flags for the file descriptor backing the dma-buf.
 */
struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

/*
 * Import a dma-buf as a GEM object. A buffer that was exported by this
 * very device is short-circuited: the original GEM object is referenced
 * and returned instead of creating a new import wrapper.
 */
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		/* Self-import: reuse the existing object. */
		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}