/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On the export the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference is released when the dma_buf
 * object goes away in the driver .release function.
 *
 * On the import the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl.
 * It calls dma_buf_get, creates an attachment to it and stores the
 * attachment in the GEM object. When this attachment is destroyed
 * when the imported object is destroyed, we remove the attachment
 * and drop the reference to the dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get a fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return the GEM object
 * from the dma-buf private.  Prime will do this automatically for drivers that
 * use the drm_gem_prime_{import,export} helpers.
 */
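
/*
 * Example (illustrative only, not part of this file): userspace reaches
 * these paths through the PRIME ioctls from drm.h.  The drm_fd, handle
 * and other_drm_fd variables below are hypothetical.
 *
 *	struct drm_prime_handle args = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *	// args.fd is now a dma-buf fd that can be passed to another
 *	// process or another device's drm fd
 *
 *	ioctl(other_drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
 *	// args.handle is now a GEM handle valid on other_drm_fd
 */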

struct drm_prime_member {
	struct list_head entry;
	struct dma_buf *dma_buf;
	uint32_t handle;
};

struct drm_prime_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;
	list_add(&member->entry, &prime_fpriv->head);
	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->handle == handle)
			return member->dma_buf;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		}
	}
	return -ENOENT;
}

static int drm_gem_map_attach(struct dma_buf *dma_buf,
			      struct device *target_dev,
			      struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
	if (!prime_attach)
		return -ENOMEM;

	prime_attach->dir = DMA_NONE;
	attach->priv = prime_attach;

	if (!dev->driver->gem_prime_pin)
		return 0;

	return dev->driver->gem_prime_pin(obj);
}

static void drm_gem_map_detach(struct dma_buf *dma_buf,
			       struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	struct sg_table *sgt;

	if (dev->driver->gem_prime_unpin)
		dev->driver->gem_prime_unpin(obj);

	if (!prime_attach)
		return;

	sgt = prime_attach->sgt;
	if (sgt) {
		if (prime_attach->dir != DMA_NONE)
			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
					prime_attach->dir);
		sg_free_table(sgt);
	}

	kfree(sgt);
	kfree(prime_attach);
	attach->priv = NULL;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
	struct drm_prime_member *member, *safe;

	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			dma_buf_put(dma_buf);
			list_del(&member->entry);
			kfree(member);
		}
	}
}

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
		enum dma_data_direction dir)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE || !prime_attach))
		return ERR_PTR(-EINVAL);

	/* return the cached mapping when possible */
	if (prime_attach->dir == dir)
		return prime_attach->sgt;

	/*
	 * two mappings with different directions for the same attachment are
	 * not allowed
	 */
	if (WARN_ON(prime_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR(sgt)) {
		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			prime_attach->sgt = sgt;
			prime_attach->dir = dir;
		}
	}

	return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	/* nothing to be done here */
}

void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	/* drop the reference the export fd holds */
	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{

}
static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{

}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import.  These functions implement dma-buf support in terms of
 * five lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  - @gem_prime_pin (optional): prepare a GEM object for exporting
 *
 *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *
 *  - @gem_prime_vmap: vmap a buffer exported by your driver
 *
 *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  - @gem_prime_import_sg_table (import): produce a GEM object from another
 *    driver's scatter/gather table
 */
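
/*
 * A minimal sketch (hypothetical foo_* driver names) of how these helpers
 * and callbacks plug into struct drm_driver; gem_prime_pin is optional:
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_PRIME,
 *		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 *		.gem_prime_export	= drm_gem_prime_export,
 *		.gem_prime_import	= drm_gem_prime_import,
 *		.gem_prime_pin		= foo_gem_prime_pin,
 *		.gem_prime_get_sg_table	= foo_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *		.gem_prime_vmap		= foo_gem_prime_vmap,
 *		.gem_prime_vunmap	= foo_gem_prime_vunmap,
 *		.gem_prime_mmap		= foo_gem_prime_mmap,
 *	};
 */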

struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj, int flags)
{
	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
}
EXPORT_SYMBOL(drm_gem_prime_export);

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);
	/* Grab a new ref since the dma-buf now holds a reference to the object */
	drm_gem_object_reference(obj);

	return dmabuf;
}

int drm_gem_prime_handle_to_fd(struct drm_device *dev,
		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
		int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj)  {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't fail to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_unreference_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);

int drm_gem_prime_fd_to_handle(struct drm_device *dev,
		struct drm_file *file_priv, int prime_fd, uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	if (ret)
		goto fail;

	mutex_unlock(&file_priv->prime.lock);

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	dma_buf_put(dma_buf);
	mutex_unlock(&file_priv->prime.lock);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;
	uint32_t flags;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~DRM_CLOEXEC)
		return -EINVAL;

	/* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
	flags = args->flags & DRM_CLOEXEC;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}

/*
 * drm_prime_pages_to_sg
 *
 * This helper creates an sg table object from a set of pages.
 * The driver is responsible for mapping the pages into the
 * importer's address space.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
				nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
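
/*
 * Sketch of a driver's .gem_prime_get_sg_table callback built on this
 * helper (struct foo_gem_object and its pages array are hypothetical):
 *
 *	static struct sg_table *foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *foo_obj =
 *			container_of(obj, struct foo_gem_object, base);
 *		int npages = obj->size >> PAGE_SHIFT;
 *
 *		return drm_prime_pages_to_sg(foo_obj->pages, npages);
 *	}
 */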

/* Export an sg table into an array of pages and addresses.
 * This is currently required by the TTM driver in order to do correct fault
 * handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len, offset;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		offset = sg->offset;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
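
/*
 * For example (sketch, hypothetical foo_* names), a TTM-based driver's
 * .gem_prime_import_sg_table callback might fill its page and DMA
 * address arrays like this:
 *
 *	if (drm_prime_sg_to_page_addr_arrays(sgt, foo_obj->pages,
 *					     foo_obj->dma_addrs, npages))
 *		return ERR_PTR(-EINVAL);
 */
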
/* helper function to cleanup a GEM/prime object */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;
	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
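
/*
 * Typical use (sketch, foo_* names hypothetical) is from a driver's
 * free-object path, only for imported objects:
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *foo_obj =
 *			container_of(obj, struct foo_gem_object, base);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, foo_obj->sgt);
 *		...
 *	}
 */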

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	INIT_LIST_HEAD(&prime_fpriv->head);
	mutex_init(&prime_fpriv->lock);
}
EXPORT_SYMBOL(drm_prime_init_file_private);

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!list_empty(&prime_fpriv->head));
}
EXPORT_SYMBOL(drm_prime_destroy_file_private);