/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>

#include "drm_internal.h"

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On the export the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference is released when the dma_buf
 * object goes away in the driver .release function.
 *
 * On the import the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl.
 * It calls dma_buf_get, creates an attachment to it and stores the
 * attachment in the GEM object. When this attachment is destroyed
 * when the imported object is destroyed, we remove the attachment
 * and drop the reference to the dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get a fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return back the gem object
 * from the dma-buf private.  Prime will do this automatically for drivers that
 * use the drm_gem_prime_{import,export} helpers.
 */
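
/*
 * Illustrative userspace-side sketch (not part of this file): the
 * flink-replacement flow described above, using the libdrm wrappers around
 * the two PRIME ioctls. All variable names are placeholders and error
 * handling is omitted. Process A exports a GEM handle as a dma-buf fd and
 * passes the fd to process B (e.g. over a unix socket), which maps it back
 * to a handle on its own drm file:
 *
 *	int prime_fd;
 *	uint32_t handle_b;
 *
 *	// process A: GEM handle -> dma-buf fd (DRM_IOCTL_PRIME_HANDLE_TO_FD)
 *	drmPrimeHandleToFD(drm_fd_a, handle_a, DRM_CLOEXEC, &prime_fd);
 *
 *	// process B: dma-buf fd -> GEM handle (DRM_IOCTL_PRIME_FD_TO_HANDLE)
 *	drmPrimeFDToHandle(drm_fd_b, prime_fd, &handle_b);
 *
 * If the fd is imported on the same device that exported it, the
 * self-import case above applies and the original GEM object is returned.
 */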

struct drm_prime_member {
	struct dma_buf *dma_buf;
	uint32_t handle;

	struct rb_node dmabuf_rb;
	struct rb_node handle_rb;
};

struct drm_prime_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;
	struct rb_node **p, *rb;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;

	rb = NULL;
	p = &prime_fpriv->dmabufs.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (dma_buf > pos->dma_buf)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->dmabuf_rb, rb, p);
	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

	rb = NULL;
	p = &prime_fpriv->handles.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (handle > pos->handle)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->handle_rb, rb, p);
	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle)
			return member->dma_buf;
		else if (member->handle < handle)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return -ENOENT;
}

static int drm_gem_map_attach(struct dma_buf *dma_buf,
			      struct device *target_dev,
			      struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
	if (!prime_attach)
		return -ENOMEM;

	prime_attach->dir = DMA_NONE;
	attach->priv = prime_attach;

	if (!dev->driver->gem_prime_pin)
		return 0;

	return dev->driver->gem_prime_pin(obj);
}

static void drm_gem_map_detach(struct dma_buf *dma_buf,
			       struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	struct sg_table *sgt;

	if (dev->driver->gem_prime_unpin)
		dev->driver->gem_prime_unpin(obj);

	if (!prime_attach)
		return;

	sgt = prime_attach->sgt;
	if (sgt) {
		if (prime_attach->dir != DMA_NONE)
			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
					prime_attach->dir);
		sg_free_table(sgt);
	}

	kfree(sgt);
	kfree(prime_attach);
	attach->priv = NULL;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			rb_erase(&member->handle_rb, &prime_fpriv->handles);
			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

			dma_buf_put(dma_buf);
			kfree(member);
			return;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}
}

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
					    enum dma_data_direction dir)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE || !prime_attach))
		return ERR_PTR(-EINVAL);

	/* return the cached mapping when possible */
	if (prime_attach->dir == dir)
		return prime_attach->sgt;

	/*
	 * two mappings with different directions for the same attachment are
	 * not allowed
	 */
	if (WARN_ON(prime_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR(sgt)) {
		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			prime_attach->sgt = sgt;
			prime_attach->dir = dir;
		}
	}

	return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *sgt,
				  enum dma_data_direction dir)
{
	/* nothing to be done here */
}

/**
 * drm_gem_dmabuf_export - dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * exp_info->priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	drm_dev_ref(dev);
	drm_gem_object_reference(exp_info->priv);

	return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);

/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their dma_buf ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* drop the reference on the export fd holds */
	drm_gem_object_unreference_unlocked(obj);

	drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
					 unsigned long page_num, void *addr)
{

}
static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
				 unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
				  unsigned long page_num, void *addr)
{

}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
			       struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import.  These functions implement dma-buf support in terms of
 * six lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  * @gem_prime_pin (optional): prepare a GEM object for exporting
 *  * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *  * @gem_prime_vmap: vmap a buffer exported by your driver
 *  * @gem_prime_vunmap: vunmap a buffer exported by your driver
 *  * @gem_prime_mmap (optional): mmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  * @gem_prime_import_sg_table (import): produce a GEM object from another
 *    driver's scatter/gather table
 */
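
/*
 * Illustrative sketch (not part of this file): how a hypothetical "foo"
 * driver could wire these helpers into its &drm_driver. Only the generic
 * drm_gem_prime_* helpers referenced below are provided by this file; the
 * remaining callbacks stay driver-specific and the foo_* symbols are
 * placeholders:
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_PRIME,
 *		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 *		.gem_prime_export	= drm_gem_prime_export,
 *		.gem_prime_import	= drm_gem_prime_import,
 *		.gem_prime_get_sg_table	= foo_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *		.gem_prime_vmap		= foo_gem_prime_vmap,
 *		.gem_prime_vunmap	= foo_gem_prime_vunmap,
 *		.gem_prime_mmap		= foo_gem_prime_mmap,
 *	};
 */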

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the gem_prime_export functions for GEM drivers
 * using the PRIME helpers.
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj,
				     int flags)
{
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
	};

	if (dev->driver->gem_prime_res_obj)
		exp_info.resv = dev->driver->gem_prime_res_obj(obj);

	return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the create dma-buf
 *
 * This is the PRIME export function which must be used mandatorily by GEM
 * drivers to ensure correct lifetime management of the underlying GEM object.
 * The actual exporting from GEM object to a dma-buf is done through the
 * gem_prime_export driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj)  {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't miss removing this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_unreference_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import functions for GEM drivers
 * using the PRIME helpers.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which must be used mandatorily by GEM
 * drivers to ensure correct lifetime management of the underlying GEM object.
 * The actual importing of GEM object from the dma-buf is done through the
 * gem_import_export driver callback.
 */
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
		return -EINVAL;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, args->flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages; the driver
 * is responsible for mapping the pages into the importer's address space
 * for use with dma_buf itself.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
				nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
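
/*
 * Illustrative sketch (not part of this file): a driver whose buffers are
 * backed by a plain page array can implement its gem_prime_get_sg_table()
 * callback directly on top of drm_prime_pages_to_sg(). The "foo" object
 * type below is hypothetical:
 *
 *	struct foo_gem_object {
 *		struct drm_gem_object base;
 *		struct page **pages;
 *		unsigned int num_pages;
 *	};
 *
 *	static struct sg_table *
 *	foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *bo =
 *			container_of(obj, struct foo_gem_object, base);
 *
 *		return drm_prime_pages_to_sg(bo->pages, bo->num_pages);
 *	}
 */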

/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_pages: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
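
/*
 * Illustrative sketch (not part of this file): a gem_prime_import_sg_table()
 * implementation may use this helper to flatten the imported sg table back
 * into a page array (e.g. for CPU fault handling) before wrapping the pages
 * in its own GEM object type. The foo_* names are placeholders and the
 * driver-specific object creation is omitted:
 *
 *	static struct drm_gem_object *
 *	foo_gem_prime_import_sg_table(struct drm_device *dev,
 *				      struct dma_buf_attachment *attach,
 *				      struct sg_table *sgt)
 *	{
 *		unsigned int npages = attach->dmabuf->size >> PAGE_SHIFT;
 *		struct page **pages;
 *		int ret;
 *
 *		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
 *		if (!pages)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL, npages);
 *		if (ret) {
 *			kfree(pages);
 *			return ERR_PTR(-EINVAL);
 *		}
 *
 *		// ... construct and return the driver's GEM object around pages ...
 *	}
 */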

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * @drm_gem_prime_import to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;
	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
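
/*
 * Illustrative sketch (not part of this file): a driver's free-object path
 * typically checks for an import attachment and, if one exists, lets
 * drm_prime_gem_destroy() unmap and detach the dma-buf. The "foo" object
 * type and its cached sg table are hypothetical:
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *bo =
 *			container_of(obj, struct foo_gem_object, base);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, bo->sgt);
 *
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */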

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	mutex_init(&prime_fpriv->lock);
	prime_fpriv->dmabufs = RB_ROOT;
	prime_fpriv->handles = RB_ROOT;
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}