/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
	struct device		*dev;
};

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	dma_addr_t			dma_addr;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

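/*
 * Compute the size of the initial run of DMA-contiguous entries in @sgt,
 * i.e. how many bytes are reachable from the first DMA address.
 */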
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

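/* The cookie of a dma-contig buffer is a pointer to its DMA address. */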
static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

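/*
 * Return a kernel virtual address for the buffer; imported dmabufs are
 * vmap()ed lazily on first use.
 */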
static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

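/* Report how many users (mappings, exports, ...) still hold the buffer. */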
static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

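/* Sync the buffer for the device before DMA (no-op for imported dmabufs). */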
static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

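/* Sync the buffer back for the CPU after DMA (no-op for imported dmabufs). */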
static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

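/* Drop one reference; the last put frees the buffer and its sg table. */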
static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	put_device(buf->dev);
	kfree(buf);
}

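/* Allocate a coherent DMA buffer of @size bytes for an MMAP vb2 buffer. */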
static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size,
			  enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
						GFP_KERNEL | gfp_flags);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

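/* Map the whole buffer into a userspace VMA, ignoring vm_pgoff. */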
static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want to
	 * map the whole buffer.
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
		buf->dma_addr, buf->size);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

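/*
 * Give each attachment its own copy of the exporter's scatterlist: one
 * sg_table cannot be mapped for several importing devices at once.
 */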
static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

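/* Unmap (if needed) and free the per-attachment scatterlist copy. */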
static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

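/*
 * Map the attachment's scatterlist for the importing device; a previous
 * mapping is reused if the DMA direction is unchanged.
 */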
static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.kmap = vb2_dc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

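/* Build an sg_table describing the coherent allocation, used for export. */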
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
		buf->size);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

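/* Export the buffer as a dma-buf; the dma-buf holds its own reference. */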
static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

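/*
 * Release a USERPTR buffer: unmap it for DMA, mark the pages dirty and
 * release the pinned user pages.
 */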
static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
		/*
		 * No need to sync to the CPU: the buffer is already synced,
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, &attrs);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		for (i = 0; i < frame_vector_count(buf->vec); i++)
			set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert the
 * pfn to a dma address, or as a last resort assume that dma address ==
 * physical address (as was assumed in earlier versions of
 * videobuf2-dma-contig).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	/* really, we cannot do anything better at this point */
	return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif

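/*
 * Pin the user pages backing [vaddr, vaddr + size) and map them for DMA;
 * the range must resolve to a single DMA-contiguous region.
 */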
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned long offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = dma_dir;

	offset = vaddr & ~PAGE_MASK;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check that the memory is
		 * physically contiguous and use a direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, &attrs);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

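/* Pin an attached dmabuf and check that it is DMA-contiguous and big enough. */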
static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

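/* Undo vb2_dc_map_dmabuf(): drop the vmap (if any) and unmap the attachment. */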
static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

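/* Attach to @dbuf for later pinning via vb2_dc_map_dmabuf(). */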
static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

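/*
 * Allocate an allocator context bound to @dev. A minimal usage sketch for
 * a driver (the "priv", "q" and "pdev" names are illustrative only):
 *
 *	priv->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *	if (IS_ERR(priv->alloc_ctx))
 *		return PTR_ERR(priv->alloc_ctx);
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	...
 *	vb2_dma_contig_cleanup_ctx(priv->alloc_ctx);
 */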
void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	if (!IS_ERR_OR_NULL(alloc_ctx))
		kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");