/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
	struct device		*dev;
};

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	dma_addr_t			dma_addr;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;
	struct sg_table			*sgt_base;

	/* USERPTR related */
	struct vm_area_struct		*vma;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/


static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
	void (*cb)(struct page *pg))
{
	struct scatterlist *s;
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		struct page *page = sg_page(s);
		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
			>> PAGE_SHIFT;
		unsigned int j;

		for (j = 0; j < n_pages; ++j, ++page)
			cb(page);
	}
}

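/*
 * Return the length of the initial physically contiguous chunk of a
 * DMA-mapped scatterlist, i.e. stop counting at the first address gap.
 */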
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

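/*
 * Cache sync before DMA. Nothing to do for coherent MMAP buffers (they have
 * no scatterlist) or for DMABUF buffers, where the exporter handles cache
 * maintenance.
 */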
static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	put_device(buf->dev);
	kfree(buf);
}

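/*
 * MMAP: allocate a physically contiguous, coherent buffer and take a
 * reference on the allocating device so it cannot go away while the
 * buffer is in use.
 */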
static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size,
			  enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
						GFP_KERNEL | gfp_flags);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

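/* Map the whole coherent buffer into userspace via dma_mmap_coherent(). */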
static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
	 * map whole buffer
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
		buf->dma_addr, buf->size);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/*
	 * Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

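/*
 * Exporter map: (re)map the per-attachment scatterlist for the importing
 * device, reusing the previous mapping when the direction is unchanged;
 * dmabuf->lock serializes map/unmap.
 */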
static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;
	int ret;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
	if (ret <= 0) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.kmap = vb2_dc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

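/* Build an sg_table describing the coherent allocation via dma_get_sgtable(). */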
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
		buf->size);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

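/*
 * Export an MMAP buffer as a DMA-BUF; the scatterlist describing the
 * underlying memory is created lazily on first export and cached in
 * buf->sgt_base.
 */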
static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

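/*
 * Fallback for VM_IO/VM_PFNMAP mappings that have no struct pages: verify
 * that the pfns backing the range are contiguous and return the first one.
 */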
static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
	struct vm_area_struct *vma, unsigned long *res)
{
	unsigned long pfn, start_pfn, prev_pfn;
	unsigned int i;
	int ret;

	if (!vma_is_io(vma))
		return -EFAULT;

	ret = follow_pfn(vma, start, &pfn);
	if (ret)
		return ret;

	start_pfn = pfn;
	start += PAGE_SIZE;

	for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
		prev_pfn = pfn;
		ret = follow_pfn(vma, start, &pfn);

		if (ret) {
			pr_err("no page for address %lu\n", start);
			return ret;
		}
		if (pfn != prev_pfn + 1)
			return -EINVAL;
	}

	*res = start_pfn;
	return 0;
}

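/*
 * Collect the struct pages backing a user range: walk pfns for VM_IO /
 * VM_PFNMAP vmas, otherwise pin the pages with get_user_pages().
 */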
static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
	int n_pages, struct vm_area_struct *vma,
	enum dma_data_direction dma_dir)
{
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);

			if (ret) {
				pr_err("no page for address %lu\n", start);
				return ret;
			}
			if (!pfn_valid(pfn))
				return -EINVAL;

			pages[i] = pfn_to_page(pfn);
		}
	} else {
		int n;

		n = get_user_pages(current, current->mm, start & PAGE_MASK,
			n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL);
		/* negative error means that no page was pinned */
		n = max(n, 0);
		if (n != n_pages) {
			pr_err("got only %d of %d user pages\n", n, n_pages);
			while (n)
				put_page(pages[--n]);
			return -EFAULT;
		}
	}

	return 0;
}

static void vb2_dc_put_dirty_page(struct page *page)
{
	set_page_dirty_lock(page);
	put_page(page);
}

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (sgt) {
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, &attrs);
		if (!vma_is_io(buf->vma))
			vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

		sg_free_table(sgt);
		kfree(sgt);
	}
	down_read(&current->mm->mmap_sem);
	vb2_put_vma(buf->vma);
	up_read(&current->mm->mmap_sem);
	kfree(buf);
}

/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert the
 * pfn to a dma address, or as a last resort assume that
 * dma address == physical address (as earlier versions of
 * videobuf2-dma-contig did).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	/* really, we cannot do anything better at this point */
	return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif

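/*
 * USERPTR: pin or look up the user pages, build a scatterlist, map it for
 * DMA and verify that the resulting mapping is contiguous and big enough.
 */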
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	unsigned long start;
	unsigned long end;
	unsigned long offset;
	struct page **pages;
	int n_pages;
	int ret = 0;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = dma_dir;

	start = vaddr & PAGE_MASK;
	offset = vaddr & ~PAGE_MASK;
	end = PAGE_ALIGN(vaddr + size);
	n_pages = (end - start) >> PAGE_SHIFT;

	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		pr_err("failed to allocate pages table\n");
		goto fail_buf;
	}

	down_read(&current->mm->mmap_sem);
	/* mmap_sem is held (taken above), so find_vma() is safe */
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		pr_err("no vma for address %lu\n", vaddr);
		ret = -EFAULT;
		goto fail_pages;
	}

	if (vma->vm_end < vaddr + size) {
		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
		ret = -EFAULT;
		goto fail_pages;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		pr_err("failed to copy vma\n");
		ret = -ENOMEM;
		goto fail_pages;
	}

	/* extract page list from userspace mapping */
	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, dma_dir);
	if (ret) {
		unsigned long pfn;
		if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
			up_read(&current->mm->mmap_sem);
			buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
			buf->size = size;
			kfree(pages);
			return buf;
		}

		pr_err("failed to get user pages\n");
		goto fail_vma;
	}
	up_read(&current->mm->mmap_sem);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_get_user_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/* pages are no longer needed */
	kfree(pages);
	pages = NULL;

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->size = size;
	buf->dma_sgt = sgt;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, &attrs);

fail_sgt_init:
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, put_page);
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_get_user_pages:
	if (pages && !vma_is_io(buf->vma))
		while (n_pages)
			put_page(pages[--n_pages]);

	down_read(&current->mm->mmap_sem);
fail_vma:
	vb2_put_vma(buf->vma);

fail_pages:
	up_read(&current->mm->mmap_sem);
	kfree(pages); /* kfree is NULL-proof */

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

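/*
 * DMABUF: map the attached buffer through the exporter and check that the
 * importer actually got one contiguous chunk of at least buf->size bytes.
 */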
static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

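/*
 * Create the dma_buf attachment for the importing device; the actual
 * mapping is done later in vb2_dc_map_dmabuf().
 */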
static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	if (!IS_ERR_OR_NULL(alloc_ctx))
		kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
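
/*
 * Minimal usage sketch (illustrative only; "my_dev", "pdev" and "q" are
 * hypothetical names, not part of this file). A driver typically creates an
 * allocator context at probe time, hands it out from its queue_setup
 * callback and plugs vb2_dma_contig_memops into its vb2_queue:
 *
 *	my_dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *	if (IS_ERR(my_dev->alloc_ctx))
 *		return PTR_ERR(my_dev->alloc_ctx);
 *	q->mem_ops = &vb2_dma_contig_memops;
 *
 *	in queue_setup():  alloc_ctxs[0] = my_dev->alloc_ctx;
 *	at driver removal: vb2_dma_contig_cleanup_ctx(my_dev->alloc_ctx);
 */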

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");