/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

struct vb2_dma_sg_buf {
	struct device			*dev;
	void				*vaddr;
	struct page			**pages;
	struct frame_vector		*vec;
	int				offset;
	enum dma_data_direction		dma_dir;
	struct sg_table			sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table			*dma_sgt;
	size_t				size;
	unsigned int			num_pages;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;

	struct dma_buf_attachment	*db_attach;
};

static void vb2_dma_sg_put(void *buf_priv);
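
/*
 * Allocate the buffer as a series of compound allocations: start at the
 * highest page order that still fits the remaining size, fall back to
 * smaller orders when allocation fails, and split each compound page so
 * the individual struct page pointers can be stored in buf->pages[].
 */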

static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	int size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}
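
/*
 * MMAP memory model: allocate the pages, build a scatterlist covering
 * them, take a reference on the device and map the scatterlist for DMA.
 * The CPU sync is deliberately skipped here; it happens later in the
 * prepare() memop.
 */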

static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
			      unsigned long size, enum dma_data_direction dma_dir,
			      gfp_t gfp_flags)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dma_dir = dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (!sgt->nents)
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_map:
	put_device(buf->dev);
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}
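
/*
 * Drop one reference on an MMAP buffer and, on the last put, undo
 * everything vb2_dma_sg_alloc() did: unmap the scatterlist, remove any
 * kernel mapping, free the pages and release the device reference.
 */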

static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}
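
/*
 * prepare()/finish() bracket each hardware operation: prepare() hands
 * cache ownership of the buffer to the device, finish() returns it to
 * the CPU. Both are no-ops for DMABUF buffers, where the exporter is
 * responsible for cache maintenance.
 */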

static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}
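
/*
 * USERPTR memory model: pin the user pages with a frame vector, build a
 * scatterlist over them (honouring the sub-page offset of the user
 * address) and map it for DMA, again deferring the CPU sync to the
 * prepare() memop.
 */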

static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	struct frame_vector *vec;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dev = dev;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (!sgt->nents)
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
	vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
	       __func__, buf->num_pages);
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
			   DMA_ATTR_SKIP_CPU_SYNC);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	while (--i >= 0) {
		if (buf->dma_dir == DMA_FROM_DEVICE)
			set_page_dirty_lock(buf->pages[i]);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}
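
/*
 * Create the kernel mapping lazily on first use: vmap the dma-buf for
 * imported buffers, vm_map_ram() the page list otherwise.
 */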

static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach)
			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
		else
			buf->vaddr = vm_map_ram(buf->pages,
					buf->num_pages, -1, PAGE_KERNEL);
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}
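
/*
 * Map the buffer into a userspace VMA one page at a time with
 * vm_insert_page(), then install the common vm_operations so the vma
 * open/close handlers keep the buffer refcount in sync with the mapping.
 */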

static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}
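
/*
 * Map the attachment's private scatterlist copy for the importing
 * device. The mapping is cached in the attachment: a repeat call with
 * the same direction returns it unchanged, while a direction change
 * unmaps and remaps. The dmabuf lock serializes concurrent callers.
 */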

static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
	vb2_dma_sg_put(dbuf->priv);
}

static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	return vb2_dma_sg_vaddr(buf);
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dma_sg_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
	.attach = vb2_dma_sg_dmabuf_ops_attach,
	.detach = vb2_dma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
	.kmap = vb2_dma_sg_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dma_sg_dmabuf_ops_kmap,
	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
	.release = vb2_dma_sg_dmabuf_ops_release,
};
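
/*
 * Export an MMAP buffer as a dma-buf. The exported buffer holds an
 * extra reference on the vb2 buffer, dropped again in the dma-buf
 * release() op.
 */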

static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/
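
/*
 * Importer side: attach_dmabuf() creates the attachment, map_dmabuf()
 * pins the exporter's scatterlist before the buffer is used and
 * unmap_dmabuf()/detach_dmabuf() undo those steps again.
 */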

static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_dma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}
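
/*
 * Import a dma-buf: verify it is large enough for the requested buffer
 * size and create an attachment for the user's device. The scatterlist
 * itself is only fetched later, in vb2_dma_sg_map_dmabuf().
 */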

static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.prepare	= vb2_dma_sg_prepare,
	.finish		= vb2_dma_sg_finish,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.get_dmabuf	= vb2_dma_sg_get_dmabuf,
	.map_dmabuf	= vb2_dma_sg_map_dmabuf,
	.unmap_dmabuf	= vb2_dma_sg_unmap_dmabuf,
	.attach_dmabuf	= vb2_dma_sg_attach_dmabuf,
	.detach_dmabuf	= vb2_dma_sg_detach_dmabuf,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
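
/*
 * Usage sketch (illustrative, not part of the allocator): a driver
 * selects this allocator by setting the queue's mem_ops before
 * vb2_queue_init() and can fetch a plane's scatterlist through the
 * cookie helper from <media/videobuf2-dma-sg.h>:
 *
 *	q->mem_ops = &vb2_dma_sg_memops;
 *	ret = vb2_queue_init(q);
 *	...
 *	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, plane_no);
 */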

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");