/*
 * videobuf2-core.c - video buffer 2 core framework
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * The vb2_thread implementation was based on code from videobuf-dvb.c:
 *	(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

#include <media/videobuf2-core.h>

#include <trace/events/vb2.h>

/* Module-wide debug level; higher values enable more verbose dprintk logging. */
static int debug;
module_param(debug, int, 0644);

/* Print a debug message when the module-wide 'debug' level is high enough. */
#define dprintk(level, fmt, arg...)					      \
	do {								      \
		if (debug >= level)					      \
			pr_info("vb2-core: %s: " fmt, __func__, ## arg); \
	} while (0)

#ifdef CONFIG_VIDEO_ADV_DEBUG

/*
 * If advanced debugging is on, then count how often each op is called
 * successfully, which can either be per-buffer or per-queue.
 *
 * This makes it easy to check that the 'init' and 'cleanup'
 * (and variations thereof) stay balanced.
 */

#define log_memop(vb, op)						\
	dprintk(2, "call_memop(%p, %d, %s)%s\n",			\
		(vb)->vb2_queue, (vb)->index, #op,			\
		(vb)->vb2_queue->mem_ops->op ? "" : " (nop)")

/* Invoke an int-returning mem_op if provided; count successful calls. */
#define call_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	int err;							\
									\
	log_memop(vb, op);						\
	err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0;		\
	if (!err)							\
		(vb)->cnt_mem_ ## op++;					\
	err;								\
})

/* Invoke a pointer-returning mem_op if provided; count successful calls. */
#define call_ptr_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	void *ptr;							\
									\
	log_memop(vb, op);						\
	ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL;		\
	if (!IS_ERR_OR_NULL(ptr))					\
		(vb)->cnt_mem_ ## op++;					\
	ptr;								\
})

/* Invoke a void mem_op if provided; always counted as called. */
#define call_void_memop(vb, op, args...)				\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
									\
	log_memop(vb, op);						\
	if (_q->mem_ops->op)						\
		_q->mem_ops->op(args);					\
	(vb)->cnt_mem_ ## op++;						\
})

#define log_qop(q, op)							\
	dprintk(2, "call_qop(%p, %s)%s\n", q, #op,			\
		(q)->ops->op ? "" : " (nop)")

/* Invoke an int-returning queue op if provided; count successful calls. */
#define call_qop(q, op, args...)					\
({									\
	int err;							\
									\
	log_qop(q, op);							\
	err = (q)->ops->op ? (q)->ops->op(args) : 0;			\
	if (!err)							\
		(q)->cnt_ ## op++;					\
	err;								\
})

/* Invoke a void queue op if provided; always counted as called. */
#define call_void_qop(q, op, args...)					\
({									\
	log_qop(q, op);							\
	if ((q)->ops->op)						\
		(q)->ops->op(args);					\
	(q)->cnt_ ## op++;						\
})

#define log_vb_qop(vb, op, args...)					\
	dprintk(2, "call_vb_qop(%p, %d, %s)%s\n",			\
		(vb)->vb2_queue, (vb)->index, #op,			\
		(vb)->vb2_queue->ops->op ? "" : " (nop)")

/* Invoke an int-returning per-buffer queue op; count successful calls. */
#define call_vb_qop(vb, op, args...)					\
({									\
	int err;							\
									\
	log_vb_qop(vb, op);						\
	err = (vb)->vb2_queue->ops->op ?				\
		(vb)->vb2_queue->ops->op(args) : 0;			\
	if (!err)							\
		(vb)->cnt_ ## op++;					\
	err;								\
})

/* Invoke a void per-buffer queue op; always counted as called. */
#define call_void_vb_qop(vb, op, args...)				\
({									\
	log_vb_qop(vb, op);						\
	if ((vb)->vb2_queue->ops->op)					\
		(vb)->vb2_queue->ops->op(args);				\
	(vb)->cnt_ ## op++;						\
})

#else

/* Non-debug variants: plain dispatch with no call counting or logging. */

#define call_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : 0)

#define call_ptr_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : NULL)

#define call_void_memop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->mem_ops->op)			\
			(vb)->vb2_queue->mem_ops->op(args);		\
	} while (0)

#define call_qop(q, op, args...)					\
	((q)->ops->op ? (q)->ops->op(args) : 0)

#define call_void_qop(q, op, args...)					\
	do {								\
		if ((q)->ops->op)					\
			(q)->ops->op(args);				\
	} while (0)

#define call_vb_qop(vb, op, args...)					\
	((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)

#define call_void_vb_qop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->ops->op)				\
			(vb)->vb2_queue->ops->op(args);			\
	} while (0)

#endif

/* Invoke an int-returning buf_op if the queue provides one; 0 otherwise. */
#define call_bufop(q, op, args...)					\
({									\
	int ret = 0;							\
	if (q && q->buf_ops && q->buf_ops->op)				\
		ret = q->buf_ops->op(args);				\
	ret;								\
})

/* Invoke a void buf_op if the queue provides one. */
#define call_void_bufop(q, op, args...)					\
({									\
	if (q && q->buf_ops && q->buf_ops->op)				\
		q->buf_ops->op(args);					\
})

/* Forward declarations for helpers defined later in this file. */
static void __vb2_queue_cancel(struct vb2_queue *q);
static void __enqueue_in_driver(struct vb2_buffer *vb);

190 191 192
/**
 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
 */
193
static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
194 195
{
	struct vb2_queue *q = vb->vb2_queue;
196
	enum dma_data_direction dma_dir =
197
		q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
198 199 200
	void *mem_priv;
	int plane;

201 202 203 204
	/*
	 * Allocate memory for all planes in this buffer
	 * NOTE: mmapped areas should be page aligned
	 */
205
	for (plane = 0; plane < vb->num_planes; ++plane) {
206 207
		unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);

208
		mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane],
209
				      size, dma_dir, q->gfp_flags);
210
		if (IS_ERR_OR_NULL(mem_priv))
211 212 213 214
			goto free;

		/* Associate allocator private data with this plane */
		vb->planes[plane].mem_priv = mem_priv;
215
		vb->planes[plane].length = q->plane_sizes[plane];
216 217 218 219 220
	}

	return 0;
free:
	/* Free already allocated memory if one of the allocations failed */
221
	for (; plane > 0; --plane) {
222
		call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
223 224
		vb->planes[plane - 1].mem_priv = NULL;
	}
225 226 227 228 229 230 231 232 233 234 235 236

	return -ENOMEM;
}

/**
 * __vb2_buf_mem_free() - free memory of the given buffer
 */
static void __vb2_buf_mem_free(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
237
		call_void_memop(vb, put, vb->planes[plane].mem_priv);
238
		vb->planes[plane].mem_priv = NULL;
239
		dprintk(3, "freed plane %d of buffer %d\n", plane, vb->index);
240 241 242 243 244 245 246 247 248 249 250 251
	}
}

/**
 * __vb2_buf_userptr_put() - release userspace memory associated with
 * a USERPTR buffer
 */
static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
252
		if (vb->planes[plane].mem_priv)
253
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
254
		vb->planes[plane].mem_priv = NULL;
255 256 257
	}
}

258 259 260 261
/**
 * __vb2_plane_dmabuf_put() - release memory associated with
 * a DMABUF shared plane
 */
262
static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
263 264 265 266 267
{
	if (!p->mem_priv)
		return;

	if (p->dbuf_mapped)
268
		call_void_memop(vb, unmap_dmabuf, p->mem_priv);
269

270
	call_void_memop(vb, detach_dmabuf, p->mem_priv);
271
	dma_buf_put(p->dbuf);
272 273 274
	p->mem_priv = NULL;
	p->dbuf = NULL;
	p->dbuf_mapped = 0;
275 276 277 278 279 280 281 282 283 284 285
}

/**
 * __vb2_buf_dmabuf_put() - release memory associated with
 * a DMABUF shared buffer
 */
static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane)
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
}

289 290 291 292
/**
 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
 * every buffer on the queue
 */
293
static void __setup_offsets(struct vb2_queue *q, unsigned int n)
294 295 296
{
	unsigned int buffer, plane;
	struct vb2_buffer *vb;
297
	unsigned long off;
298

299
	if (q->num_buffers) {
300
		struct vb2_plane *p;
301
		vb = q->bufs[q->num_buffers - 1];
302 303
		p = &vb->planes[vb->num_planes - 1];
		off = PAGE_ALIGN(p->m.offset + p->length);
304 305 306 307 308
	} else {
		off = 0;
	}

	for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
309 310 311 312 313
		vb = q->bufs[buffer];
		if (!vb)
			continue;

		for (plane = 0; plane < vb->num_planes; ++plane) {
314
			vb->planes[plane].m.offset = off;
315

316
			dprintk(3, "buffer %d, plane %d offset 0x%08lx\n",
317 318
					buffer, plane, off);

319
			off += vb->planes[plane].length;
320 321 322 323 324 325 326 327 328 329 330 331
			off = PAGE_ALIGN(off);
		}
	}
}

/**
 * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
 * video buffer memory for all buffers/planes on the queue and initializes the
 * queue
 *
 * Returns the number of buffers successfully allocated.
 */
332
static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
333
			     unsigned int num_buffers, unsigned int num_planes)
334
{
335
	unsigned int buffer, plane;
336 337 338 339 340 341 342
	struct vb2_buffer *vb;
	int ret;

	for (buffer = 0; buffer < num_buffers; ++buffer) {
		/* Allocate videobuf buffer structures */
		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
		if (!vb) {
343
			dprintk(1, "memory alloc for buffer struct failed\n");
344 345 346 347 348 349
			break;
		}

		vb->state = VB2_BUF_STATE_DEQUEUED;
		vb->vb2_queue = q;
		vb->num_planes = num_planes;
350 351 352
		vb->index = q->num_buffers + buffer;
		vb->type = q->type;
		vb->memory = memory;
353 354
		for (plane = 0; plane < num_planes; ++plane)
			vb->planes[plane].length = q->plane_sizes[plane];
355
		q->bufs[vb->index] = vb;
356 357

		/* Allocate video buffer memory for the MMAP type */
358
		if (memory == VB2_MEMORY_MMAP) {
359
			ret = __vb2_buf_mem_alloc(vb);
360
			if (ret) {
361
				dprintk(1, "failed allocating memory for "
362 363
						"buffer %d\n", buffer);
				kfree(vb);
364
				q->bufs[vb->index] = NULL;
365 366 367 368 369 370 371
				break;
			}
			/*
			 * Call the driver-provided buffer initialization
			 * callback, if given. An error in initialization
			 * results in queue setup failure.
			 */
372
			ret = call_vb_qop(vb, buf_init, vb);
373
			if (ret) {
374
				dprintk(1, "buffer %d %p initialization"
375 376
					" failed\n", buffer, vb);
				__vb2_buf_mem_free(vb);
377
				q->bufs[vb->index] = NULL;
378 379 380 381 382 383
				kfree(vb);
				break;
			}
		}
	}

384
	if (memory == VB2_MEMORY_MMAP)
385
		__setup_offsets(q, buffer);
386

387
	dprintk(1, "allocated %d buffers, %d plane(s) each\n",
388
			buffer, num_planes);
389 390 391 392 393 394 395

	return buffer;
}

/**
 * __vb2_free_mem() - release all video buffer memory for a given queue
 */
396
static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
397 398 399 400
{
	unsigned int buffer;
	struct vb2_buffer *vb;

401 402
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
403 404 405 406 407
		vb = q->bufs[buffer];
		if (!vb)
			continue;

		/* Free MMAP buffers or release USERPTR buffers */
408
		if (q->memory == VB2_MEMORY_MMAP)
409
			__vb2_buf_mem_free(vb);
410
		else if (q->memory == VB2_MEMORY_DMABUF)
411
			__vb2_buf_dmabuf_put(vb);
412 413 414 415 416 417
		else
			__vb2_buf_userptr_put(vb);
	}
}

/**
418 419 420
 * __vb2_queue_free() - free buffers at the end of the queue - video memory and
 * related information, if no buffers are left return the queue to an
 * uninitialized state. Might be called even if the queue has already been freed.
421
 */
422
static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
423 424 425
{
	unsigned int buffer;

426 427 428 429 430 431 432 433 434 435 436 437 438
	/*
	 * Sanity check: when preparing a buffer the queue lock is released for
	 * a short while (see __buf_prepare for the details), which would allow
	 * a race with a reqbufs which can call this function. Removing the
	 * buffers from underneath __buf_prepare is obviously a bad idea, so we
	 * check if any of the buffers is in the state PREPARING, and if so we
	 * just return -EAGAIN.
	 */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		if (q->bufs[buffer] == NULL)
			continue;
		if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
H
Hans Verkuil 已提交
439
			dprintk(1, "preparing buffers, cannot free\n");
440 441 442 443
			return -EAGAIN;
		}
	}

444
	/* Call driver-provided cleanup function for each buffer, if provided */
445 446
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
447 448 449
		struct vb2_buffer *vb = q->bufs[buffer];

		if (vb && vb->planes[0].mem_priv)
450
			call_void_vb_qop(vb, buf_cleanup, vb);
451 452 453
	}

	/* Release video buffer memory */
454
	__vb2_free_mem(q, buffers);
455

456 457 458 459 460 461 462 463 464 465
#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Check that all the calls were balances during the life-time of this
	 * queue. If not (or if the debug level is 1 or up), then dump the
	 * counters to the kernel log.
	 */
	if (q->num_buffers) {
		bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
				  q->cnt_wait_prepare != q->cnt_wait_finish;

466
		if (unbalanced || debug) {
467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491
			pr_info("vb2: counters for queue %p:%s\n", q,
				unbalanced ? " UNBALANCED!" : "");
			pr_info("vb2:     setup: %u start_streaming: %u stop_streaming: %u\n",
				q->cnt_queue_setup, q->cnt_start_streaming,
				q->cnt_stop_streaming);
			pr_info("vb2:     wait_prepare: %u wait_finish: %u\n",
				q->cnt_wait_prepare, q->cnt_wait_finish);
		}
		q->cnt_queue_setup = 0;
		q->cnt_wait_prepare = 0;
		q->cnt_wait_finish = 0;
		q->cnt_start_streaming = 0;
		q->cnt_stop_streaming = 0;
	}
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];
		bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
				  vb->cnt_mem_prepare != vb->cnt_mem_finish ||
				  vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
				  vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
				  vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
				  vb->cnt_buf_queue != vb->cnt_buf_done ||
				  vb->cnt_buf_prepare != vb->cnt_buf_finish ||
				  vb->cnt_buf_init != vb->cnt_buf_cleanup;

492
		if (unbalanced || debug) {
493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517
			pr_info("vb2:   counters for queue %p, buffer %d:%s\n",
				q, buffer, unbalanced ? " UNBALANCED!" : "");
			pr_info("vb2:     buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
				vb->cnt_buf_init, vb->cnt_buf_cleanup,
				vb->cnt_buf_prepare, vb->cnt_buf_finish);
			pr_info("vb2:     buf_queue: %u buf_done: %u\n",
				vb->cnt_buf_queue, vb->cnt_buf_done);
			pr_info("vb2:     alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
				vb->cnt_mem_alloc, vb->cnt_mem_put,
				vb->cnt_mem_prepare, vb->cnt_mem_finish,
				vb->cnt_mem_mmap);
			pr_info("vb2:     get_userptr: %u put_userptr: %u\n",
				vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
			pr_info("vb2:     attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
				vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
				vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
			pr_info("vb2:     get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
				vb->cnt_mem_get_dmabuf,
				vb->cnt_mem_num_users,
				vb->cnt_mem_vaddr,
				vb->cnt_mem_cookie);
		}
	}
#endif

518
	/* Free videobuf buffers */
519 520
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
521 522 523 524
		kfree(q->bufs[buffer]);
		q->bufs[buffer] = NULL;
	}

525
	q->num_buffers -= buffers;
526
	if (!q->num_buffers) {
527
		q->memory = 0;
528 529
		INIT_LIST_HEAD(&q->queued_list);
	}
530
	return 0;
531 532
}

533
/**
534
 * vb2_buffer_in_use() - return true if the buffer is in use and
535 536
 * the queue cannot be freed (by the means of REQBUFS(0)) call
 */
537
bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
538 539 540
{
	unsigned int plane;
	for (plane = 0; plane < vb->num_planes; ++plane) {
541
		void *mem_priv = vb->planes[plane].mem_priv;
542 543 544 545 546 547
		/*
		 * If num_users() has not been provided, call_memop
		 * will return 0, apparently nobody cares about this
		 * case anyway. If num_users() returns more than 1,
		 * we are not the only user of the plane's memory.
		 */
548
		if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
549 550 551 552
			return true;
	}
	return false;
}
553
EXPORT_SYMBOL(vb2_buffer_in_use);
554 555 556 557 558 559 560 561 562

/**
 * __buffers_in_use() - return true if any buffers on the queue are in use and
 * the queue cannot be freed (by the means of REQBUFS(0)) call
 */
static bool __buffers_in_use(struct vb2_queue *q)
{
	unsigned int buffer;
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
563
		if (vb2_buffer_in_use(q, q->bufs[buffer]))
564 565 566 567 568
			return true;
	}
	return false;
}

569 570 571 572 573 574 575 576 577 578
/**
 * vb2_core_querybuf() - query video buffer information
 * @q:		videobuf queue
 * @index:	id number of the buffer
 * @pb:		buffer struct passed from userspace
 *
 * Should be called from vidioc_querybuf ioctl handler in driver.
 * The passed buffer should have been verified.
 * This function fills the relevant information for the userspace.
 */
579
void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
580
{
581
	call_void_bufop(q, fill_user_buffer, q->bufs[index], pb);
582
}
583
EXPORT_SYMBOL_GPL(vb2_core_querybuf);
584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610

/**
 * __verify_userptr_ops() - verify that all memory operations required for
 * USERPTR queue type have been provided
 *
 * Returns 0 when the USERPTR io mode is enabled and both get_userptr and
 * put_userptr allocator callbacks exist, -EINVAL otherwise.
 */
static int __verify_userptr_ops(struct vb2_queue *q)
{
	if ((q->io_modes & VB2_USERPTR) && q->mem_ops->get_userptr &&
	    q->mem_ops->put_userptr)
		return 0;

	return -EINVAL;
}

/**
 * __verify_mmap_ops() - verify that all memory operations required for
 * MMAP queue type have been provided
 *
 * Returns 0 when the MMAP io mode is enabled and the alloc, put and mmap
 * allocator callbacks all exist, -EINVAL otherwise.
 */
static int __verify_mmap_ops(struct vb2_queue *q)
{
	if ((q->io_modes & VB2_MMAP) && q->mem_ops->alloc &&
	    q->mem_ops->put && q->mem_ops->mmap)
		return 0;

	return -EINVAL;
}

611 612 613 614 615 616 617 618 619 620 621 622 623 624
/**
 * __verify_dmabuf_ops() - verify that all memory operations required for
 * DMABUF queue type have been provided
 */
static int __verify_dmabuf_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
	    !q->mem_ops->detach_dmabuf  || !q->mem_ops->map_dmabuf ||
	    !q->mem_ops->unmap_dmabuf)
		return -EINVAL;

	return 0;
}

625
/**
626
 * vb2_verify_memory_type() - Check whether the memory type and buffer type
627 628
 * passed to a buffer operation are compatible with the queue.
 */
629
int vb2_verify_memory_type(struct vb2_queue *q,
630
		enum vb2_memory memory, unsigned int type)
631
{
632 633
	if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR &&
	    memory != VB2_MEMORY_DMABUF) {
H
Hans Verkuil 已提交
634
		dprintk(1, "unsupported memory type\n");
635 636 637 638
		return -EINVAL;
	}

	if (type != q->type) {
H
Hans Verkuil 已提交
639
		dprintk(1, "requested type is incorrect\n");
640 641 642 643 644 645 646
		return -EINVAL;
	}

	/*
	 * Make sure all the required memory ops for given memory type
	 * are available.
	 */
647
	if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) {
H
Hans Verkuil 已提交
648
		dprintk(1, "MMAP for current setup unsupported\n");
649 650 651
		return -EINVAL;
	}

652
	if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
H
Hans Verkuil 已提交
653
		dprintk(1, "USERPTR for current setup unsupported\n");
654 655 656
		return -EINVAL;
	}

657
	if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
H
Hans Verkuil 已提交
658
		dprintk(1, "DMABUF for current setup unsupported\n");
659 660 661
		return -EINVAL;
	}

662 663 664 665 666
	/*
	 * Place the busy tests at the end: -EBUSY can be ignored when
	 * create_bufs is called with count == 0, but count == 0 should still
	 * do the memory and type validation.
	 */
667
	if (vb2_fileio_is_active(q)) {
H
Hans Verkuil 已提交
668
		dprintk(1, "file io in progress\n");
669 670 671 672
		return -EBUSY;
	}
	return 0;
}
673
EXPORT_SYMBOL(vb2_verify_memory_type);
674 675

/**
676
 * vb2_core_reqbufs() - Initiate streaming
677
 * @q:		videobuf2 queue
678 679
 * @memory: memory type
 * @count: requested buffer count
680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698
 *
 * Should be called from vidioc_reqbufs ioctl handler of a driver.
 * This function:
 * 1) verifies streaming parameters passed from the userspace,
 * 2) sets up the queue,
 * 3) negotiates number of buffers and planes per buffer with the driver
 *    to be used during streaming,
 * 4) allocates internal buffer structures (struct vb2_buffer), according to
 *    the agreed parameters,
 * 5) for MMAP memory type, allocates actual video memory, using the
 *    memory handling/allocation routines provided during queue initialization
 *
 * If req->count is 0, all the memory will be freed instead.
 * If the queue has been allocated previously (by a previous vb2_reqbufs) call
 * and the queue is not busy, memory will be reallocated.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_reqbufs handler in driver.
 */
699
int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
700
		unsigned int *count)
701
{
702
	unsigned int num_buffers, allocated_buffers, num_planes = 0;
703
	int ret;
704 705

	if (q->streaming) {
H
Hans Verkuil 已提交
706
		dprintk(1, "streaming active\n");
707 708 709
		return -EBUSY;
	}

710
	if (*count == 0 || q->num_buffers != 0 || q->memory != memory) {
711 712 713 714
		/*
		 * We already have buffers allocated, so first check if they
		 * are not in use and can be freed.
		 */
715
		mutex_lock(&q->mmap_lock);
716
		if (q->memory == VB2_MEMORY_MMAP && __buffers_in_use(q)) {
717
			mutex_unlock(&q->mmap_lock);
H
Hans Verkuil 已提交
718
			dprintk(1, "memory in use, cannot free\n");
719 720 721
			return -EBUSY;
		}

722 723 724 725 726 727
		/*
		 * Call queue_cancel to clean up any buffers in the PREPARED or
		 * QUEUED state which is possible if buffers were prepared or
		 * queued without ever calling STREAMON.
		 */
		__vb2_queue_cancel(q);
728
		ret = __vb2_queue_free(q, q->num_buffers);
729
		mutex_unlock(&q->mmap_lock);
730 731
		if (ret)
			return ret;
732 733 734 735 736

		/*
		 * In case of REQBUFS(0) return immediately without calling
		 * driver's queue_setup() callback and allocating resources.
		 */
737
		if (*count == 0)
738
			return 0;
739 740 741 742 743
	}

	/*
	 * Make sure the requested values and current defaults are sane.
	 */
744
	num_buffers = min_t(unsigned int, *count, VB2_MAX_FRAME);
745
	num_buffers = max_t(unsigned int, num_buffers, q->min_buffers_needed);
746
	memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
747
	memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
748
	q->memory = memory;
749 750 751 752 753

	/*
	 * Ask the driver how many buffers and planes per buffer it requires.
	 * Driver also sets the size and allocator context for each plane.
	 */
754
	ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
755
		       q->plane_sizes, q->alloc_ctx);
756
	if (ret)
757 758 759
		return ret;

	/* Finally, allocate buffers and video memory */
760 761
	allocated_buffers =
		__vb2_queue_alloc(q, memory, num_buffers, num_planes);
762
	if (allocated_buffers == 0) {
763
		dprintk(1, "memory allocation failed\n");
764
		return -ENOMEM;
765 766
	}

767 768 769 770 771 772 773
	/*
	 * There is no point in continuing if we can't allocate the minimum
	 * number of buffers needed by this vb2_queue.
	 */
	if (allocated_buffers < q->min_buffers_needed)
		ret = -ENOMEM;

774 775 776
	/*
	 * Check if driver can handle the allocated number of buffers.
	 */
777
	if (!ret && allocated_buffers < num_buffers) {
778
		num_buffers = allocated_buffers;
779 780 781 782 783 784 785
		/*
		 * num_planes is set by the previous queue_setup(), but since it
		 * signals to queue_setup() whether it is called from create_bufs()
		 * vs reqbufs() we zero it here to signal that queue_setup() is
		 * called for the reqbufs() case.
		 */
		num_planes = 0;
786

787
		ret = call_qop(q, queue_setup, q, &num_buffers,
788
			       &num_planes, q->plane_sizes, q->alloc_ctx);
789

790
		if (!ret && allocated_buffers < num_buffers)
791 792 793
			ret = -ENOMEM;

		/*
794 795
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
796
		 */
797 798
	}

799
	mutex_lock(&q->mmap_lock);
800 801 802
	q->num_buffers = allocated_buffers;

	if (ret < 0) {
803 804 805 806
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 */
807
		__vb2_queue_free(q, allocated_buffers);
808
		mutex_unlock(&q->mmap_lock);
809
		return ret;
810
	}
811
	mutex_unlock(&q->mmap_lock);
812 813 814 815 816

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
817
	*count = allocated_buffers;
818
	q->waiting_for_buffers = !q->is_output;
819 820 821

	return 0;
}
822
EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
823

824
/**
825
 * vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs
826
 * @q:		videobuf2 queue
827 828 829
 * @memory: memory type
 * @count: requested buffer count
 * @parg: parameter passed to device driver
830 831 832 833 834 835 836 837 838 839
 *
 * Should be called from vidioc_create_bufs ioctl handler of a driver.
 * This function:
 * 1) verifies parameter sanity
 * 2) calls the .queue_setup() queue operation
 * 3) performs any necessary memory allocations
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_create_bufs handler in driver.
 */
840
int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
841 842
		unsigned int *count, unsigned requested_planes,
		const unsigned requested_sizes[])
843 844
{
	unsigned int num_planes = 0, num_buffers, allocated_buffers;
845
	int ret;
846

847
	if (q->num_buffers == VB2_MAX_FRAME) {
H
Hans Verkuil 已提交
848
		dprintk(1, "maximum number of buffers already allocated\n");
849 850 851 852 853 854
		return -ENOBUFS;
	}

	if (!q->num_buffers) {
		memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
		memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
855
		q->memory = memory;
856
		q->waiting_for_buffers = !q->is_output;
857 858
	}

859
	num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);
860

861 862 863 864 865
	if (requested_planes && requested_sizes) {
		num_planes = requested_planes;
		memcpy(q->plane_sizes, requested_sizes, sizeof(q->plane_sizes));
	}

866 867 868 869
	/*
	 * Ask the driver, whether the requested number of buffers, planes per
	 * buffer and their sizes are acceptable
	 */
870
	ret = call_qop(q, queue_setup, q, &num_buffers,
871
		       &num_planes, q->plane_sizes, q->alloc_ctx);
872
	if (ret)
873 874 875
		return ret;

	/* Finally, allocate buffers and video memory */
876
	allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
877
				num_planes);
878
	if (allocated_buffers == 0) {
879
		dprintk(1, "memory allocation failed\n");
880
		return -ENOMEM;
881 882 883 884 885
	}

	/*
	 * Check if driver can handle the so far allocated number of buffers.
	 */
886 887
	if (allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;
888 889 890 891 892

		/*
		 * q->num_buffers contains the total number of buffers, that the
		 * queue driver has set up
		 */
893
		ret = call_qop(q, queue_setup, q, &num_buffers,
894 895 896 897 898 899 900 901 902 903 904
			       &num_planes, q->plane_sizes, q->alloc_ctx);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

905
	mutex_lock(&q->mmap_lock);
906 907 908
	q->num_buffers += allocated_buffers;

	if (ret < 0) {
909 910 911 912
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 */
913
		__vb2_queue_free(q, allocated_buffers);
914
		mutex_unlock(&q->mmap_lock);
915
		return -ENOMEM;
916
	}
917
	mutex_unlock(&q->mmap_lock);
918 919 920 921 922

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
923
	*count = allocated_buffers;
924 925 926

	return 0;
}
927
EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
928

929 930 931 932 933 934 935 936 937 938
/**
 * vb2_plane_vaddr() - Return a kernel virtual address of a given plane
 * @vb:		vb2_buffer to which the plane in question belongs to
 * @plane_no:	plane number for which the address is to be returned
 *
 * This function returns a kernel virtual address of a given plane if
 * such a mapping exist, NULL otherwise.
 */
void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
{
939
	if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
940 941
		return NULL;

942
	return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959

}
EXPORT_SYMBOL_GPL(vb2_plane_vaddr);

/**
 * vb2_plane_cookie() - Return allocator specific cookie for the given plane
 * @vb:		vb2_buffer to which the plane in question belongs to
 * @plane_no:	plane number for which the cookie is to be returned
 *
 * This function returns an allocator specific cookie for a given plane if
 * available, NULL otherwise. The allocator should provide some simple static
 * inline function, which would convert this cookie to the allocator specific
 * type that can be used directly by the driver to access the buffer. This can
 * be for example physical address, pointer to scatter list or IOMMU mapping.
 */
void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
{
960
	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
961 962
		return NULL;

963
	return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
964 965 966 967 968 969
}
EXPORT_SYMBOL_GPL(vb2_plane_cookie);

/**
 * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
 * @vb:		vb2_buffer returned from the driver
970 971 972
 * @state:	either VB2_BUF_STATE_DONE if the operation finished successfully,
 *		VB2_BUF_STATE_ERROR if the operation finished with an error or
 *		VB2_BUF_STATE_QUEUED if the driver wants to requeue buffers.
973 974
 *		If start_streaming fails then it should return buffers with state
 *		VB2_BUF_STATE_QUEUED to put them back into the queue.
975 976 977 978 979 980
 *
 * This function should be called by the driver after a hardware operation on
 * a buffer is finished and the buffer may be returned to userspace. The driver
 * cannot use this buffer anymore until it is queued back to it by videobuf
 * by the means of buf_queue callback. Only buffers previously queued to the
 * driver by buf_queue can be passed to this function.
981 982 983 984 985
 *
 * While streaming a buffer can only be returned in state DONE or ERROR.
 * The start_streaming op can also return them in case the DMA engine cannot
 * be started for some reason. In that case the buffers should be returned with
 * state QUEUED.
986 987 988 989 990
 */
void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned long flags;
991
	unsigned int plane;
992

993
	if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
994 995
		return;

996 997
	if (WARN_ON(state != VB2_BUF_STATE_DONE &&
		    state != VB2_BUF_STATE_ERROR &&
998 999
		    state != VB2_BUF_STATE_QUEUED &&
		    state != VB2_BUF_STATE_REQUEUEING))
1000
		state = VB2_BUF_STATE_ERROR;
1001

1002 1003 1004 1005 1006 1007 1008
#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Although this is not a callback, it still does have to balance
	 * with the buf_queue op. So update this counter manually.
	 */
	vb->cnt_buf_done++;
#endif
1009
	dprintk(4, "done processing on buffer %d, state: %d\n",
1010
			vb->index, state);
1011

1012 1013
	/* sync buffers */
	for (plane = 0; plane < vb->num_planes; ++plane)
1014
		call_void_memop(vb, finish, vb->planes[plane].mem_priv);
1015

1016
	spin_lock_irqsave(&q->done_lock, flags);
1017 1018 1019 1020 1021
	if (state == VB2_BUF_STATE_QUEUED ||
	    state == VB2_BUF_STATE_REQUEUEING) {
		vb->state = VB2_BUF_STATE_QUEUED;
	} else {
		/* Add the buffer to the done buffers list */
1022
		list_add_tail(&vb->done_entry, &q->done_list);
1023 1024
		vb->state = state;
	}
1025
	atomic_dec(&q->owned_by_drv_count);
1026 1027
	spin_unlock_irqrestore(&q->done_lock, flags);

1028 1029
	trace_vb2_buf_done(q, vb);

1030 1031 1032 1033
	switch (state) {
	case VB2_BUF_STATE_QUEUED:
		return;
	case VB2_BUF_STATE_REQUEUEING:
1034 1035
		if (q->start_streaming_called)
			__enqueue_in_driver(vb);
1036
		return;
1037 1038 1039 1040
	default:
		/* Inform any processes that may be waiting for buffers */
		wake_up(&q->done_wq);
		break;
1041
	}
1042 1043 1044
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);

1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068
/**
 * vb2_discard_done() - discard all buffers marked as DONE
 * @q:		videobuf2 queue
 *
 * This function is intended to be used with suspend/resume operations. It
 * discards all 'done' buffers as they would be too old to be requested after
 * resume.
 *
 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
 * delayed works before calling this function to make sure no buffer will be
 * touched by the driver and/or hardware.
 */
void vb2_discard_done(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	unsigned long flags;

	spin_lock_irqsave(&q->done_lock, flags);
	list_for_each_entry(vb, &q->done_list, done_entry)
		vb->state = VB2_BUF_STATE_ERROR;
	spin_unlock_irqrestore(&q->done_lock, flags);
}
EXPORT_SYMBOL_GPL(vb2_discard_done);

1069 1070 1071
/**
 * __qbuf_mmap() - handle qbuf of an MMAP buffer
 */
1072
static int __qbuf_mmap(struct vb2_buffer *vb, const void *pb)
1073
{
1074 1075 1076
	int ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
			vb, pb, vb->planes);
	return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
1077 1078
}

1079 1080 1081
/**
 * __qbuf_userptr() - handle qbuf of a USERPTR buffer
 */
1082
static int __qbuf_userptr(struct vb2_buffer *vb, const void *pb)
1083
{
1084
	struct vb2_plane planes[VB2_MAX_PLANES];
1085 1086 1087 1088
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret;
1089
	enum dma_data_direction dma_dir =
1090
		q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
1091
	bool reacquired = vb->planes[0].mem_priv == NULL;
1092

1093
	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
1094
	/* Copy relevant information provided by the userspace */
1095 1096 1097
	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, vb, pb, planes);
	if (ret)
		return ret;
1098 1099 1100

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Skip the plane if already verified */
1101 1102 1103
		if (vb->planes[plane].m.userptr &&
			vb->planes[plane].m.userptr == planes[plane].m.userptr
			&& vb->planes[plane].length == planes[plane].length)
1104 1105
			continue;

H
Hans Verkuil 已提交
1106
		dprintk(3, "userspace address for plane %d changed, "
1107 1108
				"reacquiring memory\n", plane);

1109 1110
		/* Check if the provided plane buffer is large enough */
		if (planes[plane].length < q->plane_sizes[plane]) {
H
Hans Verkuil 已提交
1111
			dprintk(1, "provided buffer size %u is less than "
1112 1113 1114
						"setup size %u for plane %d\n",
						planes[plane].length,
						q->plane_sizes[plane], plane);
1115
			ret = -EINVAL;
1116 1117 1118
			goto err;
		}

1119
		/* Release previously acquired memory if present */
1120 1121 1122
		if (vb->planes[plane].mem_priv) {
			if (!reacquired) {
				reacquired = true;
1123
				call_void_vb_qop(vb, buf_cleanup, vb);
1124
			}
1125
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
1126
		}
1127 1128

		vb->planes[plane].mem_priv = NULL;
1129 1130 1131 1132
		vb->planes[plane].bytesused = 0;
		vb->planes[plane].length = 0;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].data_offset = 0;
1133 1134

		/* Acquire each plane's memory */
1135
		mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane],
1136
				      planes[plane].m.userptr,
1137
				      planes[plane].length, dma_dir);
1138
		if (IS_ERR_OR_NULL(mem_priv)) {
H
Hans Verkuil 已提交
1139
			dprintk(1, "failed acquiring userspace "
1140
						"memory for plane %d\n", plane);
1141 1142
			ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
			goto err;
1143
		}
1144
		vb->planes[plane].mem_priv = mem_priv;
1145 1146 1147 1148 1149 1150
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
1151 1152 1153 1154 1155 1156
	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].bytesused = planes[plane].bytesused;
		vb->planes[plane].length = planes[plane].length;
		vb->planes[plane].m.userptr = planes[plane].m.userptr;
		vb->planes[plane].data_offset = planes[plane].data_offset;
	}
1157

1158 1159 1160 1161 1162 1163 1164 1165
	if (reacquired) {
		/*
		 * One or more planes changed, so we must call buf_init to do
		 * the driver-specific initialization on the newly acquired
		 * buffer, if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
H
Hans Verkuil 已提交
1166
			dprintk(1, "buffer initialization failed\n");
1167 1168 1169 1170 1171 1172
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
H
Hans Verkuil 已提交
1173
		dprintk(1, "buffer preparation failed\n");
1174
		call_void_vb_qop(vb, buf_cleanup, vb);
1175 1176 1177
		goto err;
	}

1178 1179 1180
	return 0;
err:
	/* In case of errors, release planes that were already acquired */
1181 1182
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
1183 1184
			call_void_memop(vb, put_userptr,
				vb->planes[plane].mem_priv);
1185
		vb->planes[plane].mem_priv = NULL;
1186 1187
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].length = 0;
1188 1189 1190 1191 1192
	}

	return ret;
}

1193 1194 1195
/**
 * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
 */
1196
static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb)
1197
{
1198
	struct vb2_plane planes[VB2_MAX_PLANES];
1199 1200 1201 1202
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret;
1203
	enum dma_data_direction dma_dir =
1204
		q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
1205
	bool reacquired = vb->planes[0].mem_priv == NULL;
1206

1207
	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
1208
	/* Copy relevant information provided by the userspace */
1209 1210 1211
	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, vb, pb, planes);
	if (ret)
		return ret;
1212 1213 1214 1215 1216

	for (plane = 0; plane < vb->num_planes; ++plane) {
		struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);

		if (IS_ERR_OR_NULL(dbuf)) {
H
Hans Verkuil 已提交
1217
			dprintk(1, "invalid dmabuf fd for plane %d\n",
1218 1219 1220 1221 1222 1223 1224 1225 1226
				plane);
			ret = -EINVAL;
			goto err;
		}

		/* use DMABUF size if length is not provided */
		if (planes[plane].length == 0)
			planes[plane].length = dbuf->size;

1227
		if (planes[plane].length < q->plane_sizes[plane]) {
H
Hans Verkuil 已提交
1228
			dprintk(1, "invalid dmabuf length for plane %d\n",
1229
				plane);
1230 1231 1232 1233 1234 1235
			ret = -EINVAL;
			goto err;
		}

		/* Skip the plane if already verified */
		if (dbuf == vb->planes[plane].dbuf &&
1236
			vb->planes[plane].length == planes[plane].length) {
1237 1238 1239 1240
			dma_buf_put(dbuf);
			continue;
		}

H
Hans Verkuil 已提交
1241
		dprintk(1, "buffer for plane %d changed\n", plane);
1242

1243 1244
		if (!reacquired) {
			reacquired = true;
1245
			call_void_vb_qop(vb, buf_cleanup, vb);
1246 1247
		}

1248
		/* Release previously acquired memory if present */
1249
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
1250 1251 1252 1253
		vb->planes[plane].bytesused = 0;
		vb->planes[plane].length = 0;
		vb->planes[plane].m.fd = 0;
		vb->planes[plane].data_offset = 0;
1254 1255

		/* Acquire each plane's memory */
1256 1257 1258
		mem_priv = call_ptr_memop(vb, attach_dmabuf,
			q->alloc_ctx[plane], dbuf, planes[plane].length,
			dma_dir);
1259
		if (IS_ERR(mem_priv)) {
H
Hans Verkuil 已提交
1260
			dprintk(1, "failed to attach dmabuf\n");
1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274
			ret = PTR_ERR(mem_priv);
			dma_buf_put(dbuf);
			goto err;
		}

		vb->planes[plane].dbuf = dbuf;
		vb->planes[plane].mem_priv = mem_priv;
	}

	/* TODO: This pins the buffer(s) with  dma_buf_map_attachment()).. but
	 * really we want to do this just before the DMA, not while queueing
	 * the buffer(s)..
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
1275
		ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
1276
		if (ret) {
H
Hans Verkuil 已提交
1277
			dprintk(1, "failed to map dmabuf for plane %d\n",
1278 1279 1280 1281 1282 1283 1284 1285 1286 1287
				plane);
			goto err;
		}
		vb->planes[plane].dbuf_mapped = 1;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
1288 1289 1290 1291 1292 1293
	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].bytesused = planes[plane].bytesused;
		vb->planes[plane].length = planes[plane].length;
		vb->planes[plane].m.fd = planes[plane].m.fd;
		vb->planes[plane].data_offset = planes[plane].data_offset;
	}
1294

1295 1296 1297 1298 1299 1300 1301
	if (reacquired) {
		/*
		 * Call driver-specific initialization on the newly acquired buffer,
		 * if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
H
Hans Verkuil 已提交
1302
			dprintk(1, "buffer initialization failed\n");
1303 1304 1305 1306 1307 1308
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
H
Hans Verkuil 已提交
1309
		dprintk(1, "buffer preparation failed\n");
1310
		call_void_vb_qop(vb, buf_cleanup, vb);
1311 1312 1313
		goto err;
	}

1314 1315 1316 1317 1318 1319 1320 1321
	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	__vb2_buf_dmabuf_put(vb);

	return ret;
}

1322 1323 1324 1325 1326 1327
/**
 * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
 */
static void __enqueue_in_driver(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
1328
	unsigned int plane;
1329 1330

	vb->state = VB2_BUF_STATE_ACTIVE;
1331
	atomic_inc(&q->owned_by_drv_count);
1332

1333 1334
	trace_vb2_buf_queue(q, vb);

1335 1336
	/* sync buffers */
	for (plane = 0; plane < vb->num_planes; ++plane)
1337
		call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
1338

1339
	call_void_vb_qop(vb, buf_queue, vb);
1340 1341
}

1342
static int __buf_prepare(struct vb2_buffer *vb, const void *pb)
1343 1344 1345 1346
{
	struct vb2_queue *q = vb->vb2_queue;
	int ret;

1347 1348 1349 1350 1351
	if (q->error) {
		dprintk(1, "fatal error occurred on queue\n");
		return -EIO;
	}

1352
	vb->state = VB2_BUF_STATE_PREPARING;
1353

1354
	switch (q->memory) {
1355
	case VB2_MEMORY_MMAP:
1356
		ret = __qbuf_mmap(vb, pb);
1357
		break;
1358
	case VB2_MEMORY_USERPTR:
1359
		ret = __qbuf_userptr(vb, pb);
1360
		break;
1361
	case VB2_MEMORY_DMABUF:
1362
		ret = __qbuf_dmabuf(vb, pb);
1363
		break;
1364 1365 1366 1367 1368 1369
	default:
		WARN(1, "Invalid queue type\n");
		ret = -EINVAL;
	}

	if (ret)
H
Hans Verkuil 已提交
1370
		dprintk(1, "buffer preparation failed: %d\n", ret);
1371
	vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;
1372 1373 1374 1375

	return ret;
}

1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391
/**
 * vb2_core_prepare_buf() - Pass ownership of a buffer from userspace
 *			to the kernel
 * @q:		videobuf2 queue
 * @index:	id number of the buffer
 * @pb:		buffer structure passed from userspace to vidioc_prepare_buf
 *		handler in driver
 *
 * Should be called from vidioc_prepare_buf ioctl handler of a driver.
 * The passed buffer should have been verified.
 * This function calls buf_prepare callback in the driver (if provided),
 * in which driver-specific buffer initialization can be performed,
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_prepare_buf handler in driver.
 */
1392
int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408
{
	struct vb2_buffer *vb;
	int ret;

	vb = q->bufs[index];
	if (vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(1, "invalid buffer state %d\n",
			vb->state);
		return -EINVAL;
	}

	ret = __buf_prepare(vb, pb);
	if (ret)
		return ret;

	/* Fill buffer information for the userspace */
1409
	call_void_bufop(q, fill_user_buffer, vb, pb);
1410 1411 1412 1413 1414

	dprintk(1, "prepare of buffer %d succeeded\n", vb->index);

	return ret;
}
1415
EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);
1416

1417 1418 1419 1420
/**
 * vb2_start_streaming() - Attempt to start streaming.
 * @q:		videobuf2 queue
 *
1421 1422 1423 1424 1425 1426
 * Attempt to start streaming. When this function is called there must be
 * at least q->min_buffers_needed buffers queued up (i.e. the minimum
 * number of buffers required for the DMA engine to function). If the
 * @start_streaming op fails it is supposed to return all the driver-owned
 * buffers back to vb2 in state QUEUED. Check if that happened and if
 * not warn and reclaim them forcefully.
1427 1428 1429
 */
static int vb2_start_streaming(struct vb2_queue *q)
{
1430
	struct vb2_buffer *vb;
1431 1432 1433
	int ret;

	/*
1434 1435
	 * If any buffers were queued before streamon,
	 * we can now pass them to driver for processing.
1436
	 */
1437 1438 1439 1440
	list_for_each_entry(vb, &q->queued_list, queued_entry)
		__enqueue_in_driver(vb);

	/* Tell the driver to start streaming */
1441
	q->start_streaming_called = 1;
1442 1443 1444
	ret = call_qop(q, start_streaming, q,
		       atomic_read(&q->owned_by_drv_count));
	if (!ret)
1445
		return 0;
1446

1447 1448
	q->start_streaming_called = 0;

H
Hans Verkuil 已提交
1449
	dprintk(1, "driver refused to start streaming\n");
1450 1451 1452
	/*
	 * If you see this warning, then the driver isn't cleaning up properly
	 * after a failed start_streaming(). See the start_streaming()
1453
	 * documentation in videobuf2-core.h for more information how buffers
1454 1455
	 * should be returned to vb2 in start_streaming().
	 */
1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		unsigned i;

		/*
		 * Forcefully reclaim buffers if the driver did not
		 * correctly return them to vb2.
		 */
		for (i = 0; i < q->num_buffers; ++i) {
			vb = q->bufs[i];
			if (vb->state == VB2_BUF_STATE_ACTIVE)
				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
		}
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
1470
	}
1471 1472 1473 1474 1475 1476
	/*
	 * If done_list is not empty, then start_streaming() didn't call
	 * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or
	 * STATE_DONE.
	 */
	WARN_ON(!list_empty(&q->done_list));
1477 1478 1479
	return ret;
}

1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497
/**
 * vb2_core_qbuf() - Queue a buffer from userspace
 * @q:		videobuf2 queue
 * @index:	id number of the buffer
 * @pb:		buffer structure passed from userspace to vidioc_qbuf handler
 *		in driver
 *
 * Should be called from vidioc_qbuf ioctl handler of a driver.
 * The passed buffer should have been verified.
 * This function:
 * 1) if necessary, calls buf_prepare callback in the driver (if provided), in
 *    which driver-specific buffer initialization can be performed,
 * 2) if streaming is on, queues the buffer in driver by the means of buf_queue
 *    callback for processing.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_qbuf handler in driver.
 */
1498
int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
1499
{
1500
	struct vb2_buffer *vb;
1501
	int ret;
1502

1503
	vb = q->bufs[index];
1504

1505 1506
	switch (vb->state) {
	case VB2_BUF_STATE_DEQUEUED:
1507
		ret = __buf_prepare(vb, pb);
1508
		if (ret)
1509
			return ret;
1510
		break;
1511 1512
	case VB2_BUF_STATE_PREPARED:
		break;
1513
	case VB2_BUF_STATE_PREPARING:
H
Hans Verkuil 已提交
1514
		dprintk(1, "buffer still being prepared\n");
1515
		return -EINVAL;
1516
	default:
H
Hans Verkuil 已提交
1517
		dprintk(1, "invalid buffer state %d\n", vb->state);
1518
		return -EINVAL;
1519 1520 1521 1522 1523 1524 1525
	}

	/*
	 * Add to the queued buffers list, a buffer will stay on it until
	 * dequeued in dqbuf.
	 */
	list_add_tail(&vb->queued_entry, &q->queued_list);
1526
	q->queued_count++;
1527
	q->waiting_for_buffers = false;
1528
	vb->state = VB2_BUF_STATE_QUEUED;
1529

1530
	call_void_bufop(q, copy_timestamp, vb, pb);
1531

1532 1533
	trace_vb2_qbuf(q, vb);

1534 1535 1536 1537
	/*
	 * If already streaming, give the buffer to driver for processing.
	 * If not, the buffer will be given to driver on next streamon.
	 */
1538
	if (q->start_streaming_called)
1539 1540
		__enqueue_in_driver(vb);

1541
	/* Fill buffer information for the userspace */
1542
	call_void_bufop(q, fill_user_buffer, vb, pb);
1543

1544 1545 1546 1547 1548 1549 1550 1551
	/*
	 * If streamon has been called, and we haven't yet called
	 * start_streaming() since not enough buffers were queued, and
	 * we now have reached the minimum number of queued buffers,
	 * then we can finally call start_streaming().
	 */
	if (q->streaming && !q->start_streaming_called &&
	    q->queued_count >= q->min_buffers_needed) {
1552 1553 1554 1555 1556
		ret = vb2_start_streaming(q);
		if (ret)
			return ret;
	}

1557
	dprintk(1, "qbuf of buffer %d succeeded\n", vb->index);
1558
	return 0;
1559
}
1560
EXPORT_SYMBOL_GPL(vb2_core_qbuf);
1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582

/**
 * __vb2_wait_for_done_vb() - wait for a buffer to become available
 * for dequeuing
 *
 * Will sleep if required for nonblocking == false.
 */
static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
{
	/*
	 * All operations on vb_done_list are performed under done_lock
	 * spinlock protection. However, buffers may be removed from
	 * it and returned to userspace only while holding both driver's
	 * lock and the done_lock spinlock. Thus we can be sure that as
	 * long as we hold the driver's lock, the list will remain not
	 * empty if list_empty() check succeeds.
	 */

	for (;;) {
		int ret;

		if (!q->streaming) {
1583
			dprintk(1, "streaming off, will not wait for buffers\n");
1584 1585 1586
			return -EINVAL;
		}

1587 1588 1589 1590 1591
		if (q->error) {
			dprintk(1, "Queue in error state, will not wait for buffers\n");
			return -EIO;
		}

1592 1593 1594 1595 1596
		if (q->last_buffer_dequeued) {
			dprintk(3, "last buffer dequeued already, will not wait for buffers\n");
			return -EPIPE;
		}

1597 1598 1599 1600 1601 1602 1603 1604
		if (!list_empty(&q->done_list)) {
			/*
			 * Found a buffer that we were waiting for.
			 */
			break;
		}

		if (nonblocking) {
1605
			dprintk(1, "nonblocking and no buffers to dequeue, "
1606 1607 1608 1609 1610 1611 1612 1613 1614
								"will not wait\n");
			return -EAGAIN;
		}

		/*
		 * We are streaming and blocking, wait for another buffer to
		 * become ready or for streamoff. Driver's lock is released to
		 * allow streamoff or qbuf to be called while waiting.
		 */
1615
		call_void_qop(q, wait_prepare, q);
1616 1617 1618 1619

		/*
		 * All locks have been released, it is safe to sleep now.
		 */
1620
		dprintk(3, "will sleep waiting for buffers\n");
1621
		ret = wait_event_interruptible(q->done_wq,
1622 1623
				!list_empty(&q->done_list) || !q->streaming ||
				q->error);
1624 1625 1626 1627 1628

		/*
		 * We need to reevaluate both conditions again after reacquiring
		 * the locks or return an error if one occurred.
		 */
1629
		call_void_qop(q, wait_finish, q);
1630
		if (ret) {
1631
			dprintk(1, "sleep was interrupted\n");
1632
			return ret;
1633
		}
1634 1635 1636 1637 1638 1639 1640 1641 1642 1643
	}
	return 0;
}

/**
 * __vb2_get_done_vb() - get a buffer ready for dequeuing
 *
 * Will sleep if required for nonblocking == false.
 */
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
1644
				int nonblocking)
1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661
{
	unsigned long flags;
	int ret;

	/*
	 * Wait for at least one buffer to become available on the done_list.
	 */
	ret = __vb2_wait_for_done_vb(q, nonblocking);
	if (ret)
		return ret;

	/*
	 * Driver's lock has been held since we last verified that done_list
	 * is not empty, so no need for another list_empty(done_list) check.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
1662 1663 1664
	/*
	 * Only remove the buffer from done_list if v4l2_buffer can handle all
	 * the planes.
1665 1666
	 * Verifying planes is NOT necessary since it already has been checked
	 * before the buffer is queued/prepared. So it can never fail.
1667
	 */
1668
	list_del(&(*vb)->done_entry);
1669 1670
	spin_unlock_irqrestore(&q->done_lock, flags);

1671
	return ret;
1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685
}

/**
 * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
 * @q:		videobuf2 queue
 *
 * This function will wait until all buffers that have been given to the driver
 * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call
 * wait_prepare, wait_finish pair. It is intended to be called with all locks
 * taken, for example from stop_streaming() callback.
 */
int vb2_wait_for_all_buffers(struct vb2_queue *q)
{
	if (!q->streaming) {
1686
		dprintk(1, "streaming off, will not wait for buffers\n");
1687 1688 1689
		return -EINVAL;
	}

1690
	if (q->start_streaming_called)
1691
		wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
1692 1693 1694 1695
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);

1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710
/**
 * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
 */
static void __vb2_dqbuf(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int i;

	/* nothing to do if the buffer is already dequeued */
	if (vb->state == VB2_BUF_STATE_DEQUEUED)
		return;

	vb->state = VB2_BUF_STATE_DEQUEUED;

	/* unmap DMABUF buffer */
1711
	if (q->memory == VB2_MEMORY_DMABUF)
1712 1713 1714
		for (i = 0; i < vb->num_planes; ++i) {
			if (!vb->planes[i].dbuf_mapped)
				continue;
1715
			call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
1716 1717 1718 1719
			vb->planes[i].dbuf_mapped = 0;
		}
}

1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740
/**
 * vb2_dqbuf() - Dequeue a buffer to the userspace
 * @q:		videobuf2 queue
 * @pb:		buffer structure passed from userspace to vidioc_dqbuf handler
 *		in driver
 * @nonblocking: if true, this call will not sleep waiting for a buffer if no
 *		 buffers ready for dequeuing are present. Normally the driver
 *		 would be passing (file->f_flags & O_NONBLOCK) here
 *
 * Should be called from vidioc_dqbuf ioctl handler of a driver.
 * The passed buffer should have been verified.
 * This function:
 * 1) calls buf_finish callback in the driver (if provided), in which
 *    driver can perform any additional operations that may be required before
 *    returning the buffer to userspace, such as cache sync,
 * 2) the buffer struct members are filled with relevant information for
 *    the userspace.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_dqbuf handler in driver.
 */
1741
int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking)
1742 1743 1744 1745
{
	struct vb2_buffer *vb = NULL;
	int ret;

1746
	ret = __vb2_get_done_vb(q, &vb, nonblocking);
1747
	if (ret < 0)
1748 1749 1750 1751
		return ret;

	switch (vb->state) {
	case VB2_BUF_STATE_DONE:
1752
		dprintk(3, "returning done buffer\n");
1753 1754
		break;
	case VB2_BUF_STATE_ERROR:
1755
		dprintk(3, "returning done buffer with errors\n");
1756 1757
		break;
	default:
1758
		dprintk(1, "invalid buffer state\n");
1759 1760 1761
		return -EINVAL;
	}

1762
	call_void_vb_qop(vb, buf_finish, vb);
1763

1764
	/* Fill buffer information for the userspace */
1765
	call_void_bufop(q, fill_user_buffer, vb, pb);
1766

1767 1768
	/* Remove from videobuf queue */
	list_del(&vb->queued_entry);
1769
	q->queued_count--;
1770 1771 1772

	trace_vb2_dqbuf(q, vb);

1773 1774
	/* go back to dequeued state */
	__vb2_dqbuf(vb);
1775 1776

	dprintk(1, "dqbuf of buffer %d, with state %d\n",
1777
			vb->index, vb->state);
1778 1779

	return 0;
1780 1781

}
1782
EXPORT_SYMBOL_GPL(vb2_core_dqbuf);
1783

1784 1785 1786 1787 1788 1789 1790
/**
 * __vb2_queue_cancel() - cancel and stop (pause) streaming
 *
 * Removes all queued buffers from driver's queue and all buffers queued by
 * userspace from videobuf's queue. Returns to state after reqbufs.
 */
static void __vb2_queue_cancel(struct vb2_queue *q)
1791
{
1792
	unsigned int i;
1793 1794 1795 1796 1797

	/*
	 * Tell driver to stop all transactions and release all queued
	 * buffers.
	 */
1798
	if (q->start_streaming_called)
1799
		call_void_qop(q, stop_streaming, q);
1800

1801 1802 1803
	/*
	 * If you see this warning, then the driver isn't cleaning up properly
	 * in stop_streaming(). See the stop_streaming() documentation in
1804
	 * videobuf2-core.h for more information how buffers should be returned
1805 1806
	 * to vb2 in stop_streaming().
	 */
1807 1808 1809 1810 1811 1812 1813
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		for (i = 0; i < q->num_buffers; ++i)
			if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE)
				vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}
1814

1815 1816 1817
	q->streaming = 0;
	q->start_streaming_called = 0;
	q->queued_count = 0;
1818
	q->error = 0;
1819

1820 1821 1822 1823 1824 1825 1826 1827 1828
	/*
	 * Remove all buffers from videobuf's list...
	 */
	INIT_LIST_HEAD(&q->queued_list);
	/*
	 * ...and done list; userspace will not receive any buffers it
	 * has not already dequeued before initiating cancel.
	 */
	INIT_LIST_HEAD(&q->done_list);
1829
	atomic_set(&q->owned_by_drv_count, 0);
1830 1831 1832 1833
	wake_up_all(&q->done_wq);

	/*
	 * Reinitialize all buffers for next use.
1834 1835 1836 1837 1838 1839
	 * Make sure to call buf_finish for any queued buffers. Normally
	 * that's done in dqbuf, but that's not going to happen when we
	 * cancel the whole queue. Note: this code belongs here, not in
	 * __vb2_dqbuf() since in vb2_internal_dqbuf() there is a critical
	 * call to __fill_v4l2_buffer() after buf_finish(). That order can't
	 * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
1840
	 */
1841 1842 1843 1844 1845
	for (i = 0; i < q->num_buffers; ++i) {
		struct vb2_buffer *vb = q->bufs[i];

		if (vb->state != VB2_BUF_STATE_DEQUEUED) {
			vb->state = VB2_BUF_STATE_PREPARED;
1846
			call_void_vb_qop(vb, buf_finish, vb);
1847 1848 1849
		}
		__vb2_dqbuf(vb);
	}
1850 1851
}

1852
int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
1853
{
1854
	int ret;
1855 1856

	if (type != q->type) {
H
Hans Verkuil 已提交
1857
		dprintk(1, "invalid stream type\n");
1858 1859 1860 1861
		return -EINVAL;
	}

	if (q->streaming) {
H
Hans Verkuil 已提交
1862
		dprintk(3, "already streaming\n");
1863
		return 0;
1864 1865
	}

1866
	if (!q->num_buffers) {
H
Hans Verkuil 已提交
1867
		dprintk(1, "no buffers have been allocated\n");
1868 1869 1870
		return -EINVAL;
	}

1871
	if (q->num_buffers < q->min_buffers_needed) {
H
Hans Verkuil 已提交
1872
		dprintk(1, "need at least %u allocated buffers\n",
1873 1874 1875
				q->min_buffers_needed);
		return -EINVAL;
	}
1876

1877
	/*
1878 1879
	 * Tell driver to start streaming provided sufficient buffers
	 * are available.
1880
	 */
1881 1882 1883 1884 1885 1886
	if (q->queued_count >= q->min_buffers_needed) {
		ret = vb2_start_streaming(q);
		if (ret) {
			__vb2_queue_cancel(q);
			return ret;
		}
1887 1888 1889
	}

	q->streaming = 1;
1890

H
Hans Verkuil 已提交
1891
	dprintk(3, "successful\n");
1892 1893
	return 0;
}
1894
EXPORT_SYMBOL_GPL(vb2_core_streamon);
1895

1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916
/**
 * vb2_queue_error() - signal a fatal error on the queue
 * @q:		videobuf2 queue
 *
 * Flag that a fatal unrecoverable error has occurred and wake up all processes
 * waiting on the queue. Polling will now set POLLERR and queuing and dequeuing
 * buffers will return -EIO.
 *
 * The error flag will be cleared when cancelling the queue, either from
 * vb2_streamoff or vb2_queue_release. Drivers should thus not call this
 * function before starting the stream, otherwise the error flag will remain set
 * until the queue is released when closing the device node.
 */
void vb2_queue_error(struct vb2_queue *q)
{
	q->error = 1;

	wake_up_all(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_queue_error);

1917
int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
1918
{
1919
	if (type != q->type) {
H
Hans Verkuil 已提交
1920
		dprintk(1, "invalid stream type\n");
1921 1922 1923 1924 1925 1926
		return -EINVAL;
	}

	/*
	 * Cancel will pause streaming and remove all buffers from the driver
	 * and videobuf, effectively returning control over them to userspace.
1927 1928 1929 1930 1931
	 *
	 * Note that we do this even if q->streaming == 0: if you prepare or
	 * queue buffers, and then call streamoff without ever having called
	 * streamon, you would still expect those buffers to be returned to
	 * their normal dequeued state.
1932 1933
	 */
	__vb2_queue_cancel(q);
1934
	q->waiting_for_buffers = !q->is_output;
1935
	q->last_buffer_dequeued = false;
1936

H
Hans Verkuil 已提交
1937
	dprintk(3, "successful\n");
1938 1939
	return 0;
}
1940
EXPORT_SYMBOL_GPL(vb2_core_streamoff);
1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959

/**
 * __find_plane_by_offset() - find plane associated with the given offset
 * @q:		videobuf2 queue
 * @off:	mmap "cookie" offset passed in by userspace
 * @_buffer:	output: index of the matching buffer in q->bufs
 * @_plane:	output: index of the matching plane within that buffer
 *
 * Go over all buffers and their planes, comparing the given offset
 * with the offset assigned to each plane. If a match is found,
 * return its buffer and plane numbers through the output parameters.
 *
 * Return: 0 when a matching plane was found, -EINVAL otherwise.
 */
static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
			unsigned int *_buffer, unsigned int *_plane)
{
	struct vb2_buffer *vb;
	unsigned int buffer, plane;

	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		vb = q->bufs[buffer];

		for (plane = 0; plane < vb->num_planes; ++plane) {
			if (vb->planes[plane].m.offset == off) {
				*_buffer = buffer;
				*_plane = plane;
				return 0;
			}
		}
	}

	return -EINVAL;
}

/**
1972
 * vb2_core_expbuf() - Export a buffer as a file descriptor
1973
 * @q:		videobuf2 queue
1974 1975 1976 1977 1978 1979
 * @fd:		file descriptor associated with DMABUF (set by driver) *
 * @type:	buffer type
 * @index:	id number of the buffer
 * @plane:	index of the plane to be exported, 0 for single plane queues
 * @flags:	flags for newly created file, currently only O_CLOEXEC is
 *		supported, refer to manual of open syscall for more details
1980 1981 1982 1983
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_expbuf handler in driver.
 */
1984
int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
1985
		unsigned int index, unsigned int plane, unsigned int flags)
1986 1987 1988 1989 1990 1991
{
	struct vb2_buffer *vb = NULL;
	struct vb2_plane *vb_plane;
	int ret;
	struct dma_buf *dbuf;

1992
	if (q->memory != VB2_MEMORY_MMAP) {
1993
		dprintk(1, "queue is not currently set up for mmap\n");
1994 1995 1996 1997
		return -EINVAL;
	}

	if (!q->mem_ops->get_dmabuf) {
1998
		dprintk(1, "queue does not support DMA buffer exporting\n");
1999 2000 2001
		return -EINVAL;
	}

2002
	if (flags & ~(O_CLOEXEC | O_ACCMODE)) {
2003
		dprintk(1, "queue does support only O_CLOEXEC and access mode flags\n");
2004 2005 2006
		return -EINVAL;
	}

2007
	if (type != q->type) {
H
Hans Verkuil 已提交
2008
		dprintk(1, "invalid buffer type\n");
2009 2010 2011
		return -EINVAL;
	}

2012
	if (index >= q->num_buffers) {
2013 2014 2015 2016
		dprintk(1, "buffer index out of range\n");
		return -EINVAL;
	}

2017
	vb = q->bufs[index];
2018

2019
	if (plane >= vb->num_planes) {
2020 2021 2022 2023
		dprintk(1, "buffer plane out of range\n");
		return -EINVAL;
	}

2024 2025 2026 2027 2028
	if (vb2_fileio_is_active(q)) {
		dprintk(1, "expbuf: file io in progress\n");
		return -EBUSY;
	}

2029
	vb_plane = &vb->planes[plane];
2030

2031 2032
	dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv,
				flags & O_ACCMODE);
2033
	if (IS_ERR_OR_NULL(dbuf)) {
2034
		dprintk(1, "failed to export buffer %d, plane %d\n",
2035
			index, plane);
2036 2037 2038
		return -EINVAL;
	}

2039
	ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE);
2040 2041
	if (ret < 0) {
		dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
2042
			index, plane, ret);
2043 2044 2045 2046 2047
		dma_buf_put(dbuf);
		return ret;
	}

	dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
2048 2049
		index, plane, ret);
	*fd = ret;
2050 2051 2052

	return 0;
}
2053
EXPORT_SYMBOL_GPL(vb2_core_expbuf);
2054

2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077
/**
 * vb2_mmap() - map video buffers into application address space
 * @q:		videobuf2 queue
 * @vma:	vma passed to the mmap file operation handler in the driver
 *
 * Should be called from mmap file operation handler of a driver.
 * This function maps one plane of one of the available video buffers to
 * userspace. To map whole video memory allocated on reqbufs, this function
 * has to be called once per each plane per each buffer previously allocated.
 *
 * When the userspace application calls mmap, it passes to it an offset returned
 * to it earlier by the means of vidioc_querybuf handler. That offset acts as
 * a "cookie", which is then used to identify the plane to be mapped.
 * This function finds a plane with a matching offset and a mapping is performed
 * by the means of a provided memory operation.
 *
 * The return values from this function are intended to be directly returned
 * from the mmap handler in driver.
 */
int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
{
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
H
Hans Verkuil 已提交
2078
	unsigned int buffer = 0, plane = 0;
2079
	int ret;
2080
	unsigned long length;
2081

2082
	if (q->memory != VB2_MEMORY_MMAP) {
2083
		dprintk(1, "queue is not currently set up for mmap\n");
2084 2085 2086 2087 2088 2089 2090
		return -EINVAL;
	}

	/*
	 * Check memory area access mode.
	 */
	if (!(vma->vm_flags & VM_SHARED)) {
2091
		dprintk(1, "invalid vma flags, VM_SHARED needed\n");
2092 2093
		return -EINVAL;
	}
2094
	if (q->is_output) {
2095
		if (!(vma->vm_flags & VM_WRITE)) {
2096
			dprintk(1, "invalid vma flags, VM_WRITE needed\n");
2097 2098 2099 2100
			return -EINVAL;
		}
	} else {
		if (!(vma->vm_flags & VM_READ)) {
2101
			dprintk(1, "invalid vma flags, VM_READ needed\n");
2102 2103 2104
			return -EINVAL;
		}
	}
2105 2106 2107 2108
	if (vb2_fileio_is_active(q)) {
		dprintk(1, "mmap: file io in progress\n");
		return -EBUSY;
	}
2109 2110 2111 2112 2113 2114 2115 2116 2117 2118

	/*
	 * Find the plane corresponding to the offset passed by userspace.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		return ret;

	vb = q->bufs[buffer];

2119 2120 2121 2122 2123
	/*
	 * MMAP requires page_aligned buffers.
	 * The buffer length was page_aligned at __vb2_buf_mem_alloc(),
	 * so, we need to do the same here.
	 */
2124
	length = PAGE_ALIGN(vb->planes[plane].length);
2125 2126 2127
	if (length < (vma->vm_end - vma->vm_start)) {
		dprintk(1,
			"MMAP invalid, as it would overflow buffer length\n");
2128 2129 2130
		return -EINVAL;
	}

2131
	mutex_lock(&q->mmap_lock);
2132
	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
2133
	mutex_unlock(&q->mmap_lock);
2134
	if (ret)
2135 2136
		return ret;

2137
	dprintk(3, "buffer %d, plane %d successfully mapped\n", buffer, plane);
2138 2139 2140 2141
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_mmap);

2142 2143 2144 2145 2146 2147 2148 2149 2150 2151
#ifndef CONFIG_MMU
unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
				    unsigned long addr,
				    unsigned long len,
				    unsigned long pgoff,
				    unsigned long flags)
{
	unsigned long off = pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
	unsigned int buffer, plane;
2152
	void *vaddr;
2153 2154
	int ret;

2155
	if (q->memory != VB2_MEMORY_MMAP) {
2156
		dprintk(1, "queue is not currently set up for mmap\n");
2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168
		return -EINVAL;
	}

	/*
	 * Find the plane corresponding to the offset passed by userspace.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		return ret;

	vb = q->bufs[buffer];

2169 2170
	vaddr = vb2_plane_vaddr(vb, plane);
	return vaddr ? (unsigned long)vaddr : -EINVAL;
2171 2172 2173 2174
}
EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
#endif

2175
/**
2176
 * vb2_core_queue_init() - initialize a videobuf2 queue
2177 2178 2179 2180 2181 2182
 * @q:		videobuf2 queue; this structure should be allocated in driver
 *
 * The vb2_queue structure should be allocated by the driver. The driver is
 * responsible of clearing it's content and setting initial values for some
 * required entries before calling this function.
 * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
2183
 * to the struct vb2_queue description in include/media/videobuf2-core.h
2184 2185
 * for more information.
 */
2186
int vb2_core_queue_init(struct vb2_queue *q)
2187
{
2188 2189 2190 2191 2192 2193 2194 2195 2196
	/*
	 * Sanity check
	 */
	if (WARN_ON(!q)			  ||
	    WARN_ON(!q->ops)		  ||
	    WARN_ON(!q->mem_ops)	  ||
	    WARN_ON(!q->type)		  ||
	    WARN_ON(!q->io_modes)	  ||
	    WARN_ON(!q->ops->queue_setup) ||
2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210
	    WARN_ON(!q->ops->buf_queue))
		return -EINVAL;

	INIT_LIST_HEAD(&q->queued_list);
	INIT_LIST_HEAD(&q->done_list);
	spin_lock_init(&q->done_lock);
	mutex_init(&q->mmap_lock);
	init_waitqueue_head(&q->done_wq);

	if (q->buf_struct_size == 0)
		q->buf_struct_size = sizeof(struct vb2_buffer);

	return 0;
}
2211
EXPORT_SYMBOL_GPL(vb2_core_queue_init);
2212

2213 2214
static int __vb2_init_fileio(struct vb2_queue *q, int read);
static int __vb2_cleanup_fileio(struct vb2_queue *q);
2215
/**
2216
 * vb2_core_queue_release() - stop streaming, release the queue and free memory
2217 2218 2219 2220 2221 2222
 * @q:		videobuf2 queue
 *
 * This function stops streaming and performs necessary clean ups, including
 * freeing video buffer memory. The driver is responsible for freeing
 * the vb2_queue structure itself.
 */
2223
void vb2_core_queue_release(struct vb2_queue *q)
2224
{
2225
	__vb2_cleanup_fileio(q);
2226
	__vb2_queue_cancel(q);
2227
	mutex_lock(&q->mmap_lock);
2228
	__vb2_queue_free(q, q->num_buffers);
2229
	mutex_unlock(&q->mmap_lock);
2230
}
2231
EXPORT_SYMBOL_GPL(vb2_core_queue_release);
2232

2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 
2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843
/**
 * vb2_core_poll() - implements poll userspace operation
 * @q:		videobuf2 queue
 * @file:	file argument passed to the poll file operation handler
 * @wait:	wait argument passed to the poll file operation handler
 *
 * This function implements poll file operation handler for a driver.
 * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
 * be informed that the file descriptor of a video device is available for
 * reading.
 * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
 * will be reported as available for writing.
 *
 * The return values from this function are intended to be directly returned
 * from poll handler in driver.
 */
unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
		poll_table *wait)
{
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_buffer *vb = NULL;
	unsigned long flags;

	if (!q->is_output && !(req_events & (POLLIN | POLLRDNORM)))
		return 0;
	if (q->is_output && !(req_events & (POLLOUT | POLLWRNORM)))
		return 0;

	/*
	 * Start file I/O emulator only if streaming API has not been used yet.
	 */
	if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
		if (!q->is_output && (q->io_modes & VB2_READ) &&
				(req_events & (POLLIN | POLLRDNORM))) {
			if (__vb2_init_fileio(q, 1))
				return POLLERR;
		}
		if (q->is_output && (q->io_modes & VB2_WRITE) &&
				(req_events & (POLLOUT | POLLWRNORM))) {
			if (__vb2_init_fileio(q, 0))
				return POLLERR;
			/*
			 * Write to OUTPUT queue can be done immediately.
			 */
			return POLLOUT | POLLWRNORM;
		}
	}

	/*
	 * There is nothing to wait for if the queue isn't streaming, or if the
	 * error flag is set.
	 */
	if (!vb2_is_streaming(q) || q->error)
		return POLLERR;

	/*
	 * For output streams you can call write() as long as there are fewer
	 * buffers queued than there are buffers available.
	 */
	if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
		return POLLOUT | POLLWRNORM;

	if (list_empty(&q->done_list)) {
		/*
		 * If the last buffer was dequeued from a capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (q->last_buffer_dequeued)
			return POLLIN | POLLRDNORM;

		poll_wait(file, &q->done_wq, wait);
	}

	/*
	 * Take first buffer available for dequeuing.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	if (!list_empty(&q->done_list))
		vb = list_first_entry(&q->done_list, struct vb2_buffer,
					done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	if (vb && (vb->state == VB2_BUF_STATE_DONE
			|| vb->state == VB2_BUF_STATE_ERROR)) {
		return (q->is_output) ?
				POLLOUT | POLLWRNORM :
				POLLIN | POLLRDNORM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_poll);

/**
 * struct vb2_fileio_buf - buffer context used by file io emulator
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of streaming API. This structure is used for
 * tracking context related to the buffers.
 */
struct vb2_fileio_buf {
	void *vaddr;		/* kernel virtual address of plane 0 */
	unsigned int size;	/* usable size of the buffer, in bytes */
	unsigned int pos;	/* current read/write offset within the buffer */
	unsigned int queued:1;	/* set while the buffer is queued to the driver */
};

/**
 * struct vb2_fileio_data - queue context used by file io emulator
 *
 * @count:	number of buffers requested from (and granted by) reqbufs
 * @type:	copy of q->type, used when building the internal buffer calls
 * @memory:	always VB2_MEMORY_MMAP: the emulator owns the buffers
 * @b:		scratch vb2_buffer used for the internal qbuf/dqbuf calls
 * @bufs:	per-buffer emulator state (vaddr, size, position, queued flag)
 * @cur_index:	the index of the buffer currently being read from or
 *		written to. If equal to q->num_buffers then a new buffer
 *		must be dequeued.
 * @initial_index: in the read() case all buffers are queued up immediately
 *		in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
 *		buffers. However, in the write() case no buffers are initially
 *		queued, instead whenever a buffer is full it is queued up by
 *		__vb2_perform_fileio(). Only once all available buffers have
 *		been queued up will __vb2_perform_fileio() start to dequeue
 *		buffers. This means that initially __vb2_perform_fileio()
 *		needs to know what buffer index to use when it is queuing up
 *		the buffers for the first time. That initial index is stored
 *		in this field. Once it is equal to q->num_buffers all
 *		available buffers have been queued and __vb2_perform_fileio()
 *		should start the normal dequeue/queue cycle.
 * @q_count:	running count of buffers queued by the emulator
 * @dq_count:	running count of buffers dequeued by the emulator
 * @read_once:	copy of q->fileio_read_once; tear down after one dequeue
 * @write_immediately: copy of q->fileio_write_immediately; queue a buffer
 *		after every write() instead of waiting until it is full
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of streaming API. For proper operation it requires
 * this structure to save the driver state between each call of the read
 * or write function.
 */
struct vb2_fileio_data {
	unsigned int count;
	unsigned int type;
	unsigned int memory;
	struct vb2_buffer *b;
	struct vb2_fileio_buf bufs[VB2_MAX_FRAME];
	unsigned int cur_index;
	unsigned int initial_index;
	unsigned int q_count;
	unsigned int dq_count;
	unsigned read_once:1;
	unsigned write_immediately:1;
};

/**
 * __vb2_init_fileio() - initialize file io emulator
 * @q:		videobuf2 queue
 * @read:	mode selector (1 means read, 0 means write)
 *
 * Allocates the emulator state, requests MMAP buffers from the driver,
 * maps them into kernel space and (for read mode) pre-queues all of them,
 * then starts streaming.
 *
 * Return: 0 on success, a negative errno otherwise.
 */
static int __vb2_init_fileio(struct vb2_queue *q, int read)
{
	struct vb2_fileio_data *fileio;
	int i, ret;
	unsigned int count = 0;

	/*
	 * Sanity check
	 */
	if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
		    (!read && !(q->io_modes & VB2_WRITE))))
		return -EINVAL;

	/*
	 * Check if device supports mapping buffers to kernel virtual space.
	 */
	if (!q->mem_ops->vaddr)
		return -EBUSY;

	/*
	 * Check if streaming api has not been already activated.
	 */
	if (q->streaming || q->num_buffers > 0)
		return -EBUSY;

	/*
	 * Start with count 1, driver can increase it in queue_setup()
	 */
	count = 1;

	dprintk(3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
		(read) ? "read" : "write", count, q->fileio_read_once,
		q->fileio_write_immediately);

	fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
	if (fileio == NULL)
		return -ENOMEM;

	fileio->b = kzalloc(q->buf_struct_size, GFP_KERNEL);
	if (fileio->b == NULL) {
		/* Was leaked before: free the context allocated just above. */
		kfree(fileio);
		return -ENOMEM;
	}

	fileio->read_once = q->fileio_read_once;
	fileio->write_immediately = q->fileio_write_immediately;

	/*
	 * Request buffers and use MMAP type to force driver
	 * to allocate buffers by itself.
	 */
	fileio->count = count;
	fileio->memory = VB2_MEMORY_MMAP;
	fileio->type = q->type;
	q->fileio = fileio;
	ret = vb2_core_reqbufs(q, fileio->memory, &fileio->count);
	if (ret)
		goto err_kfree;

	/*
	 * Check if plane_count is correct
	 * (multiplane buffers are not supported).
	 */
	if (q->bufs[0]->num_planes != 1) {
		ret = -EBUSY;
		goto err_reqbufs;
	}

	/*
	 * Get kernel address of each buffer.
	 */
	for (i = 0; i < q->num_buffers; i++) {
		fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
		if (fileio->bufs[i].vaddr == NULL) {
			ret = -EINVAL;
			goto err_reqbufs;
		}
		fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
	}

	/*
	 * Read mode requires pre queuing of all buffers.
	 */
	if (read) {
		/*
		 * Queue all buffers.
		 */
		for (i = 0; i < q->num_buffers; i++) {
			struct vb2_buffer *b = fileio->b;

			memset(b, 0, q->buf_struct_size);
			b->type = q->type;
			b->memory = q->memory;
			b->index = i;
			ret = vb2_core_qbuf(q, i, b);
			if (ret)
				goto err_reqbufs;
			fileio->bufs[i].queued = 1;
		}
		/*
		 * All buffers have been queued, so mark that by setting
		 * initial_index to q->num_buffers
		 */
		fileio->initial_index = q->num_buffers;
		fileio->cur_index = q->num_buffers;
	}

	/*
	 * Start streaming.
	 */
	ret = vb2_core_streamon(q, q->type);
	if (ret)
		goto err_reqbufs;

	return ret;

err_reqbufs:
	fileio->count = 0;
	vb2_core_reqbufs(q, fileio->memory, &fileio->count);

err_kfree:
	q->fileio = NULL;
	/* Was leaked before: the scratch buffer must go too. */
	kfree(fileio->b);
	kfree(fileio);
	return ret;
}

/**
 * __vb2_cleanup_fileio() - free resources used by file io emulator
 * @q:		videobuf2 queue
 *
 * Stops streaming, releases the emulator's buffers and frees its state.
 * Harmless to call when the emulator was never started.
 */
static int __vb2_cleanup_fileio(struct vb2_queue *q)
{
	struct vb2_fileio_data *fileio = q->fileio;

	if (!fileio)
		return 0;

	vb2_core_streamoff(q, q->type);
	q->fileio = NULL;
	fileio->count = 0;
	vb2_core_reqbufs(q, fileio->memory, &fileio->count);
	kfree(fileio->b);
	kfree(fileio);
	dprintk(3, "file io emulator closed\n");

	return 0;
}

/**
 * __vb2_perform_fileio() - perform a single file io (read or write) operation
 * @q:		videobuf2 queue
 * @data:	pointer to target userspace buffer
 * @count:	number of bytes to read or write
 * @ppos:	file handle position tracking pointer
 * @nonblock:	blocking selector (1 means non-blocking, 0 means blocking)
 * @read:	access mode selector (1 means read, 0 means write)
 *
 * Returns the number of bytes transferred, or a negative errno.
 */
static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblock, int read)
{
	struct vb2_fileio_data *fileio;
	struct vb2_fileio_buf *buf;
	bool is_multiplanar = q->is_multiplanar;
	/*
	 * When using write() to write data to an output video node the vb2 core
	 * should copy timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
	 * else is able to provide this information with the write() operation.
	 */
	bool copy_timestamp = !read && q->copy_timestamp;
	int ret, index;

	dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n",
		read ? "read" : "write", (long)*ppos, count,
		nonblock ? "non" : "");

	if (!data)
		return -EINVAL;

	/*
	 * Initialize emulator on first call.
	 */
	if (!vb2_fileio_is_active(q)) {
		ret = __vb2_init_fileio(q, read);
		dprintk(3, "vb2_init_fileio result: %d\n", ret);
		if (ret)
			return ret;
	}
	fileio = q->fileio;

	/*
	 * Check if we need to dequeue the buffer.
	 */
	index = fileio->cur_index;
	if (index >= q->num_buffers) {
		struct vb2_buffer *b = fileio->b;

		/*
		 * Call vb2_dqbuf to get buffer back.
		 */
		memset(b, 0, q->buf_struct_size);
		b->type = q->type;
		b->memory = q->memory;
		ret = vb2_core_dqbuf(q, b, nonblock);
		dprintk(5, "vb2_dqbuf result: %d\n", ret);
		if (ret)
			return ret;
		fileio->dq_count += 1;

		fileio->cur_index = index = b->index;
		buf = &fileio->bufs[index];

		/*
		 * Get number of bytes filled by the driver
		 */
		buf->pos = 0;
		buf->queued = 0;
		buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
				 : vb2_plane_size(q->bufs[index], 0);
		/* Compensate for data_offset on read in the multiplanar case. */
		if (is_multiplanar && read &&
				b->planes[0].data_offset < buf->size) {
			buf->pos = b->planes[0].data_offset;
			buf->size -= buf->pos;
		}
	} else {
		buf = &fileio->bufs[index];
	}

	/*
	 * Limit count on last few bytes of the buffer.
	 */
	if (buf->pos + count > buf->size) {
		count = buf->size - buf->pos;
		dprintk(5, "reducing read count: %zd\n", count);
	}

	/*
	 * Transfer data to userspace.
	 */
	dprintk(3, "copying %zd bytes - buffer %d, offset %u\n",
		count, index, buf->pos);
	if (read)
		ret = copy_to_user(data, buf->vaddr + buf->pos, count);
	else
		ret = copy_from_user(buf->vaddr + buf->pos, data, count);
	if (ret) {
		dprintk(3, "error copying data\n");
		return -EFAULT;
	}

	/*
	 * Update counters.
	 */
	buf->pos += count;
	*ppos += count;

	/*
	 * Queue next buffer if required.
	 */
	if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
		struct vb2_buffer *b = fileio->b;

		/*
		 * Check if this is the last buffer to read.
		 */
		if (read && fileio->read_once && fileio->dq_count == 1) {
			dprintk(3, "read limit reached\n");
			return __vb2_cleanup_fileio(q);
		}

		/*
		 * Call vb2_qbuf and give buffer to the driver.
		 */
		memset(b, 0, q->buf_struct_size);
		b->type = q->type;
		b->memory = q->memory;
		b->index = index;
		b->planes[0].bytesused = buf->pos;

		if (copy_timestamp)
			b->timestamp = ktime_get_ns();
		ret = vb2_core_qbuf(q, index, b);
		/* NOTE(review): message says "vb2_dbuf" but this is qbuf — consider fixing */
		dprintk(5, "vb2_dbuf result: %d\n", ret);
		if (ret)
			return ret;

		/*
		 * Buffer has been queued, update the status
		 */
		buf->pos = 0;
		buf->queued = 1;
		buf->size = vb2_plane_size(q->bufs[index], 0);
		fileio->q_count += 1;
		/*
		 * If we are queuing up buffers for the first time, then
		 * increase initial_index by one.
		 */
		if (fileio->initial_index < q->num_buffers)
			fileio->initial_index++;
		/*
		 * The next buffer to use is either a buffer that's going to be
		 * queued for the first time (initial_index < q->num_buffers)
		 * or it is equal to q->num_buffers, meaning that the next
		 * time we need to dequeue a buffer since we've now queued up
		 * all the 'first time' buffers.
		 */
		fileio->cur_index = fileio->initial_index;
	}

	/*
	 * Return proper number of bytes processed.
	 */
	if (ret == 0)
		ret = count;
	return ret;
}

/* Implement the read() file operation through the file io emulator. */
size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblocking)
{
	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
}
EXPORT_SYMBOL_GPL(vb2_read);

/* Implement the write() file operation through the file io emulator. */
size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
		loff_t *ppos, int nonblocking)
{
	/* In write mode the emulator only reads from @data; cast away const. */
	return __vb2_perform_fileio(q, (char __user *) data, count,
							ppos, nonblocking, 0);
}
EXPORT_SYMBOL_GPL(vb2_write);

/* Context for the kernel thread created by vb2_thread_start(). */
struct vb2_threadio_data {
	struct task_struct *thread;	/* the "vb2-<name>" kthread */
	vb2_thread_fnc fnc;		/* callback invoked for each DONE buffer */
	void *priv;			/* opaque argument passed to @fnc */
	bool stop;			/* set by vb2_thread_stop() to end the loop */
};

/*
 * vb2_thread() - kthread body driving the dequeue/callback/queue cycle.
 *
 * For output queues all buffers are handed to the callback once before the
 * normal dequeue cycle starts (prequeue); timestamps are copied for output
 * queues when q->copy_timestamp is set.
 */
static int vb2_thread(void *data)
{
	struct vb2_queue *q = data;
	struct vb2_threadio_data *threadio = q->threadio;
	struct vb2_fileio_data *fileio = q->fileio;
	bool copy_timestamp = false;
	int prequeue = 0;
	int index = 0;
	int ret = 0;

	if (q->is_output) {
		prequeue = q->num_buffers;
		copy_timestamp = q->copy_timestamp;
	}

	set_freezable();

	for (;;) {
		struct vb2_buffer *vb;
		struct vb2_buffer *b = fileio->b;

		/*
		 * Call vb2_dqbuf to get buffer back.
		 */
		memset(b, 0, q->buf_struct_size);
		b->type = q->type;
		b->memory = q->memory;
		if (prequeue) {
			b->index = index++;
			prequeue--;
		} else {
			call_void_qop(q, wait_finish, q);
			if (!threadio->stop)
				ret = vb2_core_dqbuf(q, b, 0);
			call_void_qop(q, wait_prepare, q);
			dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
		}
		if (ret || threadio->stop)
			break;
		try_to_freeze();

		vb = q->bufs[b->index];
		if (b->state == VB2_BUF_STATE_DONE) {
			if (threadio->fnc(vb, threadio->priv))
				break;
		}
		call_void_qop(q, wait_finish, q);
		if (copy_timestamp)
			b->timestamp = ktime_get_ns();
		if (!threadio->stop)
			ret = vb2_core_qbuf(q, b->index, b);
		call_void_qop(q, wait_prepare, q);
		if (ret || threadio->stop)
			break;
	}

	/* Hmm, linux becomes *very* unhappy without this ... */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return 0;
}

/*
 * This function should not be used for anything else but the videobuf2-dvb
 * support. If you think you have another good use-case for this, then please
 * contact the linux-media mailinglist first.
 */
int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
		     const char *thread_name)
{
	struct vb2_threadio_data *threadio;
	int ret = 0;

	if (q->threadio)
		return -EBUSY;
	if (vb2_is_busy(q))
		return -EBUSY;
	if (WARN_ON(q->fileio))
		return -EBUSY;

	threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
	if (threadio == NULL)
		return -ENOMEM;
	threadio->fnc = fnc;
	threadio->priv = priv;

	ret = __vb2_init_fileio(q, !q->is_output);
	dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
	if (ret)
		goto nomem;
	q->threadio = threadio;
	threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
	if (IS_ERR(threadio->thread)) {
		ret = PTR_ERR(threadio->thread);
		threadio->thread = NULL;
		/* threadio is freed below; don't leave a dangling pointer. */
		q->threadio = NULL;
		goto nothread;
	}
	return 0;

nothread:
	__vb2_cleanup_fileio(q);
nomem:
	kfree(threadio);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_thread_start);

/* Stop the vb2_thread kthread and tear down the file io emulator. */
int vb2_thread_stop(struct vb2_queue *q)
{
	struct vb2_threadio_data *threadio = q->threadio;
	int err;

	if (!threadio)
		return 0;

	/* Ask the thread to exit and wake any sleeper inside it. */
	threadio->stop = true;
	vb2_queue_error(q);
	err = kthread_stop(threadio->thread);
	__vb2_cleanup_fileio(q);
	threadio->thread = NULL;
	q->threadio = NULL;
	kfree(threadio);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_thread_stop);

2844
MODULE_DESCRIPTION("Media buffer core framework");
2845
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
2846
MODULE_LICENSE("GPL");