Commit 84200af0 authored by John Cox, committed by Zheng Zengkai

media: v4l2-mem2mem: allow request job buffer processing after job finish

raspberrypi inclusion
category: feature
bugzilla: 50432

--------------------------------

Allow the capture buffer to be detached from a v4l2 request job such
that another job can start before the capture buffer is returned. This
allows h/w codecs that can process multiple requests at the same time
to operate more efficiently.

Signed-off-by: John Cox <jc@kynesim.co.uk>
Signed-off-by: Fang Yafen <yafen@iscas.ac.cn>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 8753e30c
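
For orientation before the diff: the intended driver-side flow is to detach
the capture buffer when the first phase of a job completes, finish the job so
the next one can be scheduled, and hand the buffer back once the hardware is
fully done with it. The following is a minimal sketch under stated
assumptions: my_dev, my_ctx and the two phase-completion handlers are
hypothetical and not part of this patch; only the v4l2_m2m_* helpers come
from the code below.

  #include <media/v4l2-fh.h>
  #include <media/v4l2-mem2mem.h>
  #include <media/videobuf2-v4l2.h>

  struct my_dev {
  	struct v4l2_m2m_dev *m2m_dev;
  };

  struct my_ctx {
  	struct v4l2_fh fh;			/* fh.m2m_ctx set up at open() */
  	struct my_dev *dev;
  	struct vb2_v4l2_buffer *det_buf;	/* detached, still in flight */
  };

  /* First phase done: detach the capture buffer, then finish the job so
   * the next request can start while phase two is still writing to it. */
  static void my_phase1_done(struct my_ctx *ctx)
  {
  	ctx->det_buf = v4l2_m2m_cap_buf_detach(ctx->dev->m2m_dev,
  					       ctx->fh.m2m_ctx);
  	v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
  					 VB2_BUF_STATE_DONE);
  }

  /* Second phase done: return the detached capture buffer to the user. */
  static void my_phase2_done(struct my_ctx *ctx)
  {
  	v4l2_m2m_cap_buf_return(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
  				ctx->det_buf, VB2_BUF_STATE_DONE);
  	ctx->det_buf = NULL;
  }

Note that v4l2_m2m_cap_buf_detach() returns NULL when the capture buffer is
held or already detached, and v4l2_m2m_cap_buf_return() ignores a NULL
buffer, so the two calls pair up safely.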
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -416,15 +416,18 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
 {
 	struct v4l2_m2m_dev *m2m_dev;
 	unsigned long flags;
+	bool det_abort_req;
 
 	m2m_dev = m2m_ctx->m2m_dev;
 	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
 
+	det_abort_req = !list_empty(&m2m_ctx->det_list);
 	m2m_ctx->job_flags |= TRANS_ABORT;
 	if (m2m_ctx->job_flags & TRANS_RUNNING) {
 		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
 		if (m2m_dev->m2m_ops->job_abort)
 			m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
+		det_abort_req = false;
 		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
 		wait_event(m2m_ctx->finished,
 			   !(m2m_ctx->job_flags & TRANS_RUNNING));
@@ -438,6 +441,11 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
 		/* Do nothing, was not on queue/running */
 		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
 	}
+
+	/* Wait for detached buffers to come back too */
+	if (det_abort_req && m2m_dev->m2m_ops->job_abort)
+		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
+	wait_event(m2m_ctx->det_empty, list_empty(&m2m_ctx->det_list));
 }
 
 /*
@@ -475,6 +483,7 @@ static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
 	list_del(&m2m_dev->curr_ctx->queue);
 	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
+	m2m_ctx->cap_detached = false;
 	wake_up(&m2m_dev->curr_ctx->finished);
 	m2m_dev->curr_ctx = NULL;
 
 	return true;
@@ -502,6 +511,80 @@ void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
 }
 EXPORT_SYMBOL(v4l2_m2m_job_finish);
 
+struct vb2_v4l2_buffer *_v4l2_m2m_cap_buf_detach(struct v4l2_m2m_ctx *m2m_ctx)
+{
+	struct vb2_v4l2_buffer *buf;
+
+	buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
+	list_add_tail(&container_of(buf, struct v4l2_m2m_buffer, vb)->list,
+		      &m2m_ctx->det_list);
+	m2m_ctx->cap_detached = true;
+	buf->is_held = true;
+	buf->det_state = VB2_BUF_STATE_ACTIVE;
+
+	return buf;
+}
+
+struct vb2_v4l2_buffer *v4l2_m2m_cap_buf_detach(struct v4l2_m2m_dev *m2m_dev,
+						struct v4l2_m2m_ctx *m2m_ctx)
+{
+	unsigned long flags;
+	struct vb2_v4l2_buffer *src_buf, *dst_buf;
+
+	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+
+	dst_buf = NULL;
+	src_buf = v4l2_m2m_next_src_buf(m2m_ctx);
+
+	if (!(src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF) &&
+	    !m2m_ctx->cap_detached)
+		dst_buf = _v4l2_m2m_cap_buf_detach(m2m_ctx);
+
+	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+	return dst_buf;
+}
+EXPORT_SYMBOL(v4l2_m2m_cap_buf_detach);
+
+static void _v4l2_m2m_cap_buf_return(struct v4l2_m2m_ctx *m2m_ctx,
+				     struct vb2_v4l2_buffer *buf,
+				     enum vb2_buffer_state state)
+{
+	buf->det_state = state;
+
+	/*
+	 * Always signal done in the order we got stuff.
+	 * Stop if we find a buf that is still in use.
+	 */
+	while (!list_empty(&m2m_ctx->det_list)) {
+		buf = &list_first_entry(&m2m_ctx->det_list,
+					struct v4l2_m2m_buffer, list)->vb;
+		state = buf->det_state;
+		if (state != VB2_BUF_STATE_DONE &&
+		    state != VB2_BUF_STATE_ERROR)
+			return;
+		list_del(&container_of(buf, struct v4l2_m2m_buffer, vb)->list);
+		buf->det_state = VB2_BUF_STATE_DEQUEUED;
+		v4l2_m2m_buf_done(buf, state);
+	}
+	wake_up(&m2m_ctx->det_empty);
+}
+
+void v4l2_m2m_cap_buf_return(struct v4l2_m2m_dev *m2m_dev,
+			     struct v4l2_m2m_ctx *m2m_ctx,
+			     struct vb2_v4l2_buffer *buf,
+			     enum vb2_buffer_state state)
+{
+	unsigned long flags;
+
+	if (!buf)
+		return;
+
+	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+	_v4l2_m2m_cap_buf_return(m2m_ctx, buf, state);
+	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+}
+EXPORT_SYMBOL(v4l2_m2m_cap_buf_return);
+
 void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
 				      struct v4l2_m2m_ctx *m2m_ctx,
 				      enum vb2_buffer_state state)
@@ -512,14 +595,21 @@ void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
 	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
 	src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
-	dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);
 
-	if (WARN_ON(!src_buf || !dst_buf))
+	if (WARN_ON(!src_buf))
 		goto unlock;
-	dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
-	if (!dst_buf->is_held) {
-		v4l2_m2m_dst_buf_remove(m2m_ctx);
-		v4l2_m2m_buf_done(dst_buf, state);
+
+	if (!m2m_ctx->cap_detached) {
+		dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);
+		if (WARN_ON(!dst_buf))
+			goto unlock;
+
+		dst_buf->is_held = src_buf->flags
+			& V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
+
+		if (!dst_buf->is_held) {
+			dst_buf = _v4l2_m2m_cap_buf_detach(m2m_ctx);
+			_v4l2_m2m_cap_buf_return(m2m_ctx, dst_buf, state);
+		}
 	}
 
 	/*
 	 * If the request API is being used, returning the OUTPUT
@@ -1167,12 +1257,14 @@ struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
 	m2m_ctx->priv = drv_priv;
 	m2m_ctx->m2m_dev = m2m_dev;
 	init_waitqueue_head(&m2m_ctx->finished);
+	init_waitqueue_head(&m2m_ctx->det_empty);
 
 	out_q_ctx = &m2m_ctx->out_q_ctx;
 	cap_q_ctx = &m2m_ctx->cap_q_ctx;
 
 	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
 	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
+	INIT_LIST_HEAD(&m2m_ctx->det_list);
 	spin_lock_init(&out_q_ctx->rdy_spinlock);
 	spin_lock_init(&cap_q_ctx->rdy_spinlock);
 
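The v4l2_m2m_cancel_job() change above means the core now also waits for
det_list to drain, invoking job_abort() again when detached buffers are still
outstanding after the running job has finished. A driver using this feature
therefore needs its job_abort() callback to flush any detached buffer. A
sketch, reusing the hypothetical my_ctx from the example above (not part of
this patch):

  /* job_abort() callback: flush the outstanding detached buffer (if any)
   * with ERROR state so the cancel path's wait on det_empty completes. */
  static void my_job_abort(void *priv)
  {
  	struct my_ctx *ctx = priv;

  	if (ctx->det_buf) {
  		v4l2_m2m_cap_buf_return(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
  					ctx->det_buf, VB2_BUF_STATE_ERROR);
  		ctx->det_buf = NULL;
  	}
  }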
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -92,6 +92,9 @@ struct v4l2_m2m_queue_ctx {
  *		%TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT.
  * @finished: Wait queue used to signalize when a job queue finished.
  * @priv: Instance private data
+ * @cap_detached: Current job's capture buffer has been detached
+ * @det_list: List of detached (post-job but still in flight) capture buffers
+ * @det_empty: Wait queue signalled when det_list goes empty
  *
  * The memory to memory context is specific to a file handle, NOT to e.g.
  * a device.
@@ -120,6 +123,11 @@ struct v4l2_m2m_ctx {
 	wait_queue_head_t		finished;
 	void				*priv;
 
+	/* Detached buffer handling */
+	bool				cap_detached;
+	struct list_head		det_list;
+	wait_queue_head_t		det_empty;
+
 };
 
 /**
@@ -326,6 +334,45 @@ void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev);
  */
 void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev);
 
+/**
+ * v4l2_m2m_cap_buf_detach() - detach the capture buffer from the job and
+ * return it.
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ *
+ * This function is designed to be used in conjunction with
+ * v4l2_m2m_buf_done_and_job_finish(). It allows the next job to start
+ * execution before the capture buffer is returned to the user, which can be
+ * important if the underlying processing has multiple phases that are more
+ * efficiently executed in parallel.
+ *
+ * If used, it must be called before v4l2_m2m_buf_done_and_job_finish(),
+ * as otherwise the buffer will already be gone.
+ *
+ * It is the caller's responsibility to ensure that all detached buffers are
+ * returned.
+ */
+struct vb2_v4l2_buffer *v4l2_m2m_cap_buf_detach(struct v4l2_m2m_dev *m2m_dev,
+						struct v4l2_m2m_ctx *m2m_ctx);
+
+/**
+ * v4l2_m2m_cap_buf_return() - return a capture buffer, previously detached
+ * with v4l2_m2m_cap_buf_detach(), to the user.
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @buf: the buffer to return
+ * @state: vb2 buffer state passed to v4l2_m2m_buf_done().
+ *
+ * Buffers returned by this function will be returned to the user in the order
+ * of the original jobs rather than the order in which this function is called.
+ */
+void v4l2_m2m_cap_buf_return(struct v4l2_m2m_dev *m2m_dev,
+			     struct v4l2_m2m_ctx *m2m_ctx,
+			     struct vb2_v4l2_buffer *buf,
+			     enum vb2_buffer_state state);
+
 /**
  * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
  *
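The in-order guarantee documented for v4l2_m2m_cap_buf_return() falls out of
_v4l2_m2m_cap_buf_return() walking det_list from the head. A hypothetical
timeline with two detached buffers, A (detached first) and B, illustrates it:

  /*
   * Hypothetical timeline, assuming A and B were detached in that order:
   *
   *   v4l2_m2m_cap_buf_return(dev, ctx, B, VB2_BUF_STATE_DONE);
   *	B->det_state = DONE, but the list head A is still ACTIVE,
   *	so nothing is dequeued yet.
   *
   *   v4l2_m2m_cap_buf_return(dev, ctx, A, VB2_BUF_STATE_DONE);
   *	A is the head and now DONE -> v4l2_m2m_buf_done(A); B becomes
   *	the head and is already DONE -> v4l2_m2m_buf_done(B).
   *
   * Userspace therefore always dequeues capture buffers in job order,
   * and det_empty is only woken once the list fully drains.
   */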
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -37,6 +37,8 @@ struct video_device;
  * @request_fd:	the request_fd associated with this buffer
  * @is_held:	if true, then this capture buffer was held
  * @planes:	plane information (userptr/fd, length, bytesused, data_offset).
+ * @det_state:	if this is a detached request capture buffer, then this
+ *		contains its current state
  *
  * Should contain enough information to be able to cover all the fields
  * of &struct v4l2_buffer at ``videodev2.h``.
@@ -51,6 +53,7 @@ struct vb2_v4l2_buffer {
 	__s32			request_fd;
 	bool			is_held;
 	struct vb2_plane	planes[VB2_MAX_PLANES];
+	enum vb2_buffer_state	det_state;
 };
 
 /* VB2 V4L2 flags as set in vb2_queue.subsystem_flags */