author     Hans Verkuil <hverkuil-cisco@xs4all.nl>  2019-10-11 06:32:41 -0300
committer  Mauro Carvalho Chehab <mchehab+samsung@kernel.org>  2019-10-21 07:39:10 -0300
commit     f8cca8c97a63d77f48334cde81d15014f43530ef (patch)
tree       bad32414191b8dfb2242ffbddaf7a9daf9e26082 /drivers/media/v4l2-core
parent     137272cdf7cc5be835f44216e6003769d1638480 (diff)
media: v4l2-mem2mem: support held capture buffers
Check for held buffers that are ready to be returned to vb2 in
__v4l2_m2m_try_queue(). This avoids drivers having to handle this case.

Add v4l2_m2m_buf_done_and_job_finish() to correctly return source and
destination buffers and mark the job as finished, while taking a held
destination buffer into account (i.e. that buffer won't be returned).
This has to be done while job_spinlock is held to avoid race conditions.

Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
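For context, a minimal userspace sketch (not part of this patch) of the
hold-capture-buffer flow this commit supports on the kernel side: all slices
of one frame are queued on the OUTPUT queue with the same timestamp, and
V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF is set on all but the last slice. The
queue_slice() helper, the fd/index plumbing and the single-planar setup are
illustrative, and the media-request plumbing a real stateless decoder needs
is omitted:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Hypothetical helper: queue one slice of a frame on the OUTPUT
	 * queue of a stateless decoder. */
	static int queue_slice(int fd, __u32 index,
			       const struct timeval *frame_ts,
			       int is_last_slice)
	{
		struct v4l2_buffer buf;

		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = index;
		/* All slices of one frame must carry the same timestamp. */
		buf.timestamp = *frame_ts;
		/*
		 * Hold the capture buffer for all but the last slice;
		 * without the flag on the last slice, the capture buffer
		 * is marked done as soon as that job finishes.
		 */
		if (!is_last_slice)
			buf.flags |= V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;

		return ioctl(fd, VIDIOC_QBUF, &buf);
	}

If the flag stays set on the last slice (e.g. because the client only
detects a frame boundary when the next frame starts), the new check in
__v4l2_m2m_try_queue() below returns the held buffer once a source buffer
with a different timestamp is queued.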
Diffstat (limited to 'drivers/media/v4l2-core')
-rw-r--r--  drivers/media/v4l2-core/v4l2-mem2mem.c | 130
1 file changed, 97 insertions(+), 33 deletions(-)
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 19937dd3c6f6..79c3656f24f7 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -284,7 +284,8 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
struct v4l2_m2m_ctx *m2m_ctx)
{
- unsigned long flags_job, flags_out, flags_cap;
+ unsigned long flags_job;
+ struct vb2_v4l2_buffer *dst, *src;
dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
@@ -307,20 +308,30 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
goto job_unlock;
}
- spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
- if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
- && !m2m_ctx->out_q_ctx.buffered) {
+ src = v4l2_m2m_next_src_buf(m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(m2m_ctx);
+ if (!src && !m2m_ctx->out_q_ctx.buffered) {
dprintk("No input buffers available\n");
- goto out_unlock;
+ goto job_unlock;
}
- spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
- if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
- && !m2m_ctx->cap_q_ctx.buffered) {
+ if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
dprintk("No output buffers available\n");
- goto cap_unlock;
+ goto job_unlock;
+ }
+
+ if (src && dst &&
+ dst->is_held && dst->vb2_buf.copied_timestamp &&
+ dst->vb2_buf.timestamp != src->vb2_buf.timestamp) {
+ dst->is_held = false;
+ v4l2_m2m_dst_buf_remove(m2m_ctx);
+ v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+ dst = v4l2_m2m_next_dst_buf(m2m_ctx);
+
+ if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
+ dprintk("No output buffers available after returning held buffer\n");
+ goto job_unlock;
+ }
}
- spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
if (m2m_dev->m2m_ops->job_ready
&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
@@ -331,13 +342,6 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
m2m_ctx->job_flags |= TRANS_QUEUED;
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
- return;
-
-cap_unlock:
- spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
-out_unlock:
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
job_unlock:
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}
@@ -412,37 +416,97 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
}
}
-void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
- struct v4l2_m2m_ctx *m2m_ctx)
+/*
+ * Schedule the next job, called from v4l2_m2m_job_finish() or
+ * v4l2_m2m_buf_done_and_job_finish().
+ */
+static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
{
- unsigned long flags;
+ /*
+ * This instance might have more buffers ready, but since we do not
+ * allow more than one job on the job_queue per instance, each has
+ * to be scheduled separately after the previous one finishes.
+ */
+ __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
- spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ /*
+ * We might be running in atomic context,
+ * but the job must be run in non-atomic context.
+ */
+ schedule_work(&m2m_dev->job_work);
+}
+
+/*
+ * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
+ * v4l2_m2m_buf_done_and_job_finish().
+ */
+static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
dprintk("Called by an instance not currently running\n");
- return;
+ return false;
}
list_del(&m2m_dev->curr_ctx->queue);
m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
wake_up(&m2m_dev->curr_ctx->finished);
m2m_dev->curr_ctx = NULL;
+ return true;
+}
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
-
- /* This instance might have more buffers ready, but since we do not
- * allow more than one job on the job_queue per instance, each has
- * to be scheduled separately after the previous one finishes. */
- __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
+void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
+ unsigned long flags;
+ bool schedule_next;
- /* We might be running in atomic context,
- * but the job must be run in non-atomic context.
+ /*
+ * This function should not be used for drivers that support
+ * holding capture buffers. Those should use
+ * v4l2_m2m_buf_done_and_job_finish() instead.
*/
- schedule_work(&m2m_dev->job_work);
+ WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags &
+ VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ if (schedule_next)
+ v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
+void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx,
+ enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ bool schedule_next = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);
+
+ if (WARN_ON(!src_buf || !dst_buf))
+ goto unlock;
+ v4l2_m2m_buf_done(src_buf, state);
+ dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
+ if (!dst_buf->is_held) {
+ v4l2_m2m_dst_buf_remove(m2m_ctx);
+ v4l2_m2m_buf_done(dst_buf, state);
+ }
+ schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
+unlock:
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ if (schedule_next)
+ v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
+}
+EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);
+
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_requestbuffers *reqbufs)
{
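For reference, a hedged sketch (not from this patch) of the job-completion
path of a driver that supports held capture buffers, built on the helper
added above. struct my_dev, struct my_ctx and my_job_done() are
hypothetical; only the v4l2/vb2 types and
v4l2_m2m_buf_done_and_job_finish() itself come from the kernel:

	#include <media/v4l2-fh.h>
	#include <media/v4l2-mem2mem.h>

	/* Illustrative driver state; real drivers carry much more. */
	struct my_dev {
		struct v4l2_m2m_dev *m2m_dev;
	};

	struct my_ctx {
		struct v4l2_fh fh;
		struct my_dev *dev;
	};

	/* Called from the driver's IRQ handler when a job completes. */
	static void my_job_done(struct my_ctx *ctx, bool error)
	{
		enum vb2_buffer_state state =
			error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE;

		/*
		 * One call replaces the old pattern of removing both
		 * buffers, calling v4l2_m2m_buf_done() on each and then
		 * v4l2_m2m_job_finish(): the core returns the source
		 * buffer, returns the destination buffer only if it is
		 * not held, and finishes the job under job_spinlock,
		 * avoiding races with __v4l2_m2m_try_queue().
		 */
		v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev,
						 ctx->fh.m2m_ctx, state);
	}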