author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-10-31 10:53:29 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-10-31 10:53:29 -0700
commit | b3491d8430dd25f0a4e00c33d60da22a9bd9d052 (patch)
tree | a3506967c871971bf9e5920dca5316c5346763df /drivers/media
parent | 59fc453b21f767f2fb0ff4dc0a947e9b9c9e6d14 (diff)
parent | e4183d3256e3cd668e899d06af66da5aac3a51af (diff)
Merge tag 'media/v4.20-2' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media
Pull new experimental media request API from Mauro Carvalho Chehab:
"A new media request API
This API is needed to support device drivers that can dynamically
change their parameters for each new frame. The latest versions of the
Google camera and codec HALs depend on such a feature.
At this stage, it supports only stateless codecs.
It has been discussed for a long time (at least over the last 3-4
years), and we have finally reached something that seems to work.
This series contains both the API and core changes required to support
it and a new m2m decoder driver (cedrus).
As the current API is still experimental, the only real driver using
it (cedrus) was added in staging[1]. We intend to keep it there for a
while, in order to test the API. Only when we're sure that this API
works for other cases (like encoders) will we move this driver out of
staging and set the API in stone.
[1] We also added request support to the vivid virtual driver (used
only for testing), as that makes it easier to test the API for those
who don't have the cedrus hardware"
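
For orientation, the sequence a userspace application goes through with this API looks roughly as follows. This is a minimal sketch assembled from the uAPI pieces added by the series (MEDIA_IOC_REQUEST_ALLOC, V4L2_BUF_FLAG_REQUEST_FD and the buffer request_fd field, MEDIA_REQUEST_IOC_QUEUE/REINIT, POLLPRI completion); it assumes buffers were already allocated and streaming was set up, and the buffer type, control ID and lack of error handling are placeholders rather than a reference implementation:

/*
 * Rough userspace sketch of the request API flow. A real application
 * must check every ioctl() return value.
 */
#include <poll.h>
#include <sys/ioctl.h>
#include <linux/media.h>
#include <linux/videodev2.h>

int queue_one_frame(int media_fd, int video_fd, unsigned int buf_index)
{
	struct media_request_alloc alloc = {};
	struct v4l2_ext_control ctrl = {};
	struct v4l2_ext_controls ctrls = {};
	struct v4l2_buffer buf = {};
	struct pollfd pfd;

	/* Allocate a request; the kernel hands back a request file descriptor. */
	if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &alloc))
		return -1;

	/* Stage per-frame controls in the request instead of applying them now. */
	ctrl.id = V4L2_CID_BRIGHTNESS;		/* placeholder control ID */
	ctrl.value = 128;
	ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
	ctrls.request_fd = alloc.fd;
	ctrls.count = 1;
	ctrls.controls = &ctrl;
	ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);

	/* Associate an already-allocated buffer with the same request. */
	buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;	/* placeholder buffer type */
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = buf_index;
	buf.flags = V4L2_BUF_FLAG_REQUEST_FD;
	buf.request_fd = alloc.fd;
	ioctl(video_fd, VIDIOC_QBUF, &buf);

	/* Validate and queue the whole request atomically. */
	ioctl(alloc.fd, MEDIA_REQUEST_IOC_QUEUE);

	/* Completion is signalled with POLLPRI on the request fd. */
	pfd.fd = alloc.fd;
	pfd.events = POLLPRI;
	poll(&pfd, 1, -1);

	/* The fd can be recycled with REINIT or simply closed. */
	ioctl(alloc.fd, MEDIA_REQUEST_IOC_REINIT);
	return alloc.fd;
}

Once the request completes, the buffer is dequeued with VIDIOC_DQBUF as usual; closing the request fd, or reinitializing it as above, allows the request to be released or reused.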
* tag 'media/v4.20-2' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media: (53 commits)
media: dt-bindings: Document the Rockchip VPU bindings
media: platform: Add Cedrus VPU decoder driver
media: dt-bindings: media: Document bindings for the Cedrus VPU driver
media: v4l: Add definition for the Sunxi tiled NV12 format
media: v4l: Add definitions for MPEG-2 slice format and metadata
media: videobuf2-core: Rework and rename helper for request buffer count
media: v4l2-ctrls.c: initialize an error return code with zero
media: v4l2-compat-ioctl32.c: add missing documentation for a field
media: media-request: update documentation
media: media-request: EPERM -> EACCES/EBUSY
media: v4l2-ctrls: improve media_request_(un)lock_for_update
media: v4l2-ctrls: use media_request_(un)lock_for_access
media: media-request: add media_request_(un)lock_for_access
media: vb2: set reqbufs/create_bufs capabilities
media: videodev2.h: add new capabilities for buffer types
media: buffer.rst: only set V4L2_BUF_FLAG_REQUEST_FD for QBUF
media: v4l2-ctrls: return -EACCES if request wasn't completed
media: media-request: return -EINVAL for invalid request_fds
media: vivid: add request support
media: vivid: add mc
...
Diffstat (limited to 'drivers/media')
47 files changed, 2090 insertions, 336 deletions
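
Before the per-file hunks, it may help to see what a driver has to opt into. The following is a hedged sketch based only on the hooks visible in the diff below (the vb2_queue supports_requests flag, the buf_request_complete vb2 op, the exported vb2_request_validate()/vb2_request_queue() helpers and v4l2_ctrl_request_complete()). The foo_* identifiers are hypothetical, only the request-related fields are shown (type, io_modes, mem_ops and the remaining vb2 ops are set up as usual), and memory-to-memory drivers in this series use additional m2m-specific glue in their req_queue path:

/* Hypothetical driver "foo": only request-related wiring is shown. */
#include <linux/mutex.h>
#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-ctrls.h>

struct foo_dev {
	struct media_device mdev;
	struct v4l2_ctrl_handler ctrl_handler;
	struct mutex lock;
};

static void foo_buf_request_complete(struct vb2_buffer *vb)
{
	struct foo_dev *dev = vb2_get_drv_priv(vb->vb2_queue);

	/* A queued request got cancelled: complete its control objects. */
	v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_handler);
}

static const struct vb2_ops foo_qops = {
	/* ...queue_setup, buf_queue, start/stop_streaming as usual... */
	.buf_request_complete = foo_buf_request_complete,
};

static const struct media_device_ops foo_media_ops = {
	/* generic helpers exported by videobuf2-v4l2 in this series */
	.req_validate	= vb2_request_validate,
	.req_queue	= vb2_request_queue,
};

static int foo_queue_init(struct foo_dev *dev, struct vb2_queue *q)
{
	q->supports_requests = true;	/* reported as V4L2_BUF_CAP_SUPPORTS_REQUESTS */
	q->ops = &foo_qops;
	q->drv_priv = dev;
	q->lock = &dev->lock;		/* required for queueing buffers in requests */
	dev->mdev.ops = &foo_media_ops;
	return vb2_queue_init(q);
}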
diff --git a/drivers/media/Makefile b/drivers/media/Makefile index 594b462ddf0e..985d35ec6b29 100644 --- a/drivers/media/Makefile +++ b/drivers/media/Makefile @@ -3,7 +3,8 @@ # Makefile for the kernel multimedia device drivers. # -media-objs := media-device.o media-devnode.o media-entity.o +media-objs := media-device.o media-devnode.o media-entity.o \ + media-request.o # # I2C drivers should come before other drivers, otherwise they'll fail diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index d6d22cf77066..975ff5669f72 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -356,6 +356,8 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, vb->planes[plane].length = plane_sizes[plane]; vb->planes[plane].min_length = plane_sizes[plane]; } + call_void_bufop(q, init_buffer, vb); + q->bufs[vb->index] = vb; /* Allocate video buffer memory for the MMAP type */ @@ -497,8 +499,9 @@ static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers) pr_info(" buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n", vb->cnt_buf_init, vb->cnt_buf_cleanup, vb->cnt_buf_prepare, vb->cnt_buf_finish); - pr_info(" buf_queue: %u buf_done: %u\n", - vb->cnt_buf_queue, vb->cnt_buf_done); + pr_info(" buf_queue: %u buf_done: %u buf_request_complete: %u\n", + vb->cnt_buf_queue, vb->cnt_buf_done, + vb->cnt_buf_request_complete); pr_info(" alloc: %u put: %u prepare: %u finish: %u mmap: %u\n", vb->cnt_mem_alloc, vb->cnt_mem_put, vb->cnt_mem_prepare, vb->cnt_mem_finish, @@ -683,7 +686,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, } /* - * Call queue_cancel to clean up any buffers in the PREPARED or + * Call queue_cancel to clean up any buffers in the * QUEUED state which is possible if buffers were prepared or * queued without ever calling STREAMON. */ @@ -930,6 +933,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) /* sync buffers */ for (plane = 0; plane < vb->num_planes; ++plane) call_void_memop(vb, finish, vb->planes[plane].mem_priv); + vb->synced = false; } spin_lock_irqsave(&q->done_lock, flags); @@ -942,6 +946,14 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) vb->state = state; } atomic_dec(&q->owned_by_drv_count); + + if (vb->req_obj.req) { + /* This is not supported at the moment */ + WARN_ON(state == VB2_BUF_STATE_REQUEUEING); + media_request_object_unbind(&vb->req_obj); + media_request_object_put(&vb->req_obj); + } + spin_unlock_irqrestore(&q->done_lock, flags); trace_vb2_buf_done(q, vb); @@ -976,20 +988,19 @@ EXPORT_SYMBOL_GPL(vb2_discard_done); /* * __prepare_mmap() - prepare an MMAP buffer */ -static int __prepare_mmap(struct vb2_buffer *vb, const void *pb) +static int __prepare_mmap(struct vb2_buffer *vb) { int ret = 0; - if (pb) - ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, - vb, pb, vb->planes); + ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, + vb, vb->planes); return ret ? 
ret : call_vb_qop(vb, buf_prepare, vb); } /* * __prepare_userptr() - prepare a USERPTR buffer */ -static int __prepare_userptr(struct vb2_buffer *vb, const void *pb) +static int __prepare_userptr(struct vb2_buffer *vb) { struct vb2_plane planes[VB2_MAX_PLANES]; struct vb2_queue *q = vb->vb2_queue; @@ -1000,12 +1011,10 @@ static int __prepare_userptr(struct vb2_buffer *vb, const void *pb) memset(planes, 0, sizeof(planes[0]) * vb->num_planes); /* Copy relevant information provided by the userspace */ - if (pb) { - ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, - vb, pb, planes); - if (ret) - return ret; - } + ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, + vb, planes); + if (ret) + return ret; for (plane = 0; plane < vb->num_planes; ++plane) { /* Skip the plane if already verified */ @@ -1105,7 +1114,7 @@ err: /* * __prepare_dmabuf() - prepare a DMABUF buffer */ -static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb) +static int __prepare_dmabuf(struct vb2_buffer *vb) { struct vb2_plane planes[VB2_MAX_PLANES]; struct vb2_queue *q = vb->vb2_queue; @@ -1116,12 +1125,10 @@ static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb) memset(planes, 0, sizeof(planes[0]) * vb->num_planes); /* Copy relevant information provided by the userspace */ - if (pb) { - ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, - vb, pb, planes); - if (ret) - return ret; - } + ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, + vb, planes); + if (ret) + return ret; for (plane = 0; plane < vb->num_planes; ++plane) { struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd); @@ -1250,9 +1257,10 @@ static void __enqueue_in_driver(struct vb2_buffer *vb) call_void_vb_qop(vb, buf_queue, vb); } -static int __buf_prepare(struct vb2_buffer *vb, const void *pb) +static int __buf_prepare(struct vb2_buffer *vb) { struct vb2_queue *q = vb->vb2_queue; + enum vb2_buffer_state orig_state = vb->state; unsigned int plane; int ret; @@ -1261,26 +1269,31 @@ static int __buf_prepare(struct vb2_buffer *vb, const void *pb) return -EIO; } + if (vb->prepared) + return 0; + WARN_ON(vb->synced); + vb->state = VB2_BUF_STATE_PREPARING; switch (q->memory) { case VB2_MEMORY_MMAP: - ret = __prepare_mmap(vb, pb); + ret = __prepare_mmap(vb); break; case VB2_MEMORY_USERPTR: - ret = __prepare_userptr(vb, pb); + ret = __prepare_userptr(vb); break; case VB2_MEMORY_DMABUF: - ret = __prepare_dmabuf(vb, pb); + ret = __prepare_dmabuf(vb); break; default: WARN(1, "Invalid queue type\n"); ret = -EINVAL; + break; } if (ret) { dprintk(1, "buffer preparation failed: %d\n", ret); - vb->state = VB2_BUF_STATE_DEQUEUED; + vb->state = orig_state; return ret; } @@ -1288,11 +1301,98 @@ static int __buf_prepare(struct vb2_buffer *vb, const void *pb) for (plane = 0; plane < vb->num_planes; ++plane) call_void_memop(vb, prepare, vb->planes[plane].mem_priv); - vb->state = VB2_BUF_STATE_PREPARED; + vb->synced = true; + vb->prepared = true; + vb->state = orig_state; return 0; } +static int vb2_req_prepare(struct media_request_object *obj) +{ + struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); + int ret; + + if (WARN_ON(vb->state != VB2_BUF_STATE_IN_REQUEST)) + return -EINVAL; + + mutex_lock(vb->vb2_queue->lock); + ret = __buf_prepare(vb); + mutex_unlock(vb->vb2_queue->lock); + return ret; +} + +static void __vb2_dqbuf(struct vb2_buffer *vb); + +static void vb2_req_unprepare(struct media_request_object *obj) +{ + struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); + + mutex_lock(vb->vb2_queue->lock); + __vb2_dqbuf(vb); + 
vb->state = VB2_BUF_STATE_IN_REQUEST; + mutex_unlock(vb->vb2_queue->lock); + WARN_ON(!vb->req_obj.req); +} + +int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, + struct media_request *req); + +static void vb2_req_queue(struct media_request_object *obj) +{ + struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); + + mutex_lock(vb->vb2_queue->lock); + vb2_core_qbuf(vb->vb2_queue, vb->index, NULL, NULL); + mutex_unlock(vb->vb2_queue->lock); +} + +static void vb2_req_unbind(struct media_request_object *obj) +{ + struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); + + if (vb->state == VB2_BUF_STATE_IN_REQUEST) + call_void_bufop(vb->vb2_queue, init_buffer, vb); +} + +static void vb2_req_release(struct media_request_object *obj) +{ + struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); + + if (vb->state == VB2_BUF_STATE_IN_REQUEST) + vb->state = VB2_BUF_STATE_DEQUEUED; +} + +static const struct media_request_object_ops vb2_core_req_ops = { + .prepare = vb2_req_prepare, + .unprepare = vb2_req_unprepare, + .queue = vb2_req_queue, + .unbind = vb2_req_unbind, + .release = vb2_req_release, +}; + +bool vb2_request_object_is_buffer(struct media_request_object *obj) +{ + return obj->ops == &vb2_core_req_ops; +} +EXPORT_SYMBOL_GPL(vb2_request_object_is_buffer); + +unsigned int vb2_request_buffer_cnt(struct media_request *req) +{ + struct media_request_object *obj; + unsigned long flags; + unsigned int buffer_cnt = 0; + + spin_lock_irqsave(&req->lock, flags); + list_for_each_entry(obj, &req->objects, list) + if (vb2_request_object_is_buffer(obj)) + buffer_cnt++; + spin_unlock_irqrestore(&req->lock, flags); + + return buffer_cnt; +} +EXPORT_SYMBOL_GPL(vb2_request_buffer_cnt); + int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb) { struct vb2_buffer *vb; @@ -1304,8 +1404,12 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb) vb->state); return -EINVAL; } + if (vb->prepared) { + dprintk(1, "buffer already prepared\n"); + return -EINVAL; + } - ret = __buf_prepare(vb, pb); + ret = __buf_prepare(vb); if (ret) return ret; @@ -1314,7 +1418,7 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb) dprintk(2, "prepare of buffer %d succeeded\n", vb->index); - return ret; + return 0; } EXPORT_SYMBOL_GPL(vb2_core_prepare_buf); @@ -1381,7 +1485,8 @@ static int vb2_start_streaming(struct vb2_queue *q) return ret; } -int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb) +int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, + struct media_request *req) { struct vb2_buffer *vb; int ret; @@ -1393,13 +1498,57 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb) vb = q->bufs[index]; - switch (vb->state) { - case VB2_BUF_STATE_DEQUEUED: - ret = __buf_prepare(vb, pb); + if ((req && q->uses_qbuf) || + (!req && vb->state != VB2_BUF_STATE_IN_REQUEST && + q->uses_requests)) { + dprintk(1, "queue in wrong mode (qbuf vs requests)\n"); + return -EBUSY; + } + + if (req) { + int ret; + + q->uses_requests = 1; + if (vb->state != VB2_BUF_STATE_DEQUEUED) { + dprintk(1, "buffer %d not in dequeued state\n", + vb->index); + return -EINVAL; + } + + media_request_object_init(&vb->req_obj); + + /* Make sure the request is in a safe state for updating. 
*/ + ret = media_request_lock_for_update(req); if (ret) return ret; - break; - case VB2_BUF_STATE_PREPARED: + ret = media_request_object_bind(req, &vb2_core_req_ops, + q, true, &vb->req_obj); + media_request_unlock_for_update(req); + if (ret) + return ret; + + vb->state = VB2_BUF_STATE_IN_REQUEST; + /* Fill buffer information for the userspace */ + if (pb) { + call_void_bufop(q, copy_timestamp, vb, pb); + call_void_bufop(q, fill_user_buffer, vb, pb); + } + + dprintk(2, "qbuf of buffer %d succeeded\n", vb->index); + return 0; + } + + if (vb->state != VB2_BUF_STATE_IN_REQUEST) + q->uses_qbuf = 1; + + switch (vb->state) { + case VB2_BUF_STATE_DEQUEUED: + case VB2_BUF_STATE_IN_REQUEST: + if (!vb->prepared) { + ret = __buf_prepare(vb); + if (ret) + return ret; + } break; case VB2_BUF_STATE_PREPARING: dprintk(1, "buffer still being prepared\n"); @@ -1600,6 +1749,11 @@ static void __vb2_dqbuf(struct vb2_buffer *vb) call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv); vb->planes[i].dbuf_mapped = 0; } + if (vb->req_obj.req) { + media_request_object_unbind(&vb->req_obj); + media_request_object_put(&vb->req_obj); + } + call_void_bufop(q, init_buffer, vb); } int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, @@ -1625,6 +1779,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, } call_void_vb_qop(vb, buf_finish, vb); + vb->prepared = false; if (pindex) *pindex = vb->index; @@ -1688,6 +1843,8 @@ static void __vb2_queue_cancel(struct vb2_queue *q) q->start_streaming_called = 0; q->queued_count = 0; q->error = 0; + q->uses_requests = 0; + q->uses_qbuf = 0; /* * Remove all buffers from videobuf's list... @@ -1712,19 +1869,38 @@ static void __vb2_queue_cancel(struct vb2_queue *q) */ for (i = 0; i < q->num_buffers; ++i) { struct vb2_buffer *vb = q->bufs[i]; + struct media_request *req = vb->req_obj.req; + + /* + * If a request is associated with this buffer, then + * call buf_request_cancel() to give the driver to complete() + * related request objects. Otherwise those objects would + * never complete. + */ + if (req) { + enum media_request_state state; + unsigned long flags; + + spin_lock_irqsave(&req->lock, flags); + state = req->state; + spin_unlock_irqrestore(&req->lock, flags); - if (vb->state == VB2_BUF_STATE_PREPARED || - vb->state == VB2_BUF_STATE_QUEUED) { + if (state == MEDIA_REQUEST_STATE_QUEUED) + call_void_vb_qop(vb, buf_request_complete, vb); + } + + if (vb->synced) { unsigned int plane; for (plane = 0; plane < vb->num_planes; ++plane) call_void_memop(vb, finish, vb->planes[plane].mem_priv); + vb->synced = false; } - if (vb->state != VB2_BUF_STATE_DEQUEUED) { - vb->state = VB2_BUF_STATE_PREPARED; + if (vb->prepared) { call_void_vb_qop(vb, buf_finish, vb); + vb->prepared = false; } __vb2_dqbuf(vb); } @@ -2281,7 +2457,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read) * Queue all buffers. 
*/ for (i = 0; i < q->num_buffers; i++) { - ret = vb2_core_qbuf(q, i, NULL); + ret = vb2_core_qbuf(q, i, NULL, NULL); if (ret) goto err_reqbufs; fileio->bufs[i].queued = 1; @@ -2460,7 +2636,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_ if (copy_timestamp) b->timestamp = ktime_get_ns(); - ret = vb2_core_qbuf(q, index, NULL); + ret = vb2_core_qbuf(q, index, NULL, NULL); dprintk(5, "vb2_dbuf result: %d\n", ret); if (ret) return ret; @@ -2563,7 +2739,7 @@ static int vb2_thread(void *data) if (copy_timestamp) vb->timestamp = ktime_get_ns(); if (!threadio->stop) - ret = vb2_core_qbuf(q, vb->index, NULL); + ret = vb2_core_qbuf(q, vb->index, NULL, NULL); call_void_qop(q, wait_prepare, q); if (ret || threadio->stop) break; diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c index 886a2d8d5c6c..a17033ab2c22 100644 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c @@ -25,6 +25,7 @@ #include <linux/kthread.h> #include <media/v4l2-dev.h> +#include <media/v4l2-device.h> #include <media/v4l2-fh.h> #include <media/v4l2-event.h> #include <media/v4l2-common.h> @@ -40,10 +41,12 @@ module_param(debug, int, 0644); pr_info("vb2-v4l2: %s: " fmt, __func__, ## arg); \ } while (0) -/* Flags that are set by the vb2 core */ +/* Flags that are set by us */ #define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \ V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \ V4L2_BUF_FLAG_PREPARED | \ + V4L2_BUF_FLAG_IN_REQUEST | \ + V4L2_BUF_FLAG_REQUEST_FD | \ V4L2_BUF_FLAG_TIMESTAMP_MASK) /* Output buffer flags that should be passed on to the driver */ #define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \ @@ -118,6 +121,16 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b) return 0; } +/* + * __init_v4l2_vb2_buffer() - initialize the v4l2_vb2_buffer struct + */ +static void __init_v4l2_vb2_buffer(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + + vbuf->request_fd = -1; +} + static void __copy_timestamp(struct vb2_buffer *vb, const void *pb) { const struct v4l2_buffer *b = pb; @@ -154,9 +167,181 @@ static void vb2_warn_zero_bytesused(struct vb2_buffer *vb) pr_warn("use the actual size instead.\n"); } -static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b, - const char *opname) +static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b) { + struct vb2_queue *q = vb->vb2_queue; + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct vb2_plane *planes = vbuf->planes; + unsigned int plane; + int ret; + + ret = __verify_length(vb, b); + if (ret < 0) { + dprintk(1, "plane parameters verification failed: %d\n", ret); + return ret; + } + if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) { + /* + * If the format's field is ALTERNATE, then the buffer's field + * should be either TOP or BOTTOM, not ALTERNATE since that + * makes no sense. The driver has to know whether the + * buffer represents a top or a bottom field in order to + * program any DMA correctly. Using ALTERNATE is wrong, since + * that just says that it is either a top or a bottom field, + * but not which of the two it is. 
+ */ + dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n"); + return -EINVAL; + } + vbuf->sequence = 0; + vbuf->request_fd = -1; + + if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) { + switch (b->memory) { + case VB2_MEMORY_USERPTR: + for (plane = 0; plane < vb->num_planes; ++plane) { + planes[plane].m.userptr = + b->m.planes[plane].m.userptr; + planes[plane].length = + b->m.planes[plane].length; + } + break; + case VB2_MEMORY_DMABUF: + for (plane = 0; plane < vb->num_planes; ++plane) { + planes[plane].m.fd = + b->m.planes[plane].m.fd; + planes[plane].length = + b->m.planes[plane].length; + } + break; + default: + for (plane = 0; plane < vb->num_planes; ++plane) { + planes[plane].m.offset = + vb->planes[plane].m.offset; + planes[plane].length = + vb->planes[plane].length; + } + break; + } + + /* Fill in driver-provided information for OUTPUT types */ + if (V4L2_TYPE_IS_OUTPUT(b->type)) { + /* + * Will have to go up to b->length when API starts + * accepting variable number of planes. + * + * If bytesused == 0 for the output buffer, then fall + * back to the full buffer size. In that case + * userspace clearly never bothered to set it and + * it's a safe assumption that they really meant to + * use the full plane sizes. + * + * Some drivers, e.g. old codec drivers, use bytesused == 0 + * as a way to indicate that streaming is finished. + * In that case, the driver should use the + * allow_zero_bytesused flag to keep old userspace + * applications working. + */ + for (plane = 0; plane < vb->num_planes; ++plane) { + struct vb2_plane *pdst = &planes[plane]; + struct v4l2_plane *psrc = &b->m.planes[plane]; + + if (psrc->bytesused == 0) + vb2_warn_zero_bytesused(vb); + + if (vb->vb2_queue->allow_zero_bytesused) + pdst->bytesused = psrc->bytesused; + else + pdst->bytesused = psrc->bytesused ? + psrc->bytesused : pdst->length; + pdst->data_offset = psrc->data_offset; + } + } + } else { + /* + * Single-planar buffers do not use planes array, + * so fill in relevant v4l2_buffer struct fields instead. + * In videobuf we use our internal V4l2_planes struct for + * single-planar buffers as well, for simplicity. + * + * If bytesused == 0 for the output buffer, then fall back + * to the full buffer size as that's a sensible default. + * + * Some drivers, e.g. old codec drivers, use bytesused == 0 as + * a way to indicate that streaming is finished. In that case, + * the driver should use the allow_zero_bytesused flag to keep + * old userspace applications working. + */ + switch (b->memory) { + case VB2_MEMORY_USERPTR: + planes[0].m.userptr = b->m.userptr; + planes[0].length = b->length; + break; + case VB2_MEMORY_DMABUF: + planes[0].m.fd = b->m.fd; + planes[0].length = b->length; + break; + default: + planes[0].m.offset = vb->planes[0].m.offset; + planes[0].length = vb->planes[0].length; + break; + } + + planes[0].data_offset = 0; + if (V4L2_TYPE_IS_OUTPUT(b->type)) { + if (b->bytesused == 0) + vb2_warn_zero_bytesused(vb); + + if (vb->vb2_queue->allow_zero_bytesused) + planes[0].bytesused = b->bytesused; + else + planes[0].bytesused = b->bytesused ? + b->bytesused : planes[0].length; + } else + planes[0].bytesused = 0; + + } + + /* Zero flags that we handle */ + vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS; + if (!vb->vb2_queue->copy_timestamp || !V4L2_TYPE_IS_OUTPUT(b->type)) { + /* + * Non-COPY timestamps and non-OUTPUT queues will get + * their timestamp and timestamp source flags from the + * queue. 
+ */ + vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; + } + + if (V4L2_TYPE_IS_OUTPUT(b->type)) { + /* + * For output buffers mask out the timecode flag: + * this will be handled later in vb2_qbuf(). + * The 'field' is valid metadata for this output buffer + * and so that needs to be copied here. + */ + vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE; + vbuf->field = b->field; + } else { + /* Zero any output buffer flags as this is a capture buffer */ + vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS; + /* Zero last flag, this is a signal from driver to userspace */ + vbuf->flags &= ~V4L2_BUF_FLAG_LAST; + } + + return 0; +} + +static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev, + struct v4l2_buffer *b, + const char *opname, + struct media_request **p_req) +{ + struct media_request *req; + struct vb2_v4l2_buffer *vbuf; + struct vb2_buffer *vb; + int ret; + if (b->type != q->type) { dprintk(1, "%s: invalid buffer type\n", opname); return -EINVAL; @@ -178,7 +363,82 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b, return -EINVAL; } - return __verify_planes_array(q->bufs[b->index], b); + vb = q->bufs[b->index]; + vbuf = to_vb2_v4l2_buffer(vb); + ret = __verify_planes_array(vb, b); + if (ret) + return ret; + + if (!vb->prepared) { + /* Copy relevant information provided by the userspace */ + memset(vbuf->planes, 0, + sizeof(vbuf->planes[0]) * vb->num_planes); + ret = vb2_fill_vb2_v4l2_buffer(vb, b); + if (ret) + return ret; + } + + if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) { + if (q->uses_requests) { + dprintk(1, "%s: queue uses requests\n", opname); + return -EBUSY; + } + return 0; + } else if (!q->supports_requests) { + dprintk(1, "%s: queue does not support requests\n", opname); + return -EACCES; + } else if (q->uses_qbuf) { + dprintk(1, "%s: queue does not use requests\n", opname); + return -EBUSY; + } + + /* + * For proper locking when queueing a request you need to be able + * to lock access to the vb2 queue, so check that there is a lock + * that we can use. In addition p_req must be non-NULL. + */ + if (WARN_ON(!q->lock || !p_req)) + return -EINVAL; + + /* + * Make sure this op is implemented by the driver. It's easy to forget + * this callback, but is it important when canceling a buffer in a + * queued request. + */ + if (WARN_ON(!q->ops->buf_request_complete)) + return -EINVAL; + + if (vb->state != VB2_BUF_STATE_DEQUEUED) { + dprintk(1, "%s: buffer is not in dequeued state\n", opname); + return -EINVAL; + } + + if (b->request_fd < 0) { + dprintk(1, "%s: request_fd < 0\n", opname); + return -EINVAL; + } + + req = media_request_get_by_fd(mdev, b->request_fd); + if (IS_ERR(req)) { + dprintk(1, "%s: invalid request_fd\n", opname); + return PTR_ERR(req); + } + + /* + * Early sanity check. This is checked again when the buffer + * is bound to the request in vb2_core_qbuf(). 
+ */ + if (req->state != MEDIA_REQUEST_STATE_IDLE && + req->state != MEDIA_REQUEST_STATE_UPDATING) { + dprintk(1, "%s: request is not idle\n", opname); + media_request_put(req); + return -EBUSY; + } + + *p_req = req; + vbuf->request_fd = b->request_fd; + + return 0; } /* @@ -204,7 +464,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb) b->timecode = vbuf->timecode; b->sequence = vbuf->sequence; b->reserved2 = 0; - b->reserved = 0; + b->request_fd = 0; if (q->is_multiplanar) { /* @@ -261,15 +521,15 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb) case VB2_BUF_STATE_ACTIVE: b->flags |= V4L2_BUF_FLAG_QUEUED; break; + case VB2_BUF_STATE_IN_REQUEST: + b->flags |= V4L2_BUF_FLAG_IN_REQUEST; + break; case VB2_BUF_STATE_ERROR: b->flags |= V4L2_BUF_FLAG_ERROR; /* fall through */ case VB2_BUF_STATE_DONE: b->flags |= V4L2_BUF_FLAG_DONE; break; - case VB2_BUF_STATE_PREPARED: - b->flags |= V4L2_BUF_FLAG_PREPARED; - break; case VB2_BUF_STATE_PREPARING: case VB2_BUF_STATE_DEQUEUED: case VB2_BUF_STATE_REQUEUEING: @@ -277,8 +537,17 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb) break; } + if ((vb->state == VB2_BUF_STATE_DEQUEUED || + vb->state == VB2_BUF_STATE_IN_REQUEST) && + vb->synced && vb->prepared) + b->flags |= V4L2_BUF_FLAG_PREPARED; + if (vb2_buffer_in_use(q, vb)) b->flags |= V4L2_BUF_FLAG_MAPPED; + if (vbuf->request_fd >= 0) { + b->flags |= V4L2_BUF_FLAG_REQUEST_FD; + b->request_fd = vbuf->request_fd; + } if (!q->is_output && b->flags & V4L2_BUF_FLAG_DONE && @@ -291,158 +560,28 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb) * v4l2_buffer by the userspace. It also verifies that struct * v4l2_buffer has a valid number of planes. */ -static int __fill_vb2_buffer(struct vb2_buffer *vb, - const void *pb, struct vb2_plane *planes) +static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes) { - struct vb2_queue *q = vb->vb2_queue; - const struct v4l2_buffer *b = pb; struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); unsigned int plane; - int ret; - - ret = __verify_length(vb, b); - if (ret < 0) { - dprintk(1, "plane parameters verification failed: %d\n", ret); - return ret; - } - if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) { - /* - * If the format's field is ALTERNATE, then the buffer's field - * should be either TOP or BOTTOM, not ALTERNATE since that - * makes no sense. The driver has to know whether the - * buffer represents a top or a bottom field in order to - * program any DMA correctly. Using ALTERNATE is wrong, since - * that just says that it is either a top or a bottom field, - * but not which of the two it is. - */ - dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n"); - return -EINVAL; - } - vb->timestamp = 0; - vbuf->sequence = 0; - if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) { - if (b->memory == VB2_MEMORY_USERPTR) { - for (plane = 0; plane < vb->num_planes; ++plane) { - planes[plane].m.userptr = - b->m.planes[plane].m.userptr; - planes[plane].length = - b->m.planes[plane].length; - } - } - if (b->memory == VB2_MEMORY_DMABUF) { - for (plane = 0; plane < vb->num_planes; ++plane) { - planes[plane].m.fd = - b->m.planes[plane].m.fd; - planes[plane].length = - b->m.planes[plane].length; - } - } - - /* Fill in driver-provided information for OUTPUT types */ - if (V4L2_TYPE_IS_OUTPUT(b->type)) { - /* - * Will have to go up to b->length when API starts - * accepting variable number of planes. 
- * - * If bytesused == 0 for the output buffer, then fall - * back to the full buffer size. In that case - * userspace clearly never bothered to set it and - * it's a safe assumption that they really meant to - * use the full plane sizes. - * - * Some drivers, e.g. old codec drivers, use bytesused == 0 - * as a way to indicate that streaming is finished. - * In that case, the driver should use the - * allow_zero_bytesused flag to keep old userspace - * applications working. - */ - for (plane = 0; plane < vb->num_planes; ++plane) { - struct vb2_plane *pdst = &planes[plane]; - struct v4l2_plane *psrc = &b->m.planes[plane]; - - if (psrc->bytesused == 0) - vb2_warn_zero_bytesused(vb); - - if (vb->vb2_queue->allow_zero_bytesused) - pdst->bytesused = psrc->bytesused; - else - pdst->bytesused = psrc->bytesused ? - psrc->bytesused : pdst->length; - pdst->data_offset = psrc->data_offset; - } - } - } else { - /* - * Single-planar buffers do not use planes array, - * so fill in relevant v4l2_buffer struct fields instead. - * In videobuf we use our internal V4l2_planes struct for - * single-planar buffers as well, for simplicity. - * - * If bytesused == 0 for the output buffer, then fall back - * to the full buffer size as that's a sensible default. - * - * Some drivers, e.g. old codec drivers, use bytesused == 0 as - * a way to indicate that streaming is finished. In that case, - * the driver should use the allow_zero_bytesused flag to keep - * old userspace applications working. - */ - if (b->memory == VB2_MEMORY_USERPTR) { - planes[0].m.userptr = b->m.userptr; - planes[0].length = b->length; - } + if (!vb->vb2_queue->is_output || !vb->vb2_queue->copy_timestamp) + vb->timestamp = 0; - if (b->memory == VB2_MEMORY_DMABUF) { - planes[0].m.fd = b->m.fd; - planes[0].length = b->length; + for (plane = 0; plane < vb->num_planes; ++plane) { + if (vb->vb2_queue->memory != VB2_MEMORY_MMAP) { + planes[plane].m = vbuf->planes[plane].m; + planes[plane].length = vbuf->planes[plane].length; } - - if (V4L2_TYPE_IS_OUTPUT(b->type)) { - if (b->bytesused == 0) - vb2_warn_zero_bytesused(vb); - - if (vb->vb2_queue->allow_zero_bytesused) - planes[0].bytesused = b->bytesused; - else - planes[0].bytesused = b->bytesused ? - b->bytesused : planes[0].length; - } else - planes[0].bytesused = 0; - - } - - /* Zero flags that the vb2 core handles */ - vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS; - if (!vb->vb2_queue->copy_timestamp || !V4L2_TYPE_IS_OUTPUT(b->type)) { - /* - * Non-COPY timestamps and non-OUTPUT queues will get - * their timestamp and timestamp source flags from the - * queue. - */ - vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; + planes[plane].bytesused = vbuf->planes[plane].bytesused; + planes[plane].data_offset = vbuf->planes[plane].data_offset; } - - if (V4L2_TYPE_IS_OUTPUT(b->type)) { - /* - * For output buffers mask out the timecode flag: - * this will be handled later in vb2_qbuf(). - * The 'field' is valid metadata for this output buffer - * and so that needs to be copied here. 
- */ - vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE; - vbuf->field = b->field; - } else { - /* Zero any output buffer flags as this is a capture buffer */ - vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS; - /* Zero last flag, this is a signal from driver to userspace */ - vbuf->flags &= ~V4L2_BUF_FLAG_LAST; - } - return 0; } static const struct vb2_buf_ops v4l2_buf_ops = { .verify_planes_array = __verify_planes_array_core, + .init_buffer = __init_v4l2_vb2_buffer, .fill_user_buffer = __fill_v4l2_buffer, .fill_vb2_buffer = __fill_vb2_buffer, .copy_timestamp = __copy_timestamp, @@ -483,15 +622,30 @@ int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b) } EXPORT_SYMBOL(vb2_querybuf); +static void fill_buf_caps(struct vb2_queue *q, u32 *caps) +{ + *caps = 0; + if (q->io_modes & VB2_MMAP) + *caps |= V4L2_BUF_CAP_SUPPORTS_MMAP; + if (q->io_modes & VB2_USERPTR) + *caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR; + if (q->io_modes & VB2_DMABUF) + *caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF; + if (q->supports_requests) + *caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS; +} + int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) { int ret = vb2_verify_memory_type(q, req->memory, req->type); + fill_buf_caps(q, &req->capabilities); return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count); } EXPORT_SYMBOL_GPL(vb2_reqbufs); -int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b) +int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev, + struct v4l2_buffer *b) { int ret; @@ -500,7 +654,10 @@ int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b) return -EBUSY; } - ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf"); + if (b->flags & V4L2_BUF_FLAG_REQUEST_FD) + return -EINVAL; + + ret = vb2_queue_or_prepare_buf(q, mdev, b, "prepare_buf", NULL); return ret ? ret : vb2_core_prepare_buf(q, b->index, b); } @@ -514,6 +671,7 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) int ret = vb2_verify_memory_type(q, create->memory, f->type); unsigned i; + fill_buf_caps(q, &create->capabilities); create->index = q->num_buffers; if (create->count == 0) return ret != -EBUSY ? ret : 0; @@ -560,8 +718,10 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) } EXPORT_SYMBOL_GPL(vb2_create_bufs); -int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b) +int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev, + struct v4l2_buffer *b) { + struct media_request *req = NULL; int ret; if (vb2_fileio_is_active(q)) { @@ -569,8 +729,13 @@ int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b) return -EBUSY; } - ret = vb2_queue_or_prepare_buf(q, b, "qbuf"); - return ret ? ret : vb2_core_qbuf(q, b->index, b); + ret = vb2_queue_or_prepare_buf(q, mdev, b, "qbuf", &req); + if (ret) + return ret; + ret = vb2_core_qbuf(q, b->index, b, req); + if (req) + media_request_put(req); + return ret; } EXPORT_SYMBOL_GPL(vb2_qbuf); @@ -714,6 +879,7 @@ int vb2_ioctl_reqbufs(struct file *file, void *priv, struct video_device *vdev = video_devdata(file); int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type); + fill_buf_caps(vdev->queue, &p->capabilities); if (res) return res; if (vb2_queue_is_busy(vdev, file)) @@ -735,6 +901,7 @@ int vb2_ioctl_create_bufs(struct file *file, void *priv, p->format.type); p->index = vdev->queue->num_buffers; + fill_buf_caps(vdev->queue, &p->capabilities); /* * If count == 0, then just check if memory and type are valid. * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0. 
@@ -760,7 +927,7 @@ int vb2_ioctl_prepare_buf(struct file *file, void *priv, if (vb2_queue_is_busy(vdev, file)) return -EBUSY; - return vb2_prepare_buf(vdev->queue, p); + return vb2_prepare_buf(vdev->queue, vdev->v4l2_dev->mdev, p); } EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf); @@ -779,7 +946,7 @@ int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p) if (vb2_queue_is_busy(vdev, file)) return -EBUSY; - return vb2_qbuf(vdev->queue, p); + return vb2_qbuf(vdev->queue, vdev->v4l2_dev->mdev, p); } EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf); @@ -961,6 +1128,57 @@ void vb2_ops_wait_finish(struct vb2_queue *vq) } EXPORT_SYMBOL_GPL(vb2_ops_wait_finish); +/* + * Note that this function is called during validation time and + * thus the req_queue_mutex is held to ensure no request objects + * can be added or deleted while validating. So there is no need + * to protect the objects list. + */ +int vb2_request_validate(struct media_request *req) +{ + struct media_request_object *obj; + int ret = 0; + + if (!vb2_request_buffer_cnt(req)) + return -ENOENT; + + list_for_each_entry(obj, &req->objects, list) { + if (!obj->ops->prepare) + continue; + + ret = obj->ops->prepare(obj); + if (ret) + break; + } + + if (ret) { + list_for_each_entry_continue_reverse(obj, &req->objects, list) + if (obj->ops->unprepare) + obj->ops->unprepare(obj); + return ret; + } + return 0; +} +EXPORT_SYMBOL_GPL(vb2_request_validate); + +void vb2_request_queue(struct media_request *req) +{ + struct media_request_object *obj, *obj_safe; + + /* + * Queue all objects. Note that buffer objects are at the end of the + * objects list, after all other object types. Once buffer objects + * are queued, the driver might delete them immediately (if the driver + * processes the buffer at once), so we have to use + * list_for_each_entry_safe() to handle the case where the object we + * queue is deleted. 
+ */ + list_for_each_entry_safe(obj, obj_safe, &req->objects, list) + if (obj->ops->queue) + obj->ops->queue(obj); +} +EXPORT_SYMBOL_GPL(vb2_request_queue); + MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2"); MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c index c90b1fd94735..6974f1731529 100644 --- a/drivers/media/dvb-core/dvb_vb2.c +++ b/drivers/media/dvb-core/dvb_vb2.c @@ -146,8 +146,7 @@ static void _fill_dmx_buffer(struct vb2_buffer *vb, void *pb) dprintk(3, "[%s]\n", ctx->name); } -static int _fill_vb2_buffer(struct vb2_buffer *vb, - const void *pb, struct vb2_plane *planes) +static int _fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes) { struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); @@ -385,7 +384,7 @@ int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b) { int ret; - ret = vb2_core_qbuf(&ctx->vb_q, b->index, b); + ret = vb2_core_qbuf(&ctx->vb_q, b->index, b, NULL); if (ret) { dprintk(1, "[%s] index=%d errno=%d\n", ctx->name, b->index, ret); diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c index 8ef91b1598e3..d6673f4fb47b 100644 --- a/drivers/media/dvb-frontends/rtl2832_sdr.c +++ b/drivers/media/dvb-frontends/rtl2832_sdr.c @@ -1394,7 +1394,8 @@ static int rtl2832_sdr_probe(struct platform_device *pdev) case RTL2832_SDR_TUNER_E4000: v4l2_ctrl_handler_init(&dev->hdl, 9); if (subdev) - v4l2_ctrl_add_handler(&dev->hdl, subdev->ctrl_handler, NULL); + v4l2_ctrl_add_handler(&dev->hdl, subdev->ctrl_handler, + NULL, true); break; case RTL2832_SDR_TUNER_R820T: case RTL2832_SDR_TUNER_R828D: @@ -1423,7 +1424,7 @@ static int rtl2832_sdr_probe(struct platform_device *pdev) v4l2_ctrl_handler_init(&dev->hdl, 2); if (subdev) v4l2_ctrl_add_handler(&dev->hdl, subdev->ctrl_handler, - NULL); + NULL, true); break; default: v4l2_ctrl_handler_init(&dev->hdl, 0); diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c index 4c7190db420e..bed24372e61f 100644 --- a/drivers/media/media-device.c +++ b/drivers/media/media-device.c @@ -30,6 +30,7 @@ #include <media/media-device.h> #include <media/media-devnode.h> #include <media/media-entity.h> +#include <media/media-request.h> #ifdef CONFIG_MEDIA_CONTROLLER @@ -377,10 +378,19 @@ static long media_device_get_topology(struct media_device *mdev, void *arg) return ret; } +static long media_device_request_alloc(struct media_device *mdev, + int *alloc_fd) +{ + if (!mdev->ops || !mdev->ops->req_validate || !mdev->ops->req_queue) + return -ENOTTY; + + return media_request_alloc(mdev, alloc_fd); +} + static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd) { - /* All media IOCTLs are _IOWR() */ - if (copy_from_user(karg, uarg, _IOC_SIZE(cmd))) + if ((_IOC_DIR(cmd) & _IOC_WRITE) && + copy_from_user(karg, uarg, _IOC_SIZE(cmd))) return -EFAULT; return 0; @@ -388,8 +398,8 @@ static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd) static long copy_arg_to_user(void __user *uarg, void *karg, unsigned int cmd) { - /* All media IOCTLs are _IOWR() */ - if (copy_to_user(uarg, karg, _IOC_SIZE(cmd))) + if ((_IOC_DIR(cmd) & _IOC_READ) && + copy_to_user(uarg, karg, _IOC_SIZE(cmd))) return -EFAULT; return 0; @@ -425,6 +435,7 @@ static const struct media_ioctl_info ioctl_info[] = { MEDIA_IOC(ENUM_LINKS, media_device_enum_links, MEDIA_IOC_FL_GRAPH_MUTEX), MEDIA_IOC(SETUP_LINK, 
media_device_setup_link, MEDIA_IOC_FL_GRAPH_MUTEX), MEDIA_IOC(G_TOPOLOGY, media_device_get_topology, MEDIA_IOC_FL_GRAPH_MUTEX), + MEDIA_IOC(REQUEST_ALLOC, media_device_request_alloc, 0), }; static long media_device_ioctl(struct file *filp, unsigned int cmd, @@ -691,9 +702,13 @@ void media_device_init(struct media_device *mdev) INIT_LIST_HEAD(&mdev->pads); INIT_LIST_HEAD(&mdev->links); INIT_LIST_HEAD(&mdev->entity_notify); + + mutex_init(&mdev->req_queue_mutex); mutex_init(&mdev->graph_mutex); ida_init(&mdev->entity_internal_idx); + atomic_set(&mdev->request_id, 0); + dev_dbg(mdev->dev, "Media device initialized\n"); } EXPORT_SYMBOL_GPL(media_device_init); @@ -704,6 +719,7 @@ void media_device_cleanup(struct media_device *mdev) mdev->entity_internal_idx_max = 0; media_graph_walk_cleanup(&mdev->pm_count_walk); mutex_destroy(&mdev->graph_mutex); + mutex_destroy(&mdev->req_queue_mutex); } EXPORT_SYMBOL_GPL(media_device_cleanup); diff --git a/drivers/media/media-request.c b/drivers/media/media-request.c new file mode 100644 index 000000000000..4e9db1fed697 --- /dev/null +++ b/drivers/media/media-request.c @@ -0,0 +1,501 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Media device request objects + * + * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018 Google, Inc. + * + * Author: Hans Verkuil <hans.verkuil@cisco.com> + * Author: Sakari Ailus <sakari.ailus@linux.intel.com> + */ + +#include <linux/anon_inodes.h> +#include <linux/file.h> +#include <linux/refcount.h> + +#include <media/media-device.h> +#include <media/media-request.h> + +static const char * const request_state[] = { + [MEDIA_REQUEST_STATE_IDLE] = "idle", + [MEDIA_REQUEST_STATE_VALIDATING] = "validating", + [MEDIA_REQUEST_STATE_QUEUED] = "queued", + [MEDIA_REQUEST_STATE_COMPLETE] = "complete", + [MEDIA_REQUEST_STATE_CLEANING] = "cleaning", + [MEDIA_REQUEST_STATE_UPDATING] = "updating", +}; + +static const char * +media_request_state_str(enum media_request_state state) +{ + BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE); + + if (WARN_ON(state >= ARRAY_SIZE(request_state))) + return "invalid"; + return request_state[state]; +} + +static void media_request_clean(struct media_request *req) +{ + struct media_request_object *obj, *obj_safe; + + /* Just a sanity check. No other code path is allowed to change this. 
*/ + WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING); + WARN_ON(req->updating_count); + WARN_ON(req->access_count); + + list_for_each_entry_safe(obj, obj_safe, &req->objects, list) { + media_request_object_unbind(obj); + media_request_object_put(obj); + } + + req->updating_count = 0; + req->access_count = 0; + WARN_ON(req->num_incomplete_objects); + req->num_incomplete_objects = 0; + wake_up_interruptible_all(&req->poll_wait); +} + +static void media_request_release(struct kref *kref) +{ + struct media_request *req = + container_of(kref, struct media_request, kref); + struct media_device *mdev = req->mdev; + + dev_dbg(mdev->dev, "request: release %s\n", req->debug_str); + + /* No other users, no need for a spinlock */ + req->state = MEDIA_REQUEST_STATE_CLEANING; + + media_request_clean(req); + + if (mdev->ops->req_free) + mdev->ops->req_free(req); + else + kfree(req); +} + +void media_request_put(struct media_request *req) +{ + kref_put(&req->kref, media_request_release); +} +EXPORT_SYMBOL_GPL(media_request_put); + +static int media_request_close(struct inode *inode, struct file *filp) +{ + struct media_request *req = filp->private_data; + + media_request_put(req); + return 0; +} + +static __poll_t media_request_poll(struct file *filp, + struct poll_table_struct *wait) +{ + struct media_request *req = filp->private_data; + unsigned long flags; + __poll_t ret = 0; + + if (!(poll_requested_events(wait) & EPOLLPRI)) + return 0; + + spin_lock_irqsave(&req->lock, flags); + if (req->state == MEDIA_REQUEST_STATE_COMPLETE) { + ret = EPOLLPRI; + goto unlock; + } + if (req->state != MEDIA_REQUEST_STATE_QUEUED) { + ret = EPOLLERR; + goto unlock; + } + + poll_wait(filp, &req->poll_wait, wait); + +unlock: + spin_unlock_irqrestore(&req->lock, flags); + return ret; +} + +static long media_request_ioctl_queue(struct media_request *req) +{ + struct media_device *mdev = req->mdev; + enum media_request_state state; + unsigned long flags; + int ret; + + dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str); + + /* + * Ensure the request that is validated will be the one that gets queued + * next by serialising the queueing process. This mutex is also used + * to serialize with canceling a vb2 queue and with setting values such + * as controls in a request. + */ + mutex_lock(&mdev->req_queue_mutex); + + media_request_get(req); + + spin_lock_irqsave(&req->lock, flags); + if (req->state == MEDIA_REQUEST_STATE_IDLE) + req->state = MEDIA_REQUEST_STATE_VALIDATING; + state = req->state; + spin_unlock_irqrestore(&req->lock, flags); + if (state != MEDIA_REQUEST_STATE_VALIDATING) { + dev_dbg(mdev->dev, + "request: unable to queue %s, request in state %s\n", + req->debug_str, media_request_state_str(state)); + media_request_put(req); + mutex_unlock(&mdev->req_queue_mutex); + return -EBUSY; + } + + ret = mdev->ops->req_validate(req); + + /* + * If the req_validate was successful, then we mark the state as QUEUED + * and call req_queue. The reason we set the state first is that this + * allows req_queue to unbind or complete the queued objects in case + * they are immediately 'consumed'. State changes from QUEUED to another + * state can only happen if either the driver changes the state or if + * the user cancels the vb2 queue. The driver can only change the state + * after each object is queued through the req_queue op (and note that + * that op cannot fail), so setting the state to QUEUED up front is + * safe. 
+ * + * The other reason for changing the state is if the vb2 queue is + * canceled, and that uses the req_queue_mutex which is still locked + * while req_queue is called, so that's safe as well. + */ + spin_lock_irqsave(&req->lock, flags); + req->state = ret ? MEDIA_REQUEST_STATE_IDLE + : MEDIA_REQUEST_STATE_QUEUED; + spin_unlock_irqrestore(&req->lock, flags); + + if (!ret) + mdev->ops->req_queue(req); + + mutex_unlock(&mdev->req_queue_mutex); + + if (ret) { + dev_dbg(mdev->dev, "request: can't queue %s (%d)\n", + req->debug_str, ret); + media_request_put(req); + } + + return ret; +} + +static long media_request_ioctl_reinit(struct media_request *req) +{ + struct media_device *mdev = req->mdev; + unsigned long flags; + + spin_lock_irqsave(&req->lock, flags); + if (req->state != MEDIA_REQUEST_STATE_IDLE && + req->state != MEDIA_REQUEST_STATE_COMPLETE) { + dev_dbg(mdev->dev, + "request: %s not in idle or complete state, cannot reinit\n", + req->debug_str); + spin_unlock_irqrestore(&req->lock, flags); + return -EBUSY; + } + if (req->access_count) { + dev_dbg(mdev->dev, + "request: %s is being accessed, cannot reinit\n", + req->debug_str); + spin_unlock_irqrestore(&req->lock, flags); + return -EBUSY; + } + req->state = MEDIA_REQUEST_STATE_CLEANING; + spin_unlock_irqrestore(&req->lock, flags); + + media_request_clean(req); + + spin_lock_irqsave(&req->lock, flags); + req->state = MEDIA_REQUEST_STATE_IDLE; + spin_unlock_irqrestore(&req->lock, flags); + + return 0; +} + +static long media_request_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct media_request *req = filp->private_data; + + switch (cmd) { + case MEDIA_REQUEST_IOC_QUEUE: + return media_request_ioctl_queue(req); + case MEDIA_REQUEST_IOC_REINIT: + return media_request_ioctl_reinit(req); + default: + return -ENOIOCTLCMD; + } +} + +static const struct file_operations request_fops = { + .owner = THIS_MODULE, + .poll = media_request_poll, + .unlocked_ioctl = media_request_ioctl, + .release = media_request_close, +}; + +struct media_request * +media_request_get_by_fd(struct media_device *mdev, int request_fd) +{ + struct file *filp; + struct media_request *req; + + if (!mdev || !mdev->ops || + !mdev->ops->req_validate || !mdev->ops->req_queue) + return ERR_PTR(-EACCES); + + filp = fget(request_fd); + if (!filp) + goto err_no_req_fd; + + if (filp->f_op != &request_fops) + goto err_fput; + req = filp->private_data; + if (req->mdev != mdev) + goto err_fput; + + /* + * Note: as long as someone has an open filehandle of the request, + * the request can never be released. The fget() above ensures that + * even if userspace closes the request filehandle, the release() + * fop won't be called, so the media_request_get() always succeeds + * and there is no race condition where the request was released + * before media_request_get() is called. 
+ */ + media_request_get(req); + fput(filp); + + return req; + +err_fput: + fput(filp); + +err_no_req_fd: + dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd); + return ERR_PTR(-EINVAL); +} +EXPORT_SYMBOL_GPL(media_request_get_by_fd); + +int media_request_alloc(struct media_device *mdev, int *alloc_fd) +{ + struct media_request *req; + struct file *filp; + int fd; + int ret; + + /* Either both are NULL or both are non-NULL */ + if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free)) + return -ENOMEM; + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) + return fd; + + filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC); + if (IS_ERR(filp)) { + ret = PTR_ERR(filp); + goto err_put_fd; + } + + if (mdev->ops->req_alloc) + req = mdev->ops->req_alloc(mdev); + else + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) { + ret = -ENOMEM; + goto err_fput; + } + + filp->private_data = req; + req->mdev = mdev; + req->state = MEDIA_REQUEST_STATE_IDLE; + req->num_incomplete_objects = 0; + kref_init(&req->kref); + INIT_LIST_HEAD(&req->objects); + spin_lock_init(&req->lock); + init_waitqueue_head(&req->poll_wait); + req->updating_count = 0; + req->access_count = 0; + + *alloc_fd = fd; + + snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d", + atomic_inc_return(&mdev->request_id), fd); + dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str); + + fd_install(fd, filp); + + return 0; + +err_fput: + fput(filp); + +err_put_fd: + put_unused_fd(fd); + + return ret; +} + +static void media_request_object_release(struct kref *kref) +{ + struct media_request_object *obj = + container_of(kref, struct media_request_object, kref); + struct media_request *req = obj->req; + + if (WARN_ON(req)) + media_request_object_unbind(obj); + obj->ops->release(obj); +} + +struct media_request_object * +media_request_object_find(struct media_request *req, + const struct media_request_object_ops *ops, + void *priv) +{ + struct media_request_object *obj; + struct media_request_object *found = NULL; + unsigned long flags; + + if (WARN_ON(!ops || !priv)) + return NULL; + + spin_lock_irqsave(&req->lock, flags); + list_for_each_entry(obj, &req->objects, list) { + if (obj->ops == ops && obj->priv == priv) { + media_request_object_get(obj); + found = obj; + break; + } + } + spin_unlock_irqrestore(&req->lock, flags); + return found; +} +EXPORT_SYMBOL_GPL(media_request_object_find); + +void media_request_object_put(struct media_request_object *obj) +{ + kref_put(&obj->kref, media_request_object_release); +} +EXPORT_SYMBOL_GPL(media_request_object_put); + +void media_request_object_init(struct media_request_object *obj) +{ + obj->ops = NULL; + obj->req = NULL; + obj->priv = NULL; + obj->completed = false; + INIT_LIST_HEAD(&obj->list); + kref_init(&obj->kref); +} +EXPORT_SYMBOL_GPL(media_request_object_init); + +int media_request_object_bind(struct media_request *req, + const struct media_request_object_ops *ops, + void *priv, bool is_buffer, + struct media_request_object *obj) +{ + unsigned long flags; + int ret = -EBUSY; + + if (WARN_ON(!ops->release)) + return -EACCES; + + spin_lock_irqsave(&req->lock, flags); + + if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING)) + goto unlock; + + obj->req = req; + obj->ops = ops; + obj->priv = priv; + + if (is_buffer) + list_add_tail(&obj->list, &req->objects); + else + list_add(&obj->list, &req->objects); + req->num_incomplete_objects++; + ret = 0; + +unlock: + spin_unlock_irqrestore(&req->lock, flags); + return ret; +} 
+EXPORT_SYMBOL_GPL(media_request_object_bind); + +void media_request_object_unbind(struct media_request_object *obj) +{ + struct media_request *req = obj->req; + unsigned long flags; + bool completed = false; + + if (WARN_ON(!req)) + return; + + spin_lock_irqsave(&req->lock, flags); + list_del(&obj->list); + obj->req = NULL; + + if (req->state == MEDIA_REQUEST_STATE_COMPLETE) + goto unlock; + + if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING)) + goto unlock; + + if (req->state == MEDIA_REQUEST_STATE_CLEANING) { + if (!obj->completed) + req->num_incomplete_objects--; + goto unlock; + } + + if (WARN_ON(!req->num_incomplete_objects)) + goto unlock; + + req->num_incomplete_objects--; + if (req->state == MEDIA_REQUEST_STATE_QUEUED && + !req->num_incomplete_objects) { + req->state = MEDIA_REQUEST_STATE_COMPLETE; + completed = true; + wake_up_interruptible_all(&req->poll_wait); + } + +unlock: + spin_unlock_irqrestore(&req->lock, flags); + if (obj->ops->unbind) + obj->ops->unbind(obj); + if (completed) + media_request_put(req); +} +EXPORT_SYMBOL_GPL(media_request_object_unbind); + +void media_request_object_complete(struct media_request_object *obj) +{ + struct media_request *req = obj->req; + unsigned long flags; + bool completed = false; + + spin_lock_irqsave(&req->lock, flags); + if (obj->completed) + goto unlock; + obj->completed = true; + if (WARN_ON(!req->num_incomplete_objects) || + WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED)) + goto unlock; + + if (!--req->num_incomplete_objects) { + req->state = MEDIA_REQUEST_STATE_COMPLETE; + wake_up_interruptible_all(&req->poll_wait); + completed = true; + } +unlock: + spin_unlock_irqrestore(&req->lock, flags); + if (completed) + media_request_put(req); +} +EXPORT_SYMBOL_GPL(media_request_object_complete); diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index b2cfcbb0008e..d4906c04dc6e 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -4210,7 +4210,7 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) /* register video4linux + input */ if (!bttv_tvcards[btv->c.type].no_video) { v4l2_ctrl_add_handler(&btv->radio_ctrl_handler, hdl, - v4l2_ctrl_radio_filter); + v4l2_ctrl_radio_filter, false); if (btv->radio_ctrl_handler.error) { result = btv->radio_ctrl_handler.error; goto fail2; diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c index 3083434bb636..a00b77d80ed9 100644 --- a/drivers/media/pci/cx23885/cx23885-417.c +++ b/drivers/media/pci/cx23885/cx23885-417.c @@ -1527,7 +1527,7 @@ int cx23885_417_register(struct cx23885_dev *dev) dev->cxhdl.priv = dev; dev->cxhdl.func = cx23885_api_func; cx2341x_handler_set_50hz(&dev->cxhdl, tsport->height == 576); - v4l2_ctrl_add_handler(&dev->ctrl_handler, &dev->cxhdl.hdl, NULL); + v4l2_ctrl_add_handler(&dev->ctrl_handler, &dev->cxhdl.hdl, NULL, false); /* Allocate and initialize V4L video device */ dev->v4l_device = cx23885_video_dev_alloc(tsport, diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c index 199756547f03..6c0bb9fe4a31 100644 --- a/drivers/media/pci/cx88/cx88-blackbird.c +++ b/drivers/media/pci/cx88/cx88-blackbird.c @@ -1183,7 +1183,7 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv) err = cx2341x_handler_init(&dev->cxhdl, 36); if (err) goto fail_core; - v4l2_ctrl_add_handler(&dev->cxhdl.hdl, &core->video_hdl, NULL); + v4l2_ctrl_add_handler(&dev->cxhdl.hdl, &core->video_hdl, 
NULL, false); /* blackbird stuff */ pr_info("cx23416 based mpeg encoder (blackbird reference design)\n"); diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index df4e7a0686e0..e1549d352f70 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ b/drivers/media/pci/cx88/cx88-video.c @@ -1378,7 +1378,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev, if (vc->id == V4L2_CID_CHROMA_AGC) core->chroma_agc = vc; } - v4l2_ctrl_add_handler(&core->video_hdl, &core->audio_hdl, NULL); + v4l2_ctrl_add_handler(&core->video_hdl, &core->audio_hdl, NULL, false); /* load and configure helper modules */ diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c index 747a082229dc..9735bbb908e3 100644 --- a/drivers/media/pci/saa7134/saa7134-empress.c +++ b/drivers/media/pci/saa7134/saa7134-empress.c @@ -265,9 +265,9 @@ static int empress_init(struct saa7134_dev *dev) "%s empress (%s)", dev->name, saa7134_boards[dev->board].name); v4l2_ctrl_handler_init(hdl, 21); - v4l2_ctrl_add_handler(hdl, &dev->ctrl_handler, empress_ctrl_filter); + v4l2_ctrl_add_handler(hdl, &dev->ctrl_handler, empress_ctrl_filter, false); if (dev->empress_sd) - v4l2_ctrl_add_handler(hdl, dev->empress_sd->ctrl_handler, NULL); + v4l2_ctrl_add_handler(hdl, dev->empress_sd->ctrl_handler, NULL, true); if (hdl->error) { video_device_release(dev->empress_dev); return hdl->error; diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c index 1a22ae7cbdd9..8f28741ebb35 100644 --- a/drivers/media/pci/saa7134/saa7134-video.c +++ b/drivers/media/pci/saa7134/saa7134-video.c @@ -2137,7 +2137,7 @@ int saa7134_video_init1(struct saa7134_dev *dev) hdl = &dev->radio_ctrl_handler; v4l2_ctrl_handler_init(hdl, 2); v4l2_ctrl_add_handler(hdl, &dev->ctrl_handler, - v4l2_ctrl_radio_filter); + v4l2_ctrl_radio_filter, false); if (hdl->error) return hdl->error; } diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c index f56220e549bb..3e9fcf4f8a13 100644 --- a/drivers/media/platform/exynos4-is/fimc-capture.c +++ b/drivers/media/platform/exynos4-is/fimc-capture.c @@ -1424,7 +1424,7 @@ static int fimc_link_setup(struct media_entity *entity, return 0; return v4l2_ctrl_add_handler(&vc->ctx->ctrls.handler, - sensor->ctrl_handler, NULL); + sensor->ctrl_handler, NULL, true); } static const struct media_entity_operations fimc_sd_media_ops = { diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c index 5658f6a326f7..078d64114b24 100644 --- a/drivers/media/platform/omap3isp/ispvideo.c +++ b/drivers/media/platform/omap3isp/ispvideo.c @@ -940,7 +940,7 @@ isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b) int ret; mutex_lock(&video->queue_lock); - ret = vb2_qbuf(&vfh->queue, b); + ret = vb2_qbuf(&vfh->queue, video->video.v4l2_dev->mdev, b); mutex_unlock(&video->queue_lock); return ret; @@ -1028,7 +1028,7 @@ static int isp_video_check_external_subdevs(struct isp_video *video, ctrls.count = 1; ctrls.controls = &ctrl; - ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &ctrls); + ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, NULL, &ctrls); if (ret < 0) { dev_warn(isp->dev, "no pixel rate control in subdev %s\n", pipe->external->name); diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c index a3f135364474..f476b2f1eb35 100644 --- a/drivers/media/platform/rcar-vin/rcar-core.c +++ 
b/drivers/media/platform/rcar-vin/rcar-core.c @@ -475,7 +475,7 @@ static int rvin_parallel_subdevice_attach(struct rvin_dev *vin, return ret; ret = v4l2_ctrl_add_handler(&vin->ctrl_handler, subdev->ctrl_handler, - NULL); + NULL, true); if (ret < 0) { v4l2_ctrl_handler_free(&vin->ctrl_handler); return ret; diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c index 8483dc36715d..c417ff8f6fe5 100644 --- a/drivers/media/platform/rcar_drif.c +++ b/drivers/media/platform/rcar_drif.c @@ -1164,7 +1164,7 @@ static int rcar_drif_notify_complete(struct v4l2_async_notifier *notifier) } ret = v4l2_ctrl_add_handler(&sdr->ctrl_hdl, - sdr->ep.subdev->ctrl_handler, NULL); + sdr->ep.subdev->ctrl_handler, NULL, true); if (ret) { rdrif_err(sdr, "failed: ctrl add hdlr ret %d\n", ret); goto error; diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c index 23b008d1a47b..c3fc94ef251e 100644 --- a/drivers/media/platform/s3c-camif/camif-capture.c +++ b/drivers/media/platform/s3c-camif/camif-capture.c @@ -943,7 +943,7 @@ static int s3c_camif_qbuf(struct file *file, void *priv, if (vp->owner && vp->owner != priv) return -EBUSY; - return vb2_qbuf(&vp->vb_queue, buf); + return vb2_qbuf(&vp->vb_queue, vp->vdev.v4l2_dev->mdev, buf); } static int s3c_camif_dqbuf(struct file *file, void *priv, @@ -981,7 +981,7 @@ static int s3c_camif_prepare_buf(struct file *file, void *priv, struct v4l2_buffer *b) { struct camif_vp *vp = video_drvdata(file); - return vb2_prepare_buf(&vp->vb_queue, b); + return vb2_prepare_buf(&vp->vb_queue, vp->vdev.v4l2_dev->mdev, b); } static int s3c_camif_g_selection(struct file *file, void *priv, diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c index 670ca869babb..ece59ce1b149 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c @@ -632,9 +632,9 @@ static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) return -EIO; } if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) - return vb2_qbuf(&ctx->vq_src, buf); + return vb2_qbuf(&ctx->vq_src, NULL, buf); else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) - return vb2_qbuf(&ctx->vq_dst, buf); + return vb2_qbuf(&ctx->vq_dst, NULL, buf); return -EINVAL; } diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c index 7037d48bdc2c..8fcf627dedfb 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c @@ -1621,9 +1621,9 @@ static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) mfc_err("Call on QBUF after EOS command\n"); return -EIO; } - return vb2_qbuf(&ctx->vq_src, buf); + return vb2_qbuf(&ctx->vq_src, NULL, buf); } else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { - return vb2_qbuf(&ctx->vq_dst, buf); + return vb2_qbuf(&ctx->vq_dst, NULL, buf); } return -EINVAL; } diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c index 0a70fb67c401..21034339cdcb 100644 --- a/drivers/media/platform/soc_camera/soc_camera.c +++ b/drivers/media/platform/soc_camera/soc_camera.c @@ -394,7 +394,7 @@ static int soc_camera_qbuf(struct file *file, void *priv, if (icd->streamer != file) return -EBUSY; - return vb2_qbuf(&icd->vb2_vidq, p); + return vb2_qbuf(&icd->vb2_vidq, NULL, p); } static int soc_camera_dqbuf(struct file *file, void *priv, @@ -430,7 +430,7 @@ 
static int soc_camera_prepare_buf(struct file *file, void *priv, { struct soc_camera_device *icd = file->private_data; - return vb2_prepare_buf(&icd->vb2_vidq, b); + return vb2_prepare_buf(&icd->vb2_vidq, NULL, b); } static int soc_camera_expbuf(struct file *file, void *priv, @@ -1181,7 +1181,8 @@ static int soc_camera_probe_finish(struct soc_camera_device *icd) v4l2_subdev_call(sd, video, g_tvnorms, &icd->vdev->tvnorms); - ret = v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler, NULL); + ret = v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler, + NULL, true); if (ret < 0) return ret; diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c index 60c522ee2e03..af150a0395df 100644 --- a/drivers/media/platform/vim2m.c +++ b/drivers/media/platform/vim2m.c @@ -3,7 +3,8 @@ * * This is a virtual device driver for testing mem-to-mem videobuf framework. * It simulates a device that uses memory buffers for both source and - * destination, processes the data and issues an "irq" (simulated by a timer). + * destination, processes the data and issues an "irq" (simulated by a delayed + * workqueue). * The device is capable of multi-instance, multi-buffer-per-transaction * operation (via the mem2mem framework). * @@ -19,7 +20,6 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/fs.h> -#include <linux/timer.h> #include <linux/sched.h> #include <linux/slab.h> @@ -148,7 +148,7 @@ struct vim2m_dev { struct mutex dev_mutex; spinlock_t irqlock; - struct timer_list timer; + struct delayed_work work_run; struct v4l2_m2m_dev *m2m_dev; }; @@ -336,12 +336,6 @@ static int device_process(struct vim2m_ctx *ctx, return 0; } -static void schedule_irq(struct vim2m_dev *dev, int msec_timeout) -{ - dprintk(dev, "Scheduling a simulated irq\n"); - mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msec_timeout)); -} - /* * mem2mem callbacks */ @@ -385,15 +379,24 @@ static void device_run(void *priv) src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); + /* Apply request controls if any */ + v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req, + &ctx->hdl); + device_process(ctx, src_buf, dst_buf); - /* Run a timer, which simulates a hardware irq */ - schedule_irq(dev, ctx->transtime); + /* Complete request controls if any */ + v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req, + &ctx->hdl); + + /* Run delayed work, which simulates a hardware irq */ + schedule_delayed_work(&dev->work_run, msecs_to_jiffies(ctx->transtime)); } -static void device_isr(struct timer_list *t) +static void device_work(struct work_struct *w) { - struct vim2m_dev *vim2m_dev = from_timer(vim2m_dev, t, timer); + struct vim2m_dev *vim2m_dev = + container_of(w, struct vim2m_dev, work_run.work); struct vim2m_ctx *curr_ctx; struct vb2_v4l2_buffer *src_vb, *dst_vb; unsigned long flags; @@ -805,6 +808,7 @@ static void vim2m_stop_streaming(struct vb2_queue *q) struct vb2_v4l2_buffer *vbuf; unsigned long flags; + flush_scheduled_work(); for (;;) { if (V4L2_TYPE_IS_OUTPUT(q->type)) vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); @@ -812,12 +816,21 @@ static void vim2m_stop_streaming(struct vb2_queue *q) vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); if (vbuf == NULL) return; + v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req, + &ctx->hdl); spin_lock_irqsave(&ctx->dev->irqlock, flags); v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR); spin_unlock_irqrestore(&ctx->dev->irqlock, flags); } } +static void vim2m_buf_request_complete(struct vb2_buffer *vb) +{ 
+ struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + + v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->hdl); +} + static const struct vb2_ops vim2m_qops = { .queue_setup = vim2m_queue_setup, .buf_prepare = vim2m_buf_prepare, @@ -826,6 +839,7 @@ static const struct vb2_ops vim2m_qops = { .stop_streaming = vim2m_stop_streaming, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, + .buf_request_complete = vim2m_buf_request_complete, }; static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) @@ -841,6 +855,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds src_vq->mem_ops = &vb2_vmalloc_memops; src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; src_vq->lock = &ctx->dev->dev_mutex; + src_vq->supports_requests = true; ret = vb2_queue_init(src_vq); if (ret) @@ -992,6 +1007,11 @@ static const struct v4l2_m2m_ops m2m_ops = { .job_abort = job_abort, }; +static const struct media_device_ops m2m_media_ops = { + .req_validate = vb2_request_validate, + .req_queue = vb2_m2m_request_queue, +}; + static int vim2m_probe(struct platform_device *pdev) { struct vim2m_dev *dev; @@ -1015,6 +1035,7 @@ static int vim2m_probe(struct platform_device *pdev) vfd = &dev->vfd; vfd->lock = &dev->dev_mutex; vfd->v4l2_dev = &dev->v4l2_dev; + INIT_DELAYED_WORK(&dev->work_run, device_work); ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); if (ret) { @@ -1026,7 +1047,6 @@ static int vim2m_probe(struct platform_device *pdev) v4l2_info(&dev->v4l2_dev, "Device registered as /dev/video%d\n", vfd->num); - timer_setup(&dev->timer, device_isr, 0); platform_set_drvdata(pdev, dev); dev->m2m_dev = v4l2_m2m_init(&m2m_ops); @@ -1040,6 +1060,7 @@ static int vim2m_probe(struct platform_device *pdev) dev->mdev.dev = &pdev->dev; strscpy(dev->mdev.model, "vim2m", sizeof(dev->mdev.model)); media_device_init(&dev->mdev); + dev->mdev.ops = &m2m_media_ops; dev->v4l2_dev.mdev = &dev->mdev; ret = v4l2_m2m_register_media_controller(dev->m2m_dev, @@ -1083,7 +1104,6 @@ static int vim2m_remove(struct platform_device *pdev) media_device_cleanup(&dev->mdev); #endif v4l2_m2m_release(dev->m2m_dev); - del_timer_sync(&dev->timer); video_unregister_device(&dev->vfd); v4l2_device_unregister(&dev->v4l2_dev); diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c index 06961e7d8036..626e2b24a403 100644 --- a/drivers/media/platform/vivid/vivid-core.c +++ b/drivers/media/platform/vivid/vivid-core.c @@ -627,6 +627,13 @@ static void vivid_dev_release(struct v4l2_device *v4l2_dev) kfree(dev); } +#ifdef CONFIG_MEDIA_CONTROLLER +static const struct media_device_ops vivid_media_ops = { + .req_validate = vb2_request_validate, + .req_queue = vb2_request_queue, +}; +#endif + static int vivid_create_instance(struct platform_device *pdev, int inst) { static const struct v4l2_dv_timings def_dv_timings = @@ -657,6 +664,16 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) dev->inst = inst; +#ifdef CONFIG_MEDIA_CONTROLLER + dev->v4l2_dev.mdev = &dev->mdev; + + /* Initialize media device */ + strlcpy(dev->mdev.model, VIVID_MODULE_NAME, sizeof(dev->mdev.model)); + dev->mdev.dev = &pdev->dev; + media_device_init(&dev->mdev); + dev->mdev.ops = &vivid_media_ops; +#endif + /* register v4l2_device */ snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s-%03d", VIVID_MODULE_NAME, inst); @@ -1060,6 +1077,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) q->min_buffers_needed = 
2; q->lock = &dev->mutex; q->dev = dev->v4l2_dev.dev; + q->supports_requests = true; ret = vb2_queue_init(q); if (ret) @@ -1080,6 +1098,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) q->min_buffers_needed = 2; q->lock = &dev->mutex; q->dev = dev->v4l2_dev.dev; + q->supports_requests = true; ret = vb2_queue_init(q); if (ret) @@ -1100,6 +1119,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) q->min_buffers_needed = 2; q->lock = &dev->mutex; q->dev = dev->v4l2_dev.dev; + q->supports_requests = true; ret = vb2_queue_init(q); if (ret) @@ -1120,6 +1140,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) q->min_buffers_needed = 2; q->lock = &dev->mutex; q->dev = dev->v4l2_dev.dev; + q->supports_requests = true; ret = vb2_queue_init(q); if (ret) @@ -1139,6 +1160,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) q->min_buffers_needed = 8; q->lock = &dev->mutex; q->dev = dev->v4l2_dev.dev; + q->supports_requests = true; ret = vb2_queue_init(q); if (ret) @@ -1174,6 +1196,13 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) vfd->lock = &dev->mutex; video_set_drvdata(vfd, dev); +#ifdef CONFIG_MEDIA_CONTROLLER + dev->vid_cap_pad.flags = MEDIA_PAD_FL_SINK; + ret = media_entity_pads_init(&vfd->entity, 1, &dev->vid_cap_pad); + if (ret) + goto unreg_dev; +#endif + #ifdef CONFIG_VIDEO_VIVID_CEC if (in_type_counter[HDMI]) { struct cec_adapter *adap; @@ -1226,6 +1255,13 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) vfd->lock = &dev->mutex; video_set_drvdata(vfd, dev); +#ifdef CONFIG_MEDIA_CONTROLLER + dev->vid_out_pad.flags = MEDIA_PAD_FL_SOURCE; + ret = media_entity_pads_init(&vfd->entity, 1, &dev->vid_out_pad); + if (ret) + goto unreg_dev; +#endif + #ifdef CONFIG_VIDEO_VIVID_CEC for (i = 0; i < dev->num_outputs; i++) { struct cec_adapter *adap; @@ -1275,6 +1311,13 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) vfd->tvnorms = tvnorms_cap; video_set_drvdata(vfd, dev); +#ifdef CONFIG_MEDIA_CONTROLLER + dev->vbi_cap_pad.flags = MEDIA_PAD_FL_SINK; + ret = media_entity_pads_init(&vfd->entity, 1, &dev->vbi_cap_pad); + if (ret) + goto unreg_dev; +#endif + ret = video_register_device(vfd, VFL_TYPE_VBI, vbi_cap_nr[inst]); if (ret < 0) goto unreg_dev; @@ -1300,6 +1343,13 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) vfd->tvnorms = tvnorms_out; video_set_drvdata(vfd, dev); +#ifdef CONFIG_MEDIA_CONTROLLER + dev->vbi_out_pad.flags = MEDIA_PAD_FL_SOURCE; + ret = media_entity_pads_init(&vfd->entity, 1, &dev->vbi_out_pad); + if (ret) + goto unreg_dev; +#endif + ret = video_register_device(vfd, VFL_TYPE_VBI, vbi_out_nr[inst]); if (ret < 0) goto unreg_dev; @@ -1323,6 +1373,13 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) vfd->lock = &dev->mutex; video_set_drvdata(vfd, dev); +#ifdef CONFIG_MEDIA_CONTROLLER + dev->sdr_cap_pad.flags = MEDIA_PAD_FL_SINK; + ret = media_entity_pads_init(&vfd->entity, 1, &dev->sdr_cap_pad); + if (ret) + goto unreg_dev; +#endif + ret = video_register_device(vfd, VFL_TYPE_SDR, sdr_cap_nr[inst]); if (ret < 0) goto unreg_dev; @@ -1369,12 +1426,25 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) video_device_node_name(vfd)); } +#ifdef CONFIG_MEDIA_CONTROLLER + /* Register the media device */ + ret = media_device_register(&dev->mdev); + if (ret) { + dev_err(dev->mdev.dev, + "media device register failed (err=%d)\n", ret); + 
goto unreg_dev; + } +#endif + /* Now that everything is fine, let's add it to device list */ vivid_devs[inst] = dev; return 0; unreg_dev: +#ifdef CONFIG_MEDIA_CONTROLLER + media_device_unregister(&dev->mdev); +#endif video_unregister_device(&dev->radio_tx_dev); video_unregister_device(&dev->radio_rx_dev); video_unregister_device(&dev->sdr_cap_dev); @@ -1445,6 +1515,10 @@ static int vivid_remove(struct platform_device *pdev) if (!dev) continue; +#ifdef CONFIG_MEDIA_CONTROLLER + media_device_unregister(&dev->mdev); +#endif + if (dev->has_vid_cap) { v4l2_info(&dev->v4l2_dev, "unregistering %s\n", video_device_node_name(&dev->vid_cap_dev)); diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h index cd4c8230563c..1891254c8f0b 100644 --- a/drivers/media/platform/vivid/vivid-core.h +++ b/drivers/media/platform/vivid/vivid-core.h @@ -136,6 +136,14 @@ struct vivid_cec_work { struct vivid_dev { unsigned inst; struct v4l2_device v4l2_dev; +#ifdef CONFIG_MEDIA_CONTROLLER + struct media_device mdev; + struct media_pad vid_cap_pad; + struct media_pad vid_out_pad; + struct media_pad vbi_cap_pad; + struct media_pad vbi_out_pad; + struct media_pad sdr_cap_pad; +#endif struct v4l2_ctrl_handler ctrl_hdl_user_gen; struct v4l2_ctrl_handler ctrl_hdl_user_vid; struct v4l2_ctrl_handler ctrl_hdl_user_aud; diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c index 999aa101b150..bfffeda12f14 100644 --- a/drivers/media/platform/vivid/vivid-ctrls.c +++ b/drivers/media/platform/vivid/vivid-ctrls.c @@ -1662,59 +1662,59 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap, v4l2_ctrl_auto_cluster(2, &dev->autogain, 0, true); if (dev->has_vid_cap) { - v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_gen, NULL); - v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_vid, NULL); - v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_aud, NULL); - v4l2_ctrl_add_handler(hdl_vid_cap, hdl_streaming, NULL); - v4l2_ctrl_add_handler(hdl_vid_cap, hdl_sdtv_cap, NULL); - v4l2_ctrl_add_handler(hdl_vid_cap, hdl_loop_cap, NULL); - v4l2_ctrl_add_handler(hdl_vid_cap, hdl_fb, NULL); + v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_gen, NULL, false); + v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_vid, NULL, false); + v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_aud, NULL, false); + v4l2_ctrl_add_handler(hdl_vid_cap, hdl_streaming, NULL, false); + v4l2_ctrl_add_handler(hdl_vid_cap, hdl_sdtv_cap, NULL, false); + v4l2_ctrl_add_handler(hdl_vid_cap, hdl_loop_cap, NULL, false); + v4l2_ctrl_add_handler(hdl_vid_cap, hdl_fb, NULL, false); if (hdl_vid_cap->error) return hdl_vid_cap->error; dev->vid_cap_dev.ctrl_handler = hdl_vid_cap; } if (dev->has_vid_out) { - v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_gen, NULL); - v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_aud, NULL); - v4l2_ctrl_add_handler(hdl_vid_out, hdl_streaming, NULL); - v4l2_ctrl_add_handler(hdl_vid_out, hdl_fb, NULL); + v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_gen, NULL, false); + v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_aud, NULL, false); + v4l2_ctrl_add_handler(hdl_vid_out, hdl_streaming, NULL, false); + v4l2_ctrl_add_handler(hdl_vid_out, hdl_fb, NULL, false); if (hdl_vid_out->error) return hdl_vid_out->error; dev->vid_out_dev.ctrl_handler = hdl_vid_out; } if (dev->has_vbi_cap) { - v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_user_gen, NULL); - v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_streaming, NULL); - v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_sdtv_cap, NULL); - v4l2_ctrl_add_handler(hdl_vbi_cap, 
hdl_loop_cap, NULL); + v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_user_gen, NULL, false); + v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_streaming, NULL, false); + v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_sdtv_cap, NULL, false); + v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_loop_cap, NULL, false); if (hdl_vbi_cap->error) return hdl_vbi_cap->error; dev->vbi_cap_dev.ctrl_handler = hdl_vbi_cap; } if (dev->has_vbi_out) { - v4l2_ctrl_add_handler(hdl_vbi_out, hdl_user_gen, NULL); - v4l2_ctrl_add_handler(hdl_vbi_out, hdl_streaming, NULL); + v4l2_ctrl_add_handler(hdl_vbi_out, hdl_user_gen, NULL, false); + v4l2_ctrl_add_handler(hdl_vbi_out, hdl_streaming, NULL, false); if (hdl_vbi_out->error) return hdl_vbi_out->error; dev->vbi_out_dev.ctrl_handler = hdl_vbi_out; } if (dev->has_radio_rx) { - v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_gen, NULL); - v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_aud, NULL); + v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_gen, NULL, false); + v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_aud, NULL, false); if (hdl_radio_rx->error) return hdl_radio_rx->error; dev->radio_rx_dev.ctrl_handler = hdl_radio_rx; } if (dev->has_radio_tx) { - v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_gen, NULL); - v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_aud, NULL); + v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_gen, NULL, false); + v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_aud, NULL, false); if (hdl_radio_tx->error) return hdl_radio_tx->error; dev->radio_tx_dev.ctrl_handler = hdl_radio_tx; } if (dev->has_sdr_cap) { - v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_user_gen, NULL); - v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_streaming, NULL); + v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_user_gen, NULL, false); + v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_streaming, NULL, false); if (hdl_sdr_cap->error) return hdl_sdr_cap->error; dev->sdr_cap_dev.ctrl_handler = hdl_sdr_cap; diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c index f06003bb8e42..eebfff2126be 100644 --- a/drivers/media/platform/vivid/vivid-kthread-cap.c +++ b/drivers/media/platform/vivid/vivid-kthread-cap.c @@ -703,6 +703,8 @@ static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs) goto update_mv; if (vid_cap_buf) { + v4l2_ctrl_request_setup(vid_cap_buf->vb.vb2_buf.req_obj.req, + &dev->ctrl_hdl_vid_cap); /* Fill buffer */ vivid_fillbuff(dev, vid_cap_buf); dprintk(dev, 1, "filled buffer %d\n", @@ -713,6 +715,8 @@ static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs) dev->fb_cap.fmt.pixelformat == dev->fmt_cap->fourcc) vivid_overlay(dev, vid_cap_buf); + v4l2_ctrl_request_complete(vid_cap_buf->vb.vb2_buf.req_obj.req, + &dev->ctrl_hdl_vid_cap); vb2_buffer_done(&vid_cap_buf->vb.vb2_buf, dev->dqbuf_error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); dprintk(dev, 2, "vid_cap buffer %d done\n", @@ -720,10 +724,14 @@ static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs) } if (vbi_cap_buf) { + v4l2_ctrl_request_setup(vbi_cap_buf->vb.vb2_buf.req_obj.req, + &dev->ctrl_hdl_vbi_cap); if (dev->stream_sliced_vbi_cap) vivid_sliced_vbi_cap_process(dev, vbi_cap_buf); else vivid_raw_vbi_cap_process(dev, vbi_cap_buf); + v4l2_ctrl_request_complete(vbi_cap_buf->vb.vb2_buf.req_obj.req, + &dev->ctrl_hdl_vbi_cap); vb2_buffer_done(&vbi_cap_buf->vb.vb2_buf, dev->dqbuf_error ? 
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); dprintk(dev, 2, "vbi_cap %d done\n", @@ -891,6 +899,8 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming) buf = list_entry(dev->vid_cap_active.next, struct vivid_buffer, list); list_del(&buf->list); + v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, + &dev->ctrl_hdl_vid_cap); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); dprintk(dev, 2, "vid_cap buffer %d done\n", buf->vb.vb2_buf.index); @@ -904,6 +914,8 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming) buf = list_entry(dev->vbi_cap_active.next, struct vivid_buffer, list); list_del(&buf->list); + v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, + &dev->ctrl_hdl_vbi_cap); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); dprintk(dev, 2, "vbi_cap buffer %d done\n", buf->vb.vb2_buf.index); diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c index 9981e7548019..5a14810eeb69 100644 --- a/drivers/media/platform/vivid/vivid-kthread-out.c +++ b/drivers/media/platform/vivid/vivid-kthread-out.c @@ -75,6 +75,10 @@ static void vivid_thread_vid_out_tick(struct vivid_dev *dev) return; if (vid_out_buf) { + v4l2_ctrl_request_setup(vid_out_buf->vb.vb2_buf.req_obj.req, + &dev->ctrl_hdl_vid_out); + v4l2_ctrl_request_complete(vid_out_buf->vb.vb2_buf.req_obj.req, + &dev->ctrl_hdl_vid_out); vid_out_buf->vb.sequence = dev->vid_out_seq_count; if (dev->field_out == V4L2_FIELD_ALTERNATE) { /* @@ -92,6 +96,10 @@ static void vivid_thread_vid_out_tick(struct vivid_dev *dev) } if (vbi_out_buf) { + v4l2_ctrl_request_setup(vbi_out_buf->vb.vb2_buf.req_obj.req, + &dev->ctrl_hdl_vbi_out); + v4l2_ctrl_request_complete(vbi_out_buf->vb.vb2_buf.req_obj.req, + &dev->ctrl_hdl_vbi_out); if (dev->stream_sliced_vbi_out) vivid_sliced_vbi_out_process(dev, vbi_out_buf); @@ -262,6 +270,8 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming) buf = list_entry(dev->vid_out_active.next, struct vivid_buffer, list); list_del(&buf->list); + v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, + &dev->ctrl_hdl_vid_out); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); dprintk(dev, 2, "vid_out buffer %d done\n", buf->vb.vb2_buf.index); @@ -275,6 +285,8 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming) buf = list_entry(dev->vbi_out_active.next, struct vivid_buffer, list); list_del(&buf->list); + v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, + &dev->ctrl_hdl_vbi_out); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); dprintk(dev, 2, "vbi_out buffer %d done\n", buf->vb.vb2_buf.index); diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c index 200b789a3f21..dcdc80e272c2 100644 --- a/drivers/media/platform/vivid/vivid-sdr-cap.c +++ b/drivers/media/platform/vivid/vivid-sdr-cap.c @@ -102,6 +102,10 @@ static void vivid_thread_sdr_cap_tick(struct vivid_dev *dev) if (sdr_cap_buf) { sdr_cap_buf->vb.sequence = dev->sdr_cap_seq_count; + v4l2_ctrl_request_setup(sdr_cap_buf->vb.vb2_buf.req_obj.req, + &dev->ctrl_hdl_sdr_cap); + v4l2_ctrl_request_complete(sdr_cap_buf->vb.vb2_buf.req_obj.req, + &dev->ctrl_hdl_sdr_cap); vivid_sdr_cap_process(dev, sdr_cap_buf); sdr_cap_buf->vb.vb2_buf.timestamp = ktime_get_ns() + dev->time_wrap_offset; @@ -272,6 +276,8 @@ static int sdr_cap_start_streaming(struct vb2_queue *vq, unsigned count) list_for_each_entry_safe(buf, tmp, &dev->sdr_cap_active, list) { 
list_del(&buf->list); + v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, + &dev->ctrl_hdl_sdr_cap); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED); } @@ -293,6 +299,8 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq) buf = list_entry(dev->sdr_cap_active.next, struct vivid_buffer, list); list_del(&buf->list); + v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, + &dev->ctrl_hdl_sdr_cap); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); } @@ -303,12 +311,20 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq) mutex_lock(&dev->mutex); } +static void sdr_cap_buf_request_complete(struct vb2_buffer *vb) +{ + struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); + + v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_sdr_cap); +} + const struct vb2_ops vivid_sdr_cap_qops = { .queue_setup = sdr_cap_queue_setup, .buf_prepare = sdr_cap_buf_prepare, .buf_queue = sdr_cap_buf_queue, .start_streaming = sdr_cap_start_streaming, .stop_streaming = sdr_cap_stop_streaming, + .buf_request_complete = sdr_cap_buf_request_complete, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; diff --git a/drivers/media/platform/vivid/vivid-vbi-cap.c b/drivers/media/platform/vivid/vivid-vbi-cap.c index 92a852955173..903cebeb5ce5 100644 --- a/drivers/media/platform/vivid/vivid-vbi-cap.c +++ b/drivers/media/platform/vivid/vivid-vbi-cap.c @@ -204,6 +204,8 @@ static int vbi_cap_start_streaming(struct vb2_queue *vq, unsigned count) list_for_each_entry_safe(buf, tmp, &dev->vbi_cap_active, list) { list_del(&buf->list); + v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, + &dev->ctrl_hdl_vbi_cap); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED); } @@ -220,12 +222,20 @@ static void vbi_cap_stop_streaming(struct vb2_queue *vq) vivid_stop_generating_vid_cap(dev, &dev->vbi_cap_streaming); } +static void vbi_cap_buf_request_complete(struct vb2_buffer *vb) +{ + struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); + + v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vbi_cap); +} + const struct vb2_ops vivid_vbi_cap_qops = { .queue_setup = vbi_cap_queue_setup, .buf_prepare = vbi_cap_buf_prepare, .buf_queue = vbi_cap_buf_queue, .start_streaming = vbi_cap_start_streaming, .stop_streaming = vbi_cap_stop_streaming, + .buf_request_complete = vbi_cap_buf_request_complete, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; diff --git a/drivers/media/platform/vivid/vivid-vbi-out.c b/drivers/media/platform/vivid/vivid-vbi-out.c index 69486c130a7e..9357c07e30d6 100644 --- a/drivers/media/platform/vivid/vivid-vbi-out.c +++ b/drivers/media/platform/vivid/vivid-vbi-out.c @@ -96,6 +96,8 @@ static int vbi_out_start_streaming(struct vb2_queue *vq, unsigned count) list_for_each_entry_safe(buf, tmp, &dev->vbi_out_active, list) { list_del(&buf->list); + v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, + &dev->ctrl_hdl_vbi_out); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED); } @@ -115,12 +117,20 @@ static void vbi_out_stop_streaming(struct vb2_queue *vq) dev->vbi_out_have_cc[1] = false; } +static void vbi_out_buf_request_complete(struct vb2_buffer *vb) +{ + struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); + + v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vbi_out); +} + const struct vb2_ops vivid_vbi_out_qops = { .queue_setup = vbi_out_queue_setup, .buf_prepare = vbi_out_buf_prepare, .buf_queue = vbi_out_buf_queue, .start_streaming = vbi_out_start_streaming, .stop_streaming = 
vbi_out_stop_streaming, + .buf_request_complete = vbi_out_buf_request_complete, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c index 6cf910a60ecf..9c8e8be81ce3 100644 --- a/drivers/media/platform/vivid/vivid-vid-cap.c +++ b/drivers/media/platform/vivid/vivid-vid-cap.c @@ -243,6 +243,8 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count) list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) { list_del(&buf->list); + v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, + &dev->ctrl_hdl_vid_cap); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED); } @@ -260,6 +262,13 @@ static void vid_cap_stop_streaming(struct vb2_queue *vq) dev->can_loop_video = false; } +static void vid_cap_buf_request_complete(struct vb2_buffer *vb) +{ + struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); + + v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_cap); +} + const struct vb2_ops vivid_vid_cap_qops = { .queue_setup = vid_cap_queue_setup, .buf_prepare = vid_cap_buf_prepare, @@ -267,6 +276,7 @@ const struct vb2_ops vivid_vid_cap_qops = { .buf_queue = vid_cap_buf_queue, .start_streaming = vid_cap_start_streaming, .stop_streaming = vid_cap_stop_streaming, + .buf_request_complete = vid_cap_buf_request_complete, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c index 50248e2176a0..aaf13f03d5d4 100644 --- a/drivers/media/platform/vivid/vivid-vid-out.c +++ b/drivers/media/platform/vivid/vivid-vid-out.c @@ -162,6 +162,8 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count) list_for_each_entry_safe(buf, tmp, &dev->vid_out_active, list) { list_del(&buf->list); + v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, + &dev->ctrl_hdl_vid_out); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED); } @@ -179,12 +181,20 @@ static void vid_out_stop_streaming(struct vb2_queue *vq) dev->can_loop_video = false; } +static void vid_out_buf_request_complete(struct vb2_buffer *vb) +{ + struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); + + v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_out); +} + const struct vb2_ops vivid_vid_out_qops = { .queue_setup = vid_out_queue_setup, .buf_prepare = vid_out_buf_prepare, .buf_queue = vid_out_buf_queue, .start_streaming = vid_out_start_streaming, .stop_streaming = vid_out_stop_streaming, + .buf_request_complete = vid_out_buf_request_complete, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c index aa7f3c307b22..3f401fbd0ecc 100644 --- a/drivers/media/usb/cpia2/cpia2_v4l.c +++ b/drivers/media/usb/cpia2/cpia2_v4l.c @@ -949,7 +949,7 @@ static int cpia2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf) buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer; buf->length = cam->frame_size; buf->reserved2 = 0; - buf->reserved = 0; + buf->request_fd = 0; memset(&buf->timecode, 0, sizeof(buf->timecode)); DBG("DQBUF #%d status:%d seq:%d length:%d\n", buf->index, diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c index f700ec35b7f3..2641e23d946b 100644 --- a/drivers/media/usb/cx231xx/cx231xx-417.c +++ b/drivers/media/usb/cx231xx/cx231xx-417.c @@ -1992,7 +1992,7 @@ 
int cx231xx_417_register(struct cx231xx *dev) dev->mpeg_ctrl_handler.ops = &cx231xx_ops; if (dev->sd_cx25840) v4l2_ctrl_add_handler(&dev->mpeg_ctrl_handler.hdl, - dev->sd_cx25840->ctrl_handler, NULL); + dev->sd_cx25840->ctrl_handler, NULL, false); if (dev->mpeg_ctrl_handler.hdl.error) { err = dev->mpeg_ctrl_handler.hdl.error; dprintk(3, "%s: can't add cx25840 controls\n", dev->name); diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c index f2f034c5cd62..c990f70c0ea6 100644 --- a/drivers/media/usb/cx231xx/cx231xx-video.c +++ b/drivers/media/usb/cx231xx/cx231xx-video.c @@ -2204,10 +2204,10 @@ int cx231xx_register_analog_devices(struct cx231xx *dev) if (dev->sd_cx25840) { v4l2_ctrl_add_handler(&dev->ctrl_handler, - dev->sd_cx25840->ctrl_handler, NULL); + dev->sd_cx25840->ctrl_handler, NULL, true); v4l2_ctrl_add_handler(&dev->radio_ctrl_handler, dev->sd_cx25840->ctrl_handler, - v4l2_ctrl_radio_filter); + v4l2_ctrl_radio_filter, true); } if (dev->ctrl_handler.error) diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c index 0fc4076c6d16..10b5b95bee34 100644 --- a/drivers/media/usb/msi2500/msi2500.c +++ b/drivers/media/usb/msi2500/msi2500.c @@ -1278,7 +1278,7 @@ static int msi2500_probe(struct usb_interface *intf, } /* currently all controls are from subdev */ - v4l2_ctrl_add_handler(&dev->hdl, sd->ctrl_handler, NULL); + v4l2_ctrl_add_handler(&dev->hdl, sd->ctrl_handler, NULL, true); dev->v4l2_dev.ctrl_handler = &dev->hdl; dev->vdev.v4l2_dev = &dev->v4l2_dev; diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c index 6c992f197255..ee7b5318b351 100644 --- a/drivers/media/usb/tm6000/tm6000-video.c +++ b/drivers/media/usb/tm6000/tm6000-video.c @@ -1627,7 +1627,7 @@ int tm6000_v4l2_register(struct tm6000_core *dev) v4l2_ctrl_new_std(&dev->ctrl_handler, &tm6000_ctrl_ops, V4L2_CID_HUE, -128, 127, 1, 0); v4l2_ctrl_add_handler(&dev->ctrl_handler, - &dev->radio_ctrl_handler, NULL); + &dev->radio_ctrl_handler, NULL, false); if (dev->radio_ctrl_handler.error) ret = dev->radio_ctrl_handler.error; diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c index fecccb5e7628..8964e16f2b22 100644 --- a/drivers/media/usb/uvc/uvc_queue.c +++ b/drivers/media/usb/uvc/uvc_queue.c @@ -300,12 +300,13 @@ int uvc_create_buffers(struct uvc_video_queue *queue, return ret; } -int uvc_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf) +int uvc_queue_buffer(struct uvc_video_queue *queue, + struct media_device *mdev, struct v4l2_buffer *buf) { int ret; mutex_lock(&queue->mutex); - ret = vb2_qbuf(&queue->queue, buf); + ret = vb2_qbuf(&queue->queue, mdev, buf); mutex_unlock(&queue->mutex); return ret; diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index b26182ce7462..84be596d3269 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ b/drivers/media/usb/uvc/uvc_v4l2.c @@ -751,7 +751,8 @@ static int uvc_ioctl_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) if (!uvc_has_privileges(handle)) return -EBUSY; - return uvc_queue_buffer(&stream->queue, buf); + return uvc_queue_buffer(&stream->queue, + stream->vdev.v4l2_dev->mdev, buf); } static int uvc_ioctl_expbuf(struct file *file, void *fh, diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h index 591eae3d0b0d..c0cbd833d0a4 100644 --- a/drivers/media/usb/uvc/uvcvideo.h +++ b/drivers/media/usb/uvc/uvcvideo.h @@ -700,6 +700,7 @@ int 
uvc_query_buffer(struct uvc_video_queue *queue, int uvc_create_buffers(struct uvc_video_queue *queue, struct v4l2_create_buffers *v4l2_cb); int uvc_queue_buffer(struct uvc_video_queue *queue, + struct media_device *mdev, struct v4l2_buffer *v4l2_buf); int uvc_export_buffer(struct uvc_video_queue *queue, struct v4l2_exportbuffer *exp); diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 6481212fda77..f4325329fbd6 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -244,6 +244,7 @@ struct v4l2_format32 { * return: number of created buffers * @memory: buffer memory type * @format: frame format, for which buffers are requested + * @capabilities: capabilities of this buffer type. * @reserved: future extensions */ struct v4l2_create_buffers32 { @@ -251,7 +252,8 @@ struct v4l2_create_buffers32 { __u32 count; __u32 memory; /* enum v4l2_memory */ struct v4l2_format32 format; - __u32 reserved[8]; + __u32 capabilities; + __u32 reserved[7]; }; static int __bufsize_v4l2_format(struct v4l2_format32 __user *p32, u32 *size) @@ -411,6 +413,7 @@ static int put_v4l2_create32(struct v4l2_create_buffers __user *p64, if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) || copy_in_user(p32, p64, offsetof(struct v4l2_create_buffers32, format)) || + assign_in_user(&p32->capabilities, &p64->capabilities) || copy_in_user(p32->reserved, p64->reserved, sizeof(p64->reserved))) return -EFAULT; return __put_v4l2_format32(&p64->format, &p32->format); @@ -482,7 +485,7 @@ struct v4l2_buffer32 { } m; __u32 length; __u32 reserved2; - __u32 reserved; + __s32 request_fd; }; static int get_v4l2_plane32(struct v4l2_plane __user *p64, @@ -581,6 +584,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer __user *p64, { u32 type; u32 length; + s32 request_fd; enum v4l2_memory memory; struct v4l2_plane32 __user *uplane32; struct v4l2_plane __user *uplane; @@ -595,7 +599,9 @@ static int get_v4l2_buffer32(struct v4l2_buffer __user *p64, get_user(memory, &p32->memory) || put_user(memory, &p64->memory) || get_user(length, &p32->length) || - put_user(length, &p64->length)) + put_user(length, &p64->length) || + get_user(request_fd, &p32->request_fd) || + put_user(request_fd, &p64->request_fd)) return -EFAULT; if (V4L2_TYPE_IS_OUTPUT(type)) @@ -699,7 +705,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer __user *p64, copy_in_user(&p32->timecode, &p64->timecode, sizeof(p64->timecode)) || assign_in_user(&p32->sequence, &p64->sequence) || assign_in_user(&p32->reserved2, &p64->reserved2) || - assign_in_user(&p32->reserved, &p64->reserved) || + assign_in_user(&p32->request_fd, &p64->request_fd) || get_user(length, &p64->length) || put_user(length, &p32->length)) return -EFAULT; @@ -834,7 +840,8 @@ struct v4l2_ext_controls32 { __u32 which; __u32 count; __u32 error_idx; - __u32 reserved[2]; + __s32 request_fd; + __u32 reserved[1]; compat_caddr_t controls; /* actually struct v4l2_ext_control32 * */ }; @@ -909,6 +916,7 @@ static int get_v4l2_ext_controls32(struct file *file, get_user(count, &p32->count) || put_user(count, &p64->count) || assign_in_user(&p64->error_idx, &p32->error_idx) || + assign_in_user(&p64->request_fd, &p32->request_fd) || copy_in_user(p64->reserved, p32->reserved, sizeof(p64->reserved))) return -EFAULT; @@ -974,6 +982,7 @@ static int put_v4l2_ext_controls32(struct file *file, get_user(count, &p64->count) || put_user(count, &p32->count) || assign_in_user(&p32->error_idx, &p64->error_idx) || + 
assign_in_user(&p32->request_fd, &p64->request_fd) || copy_in_user(p32->reserved, p64->reserved, sizeof(p32->reserved)) || get_user(kcontrols, &p64->controls)) return -EFAULT; diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index 4c0ecf29d278..6e37950292cd 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -37,8 +37,8 @@ struct v4l2_ctrl_helper { /* Pointer to the control reference of the master control */ struct v4l2_ctrl_ref *mref; - /* The control corresponding to the v4l2_ext_control ID field. */ - struct v4l2_ctrl *ctrl; + /* The control ref corresponding to the v4l2_ext_control ID field. */ + struct v4l2_ctrl_ref *ref; /* v4l2_ext_control index of the next control belonging to the same cluster, or 0 if there isn't any. */ u32 next; @@ -844,6 +844,8 @@ const char *v4l2_ctrl_get_name(u32 id) case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range"; case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header"; case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: return "Force Key Frame"; + case V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS: return "MPEG-2 Slice Parameters"; + case V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION: return "MPEG-2 Quantization Matrices"; /* VPX controls */ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions"; @@ -1292,6 +1294,12 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, case V4L2_CID_RDS_TX_ALT_FREQS: *type = V4L2_CTRL_TYPE_U32; break; + case V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS: + *type = V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS; + break; + case V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION: + *type = V4L2_CTRL_TYPE_MPEG2_QUANTIZATION; + break; default: *type = V4L2_CTRL_TYPE_INTEGER; break; @@ -1550,6 +1558,7 @@ static void std_log(const struct v4l2_ctrl *ctrl) static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx, union v4l2_ctrl_ptr ptr) { + struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params; size_t len; u64 offset; s64 val; @@ -1612,6 +1621,54 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx, return -ERANGE; return 0; + case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS: + p_mpeg2_slice_params = ptr.p; + + switch (p_mpeg2_slice_params->sequence.chroma_format) { + case 1: /* 4:2:0 */ + case 2: /* 4:2:2 */ + case 3: /* 4:4:4 */ + break; + default: + return -EINVAL; + } + + switch (p_mpeg2_slice_params->picture.intra_dc_precision) { + case 0: /* 8 bits */ + case 1: /* 9 bits */ + case 11: /* 11 bits */ + break; + default: + return -EINVAL; + } + + switch (p_mpeg2_slice_params->picture.picture_structure) { + case 1: /* interlaced top field */ + case 2: /* interlaced bottom field */ + case 3: /* progressive */ + break; + default: + return -EINVAL; + } + + switch (p_mpeg2_slice_params->picture.picture_coding_type) { + case V4L2_MPEG2_PICTURE_CODING_TYPE_I: + case V4L2_MPEG2_PICTURE_CODING_TYPE_P: + case V4L2_MPEG2_PICTURE_CODING_TYPE_B: + break; + default: + return -EINVAL; + } + + if (p_mpeg2_slice_params->backward_ref_index >= VIDEO_MAX_FRAME || + p_mpeg2_slice_params->forward_ref_index >= VIDEO_MAX_FRAME) + return -EINVAL; + + return 0; + + case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION: + return 0; + default: return -EINVAL; } @@ -1668,6 +1725,13 @@ static int new_to_user(struct v4l2_ext_control *c, return ptr_to_user(c, ctrl, ctrl->p_new); } +/* Helper function: copy the request value back to the caller */ +static int req_to_user(struct v4l2_ext_control *c, + struct v4l2_ctrl_ref *ref) +{ + return 
ptr_to_user(c, ref->ctrl, ref->p_req); +} + /* Helper function: copy the initial control value back to the caller */ static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) { @@ -1787,6 +1851,26 @@ static void cur_to_new(struct v4l2_ctrl *ctrl) ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new); } +/* Copy the new value to the request value */ +static void new_to_req(struct v4l2_ctrl_ref *ref) +{ + if (!ref) + return; + ptr_to_ptr(ref->ctrl, ref->ctrl->p_new, ref->p_req); + ref->req = ref; +} + +/* Copy the request value to the new value */ +static void req_to_new(struct v4l2_ctrl_ref *ref) +{ + if (!ref) + return; + if (ref->req) + ptr_to_ptr(ref->ctrl, ref->req->p_req, ref->ctrl->p_new); + else + ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->ctrl->p_new); +} + /* Return non-zero if one or more of the controls in the cluster has a new value that differs from the current value. */ static int cluster_changed(struct v4l2_ctrl *master) @@ -1896,11 +1980,15 @@ int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl, lockdep_set_class_and_name(hdl->lock, key, name); INIT_LIST_HEAD(&hdl->ctrls); INIT_LIST_HEAD(&hdl->ctrl_refs); + INIT_LIST_HEAD(&hdl->requests); + INIT_LIST_HEAD(&hdl->requests_queued); + hdl->request_is_queued = false; hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8; hdl->buckets = kvmalloc_array(hdl->nr_of_buckets, sizeof(hdl->buckets[0]), GFP_KERNEL | __GFP_ZERO); hdl->error = hdl->buckets ? 0 : -ENOMEM; + media_request_object_init(&hdl->req_obj); return hdl->error; } EXPORT_SYMBOL(v4l2_ctrl_handler_init_class); @@ -1915,6 +2003,14 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl) if (hdl == NULL || hdl->buckets == NULL) return; + if (!hdl->req_obj.req && !list_empty(&hdl->requests)) { + struct v4l2_ctrl_handler *req, *next_req; + + list_for_each_entry_safe(req, next_req, &hdl->requests, requests) { + media_request_object_unbind(&req->req_obj); + media_request_object_put(&req->req_obj); + } + } mutex_lock(hdl->lock); /* Free all nodes */ list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) { @@ -2016,13 +2112,19 @@ EXPORT_SYMBOL(v4l2_ctrl_find); /* Allocate a new v4l2_ctrl_ref and hook it into the handler. */ static int handler_new_ref(struct v4l2_ctrl_handler *hdl, - struct v4l2_ctrl *ctrl) + struct v4l2_ctrl *ctrl, + struct v4l2_ctrl_ref **ctrl_ref, + bool from_other_dev, bool allocate_req) { struct v4l2_ctrl_ref *ref; struct v4l2_ctrl_ref *new_ref; u32 id = ctrl->id; u32 class_ctrl = V4L2_CTRL_ID2WHICH(id) | 1; int bucket = id % hdl->nr_of_buckets; /* which bucket to use */ + unsigned int size_extra_req = 0; + + if (ctrl_ref) + *ctrl_ref = NULL; /* * Automatically add the control class if it is not yet present and @@ -2036,10 +2138,16 @@ static int handler_new_ref(struct v4l2_ctrl_handler *hdl, if (hdl->error) return hdl->error; - new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL); + if (allocate_req) + size_extra_req = ctrl->elems * ctrl->elem_size; + new_ref = kzalloc(sizeof(*new_ref) + size_extra_req, GFP_KERNEL); if (!new_ref) return handler_set_err(hdl, -ENOMEM); new_ref->ctrl = ctrl; + new_ref->from_other_dev = from_other_dev; + if (size_extra_req) + new_ref->p_req.p = &new_ref[1]; + if (ctrl->handler == hdl) { /* By default each control starts in a cluster of its own. 
new_ref->ctrl is basically a cluster array with one @@ -2079,6 +2187,8 @@ insert_in_hash: /* Insert the control node in the hash */ new_ref->next = hdl->buckets[bucket]; hdl->buckets[bucket] = new_ref; + if (ctrl_ref) + *ctrl_ref = new_ref; unlock: mutex_unlock(hdl->lock); @@ -2133,6 +2243,12 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl, case V4L2_CTRL_TYPE_U32: elem_size = sizeof(u32); break; + case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS: + elem_size = sizeof(struct v4l2_ctrl_mpeg2_slice_params); + break; + case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION: + elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantization); + break; default: if (type < V4L2_CTRL_COMPOUND_TYPES) elem_size = sizeof(s32); @@ -2220,7 +2336,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl, ctrl->type_ops->init(ctrl, idx, ctrl->p_new); } - if (handler_new_ref(hdl, ctrl)) { + if (handler_new_ref(hdl, ctrl, NULL, false, false)) { kvfree(ctrl); return NULL; } @@ -2389,7 +2505,8 @@ EXPORT_SYMBOL(v4l2_ctrl_new_int_menu); /* Add the controls from another handler to our own. */ int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl, struct v4l2_ctrl_handler *add, - bool (*filter)(const struct v4l2_ctrl *ctrl)) + bool (*filter)(const struct v4l2_ctrl *ctrl), + bool from_other_dev) { struct v4l2_ctrl_ref *ref; int ret = 0; @@ -2412,7 +2529,7 @@ int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl, /* Filter any unwanted controls */ if (filter && !filter(ctrl)) continue; - ret = handler_new_ref(hdl, ctrl); + ret = handler_new_ref(hdl, ctrl, NULL, from_other_dev, false); if (ret) break; } @@ -2815,6 +2932,148 @@ int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm) } EXPORT_SYMBOL(v4l2_querymenu); +static int v4l2_ctrl_request_clone(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_handler *from) +{ + struct v4l2_ctrl_ref *ref; + int err = 0; + + if (WARN_ON(!hdl || hdl == from)) + return -EINVAL; + + if (hdl->error) + return hdl->error; + + WARN_ON(hdl->lock != &hdl->_lock); + + mutex_lock(from->lock); + list_for_each_entry(ref, &from->ctrl_refs, node) { + struct v4l2_ctrl *ctrl = ref->ctrl; + struct v4l2_ctrl_ref *new_ref; + + /* Skip refs inherited from other devices */ + if (ref->from_other_dev) + continue; + /* And buttons */ + if (ctrl->type == V4L2_CTRL_TYPE_BUTTON) + continue; + err = handler_new_ref(hdl, ctrl, &new_ref, false, true); + if (err) + break; + } + mutex_unlock(from->lock); + return err; +} + +static void v4l2_ctrl_request_queue(struct media_request_object *obj) +{ + struct v4l2_ctrl_handler *hdl = + container_of(obj, struct v4l2_ctrl_handler, req_obj); + struct v4l2_ctrl_handler *main_hdl = obj->priv; + struct v4l2_ctrl_handler *prev_hdl = NULL; + struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL; + + if (list_empty(&main_hdl->requests_queued)) + goto queue; + + prev_hdl = list_last_entry(&main_hdl->requests_queued, + struct v4l2_ctrl_handler, requests_queued); + /* + * Note: prev_hdl and hdl must contain the same list of control + * references, so if any differences are detected then that is a + * driver bug and the WARN_ON is triggered. + */ + mutex_lock(prev_hdl->lock); + ref_ctrl_prev = list_first_entry(&prev_hdl->ctrl_refs, + struct v4l2_ctrl_ref, node); + list_for_each_entry(ref_ctrl, &hdl->ctrl_refs, node) { + if (ref_ctrl->req) + continue; + while (ref_ctrl_prev->ctrl->id < ref_ctrl->ctrl->id) { + /* Should never happen, but just in case... 
*/ + if (list_is_last(&ref_ctrl_prev->node, + &prev_hdl->ctrl_refs)) + break; + ref_ctrl_prev = list_next_entry(ref_ctrl_prev, node); + } + if (WARN_ON(ref_ctrl_prev->ctrl->id != ref_ctrl->ctrl->id)) + break; + ref_ctrl->req = ref_ctrl_prev->req; + } + mutex_unlock(prev_hdl->lock); +queue: + list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued); + hdl->request_is_queued = true; +} + +static void v4l2_ctrl_request_unbind(struct media_request_object *obj) +{ + struct v4l2_ctrl_handler *hdl = + container_of(obj, struct v4l2_ctrl_handler, req_obj); + + list_del_init(&hdl->requests); + if (hdl->request_is_queued) { + list_del_init(&hdl->requests_queued); + hdl->request_is_queued = false; + } +} + +static void v4l2_ctrl_request_release(struct media_request_object *obj) +{ + struct v4l2_ctrl_handler *hdl = + container_of(obj, struct v4l2_ctrl_handler, req_obj); + + v4l2_ctrl_handler_free(hdl); + kfree(hdl); +} + +static const struct media_request_object_ops req_ops = { + .queue = v4l2_ctrl_request_queue, + .unbind = v4l2_ctrl_request_unbind, + .release = v4l2_ctrl_request_release, +}; + +struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req, + struct v4l2_ctrl_handler *parent) +{ + struct media_request_object *obj; + + if (WARN_ON(req->state != MEDIA_REQUEST_STATE_VALIDATING && + req->state != MEDIA_REQUEST_STATE_QUEUED)) + return NULL; + + obj = media_request_object_find(req, &req_ops, parent); + if (obj) + return container_of(obj, struct v4l2_ctrl_handler, req_obj); + return NULL; +} +EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_find); + +struct v4l2_ctrl * +v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id) +{ + struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id); + + return (ref && ref->req == ref) ? ref->ctrl : NULL; +} +EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find); + +static int v4l2_ctrl_request_bind(struct media_request *req, + struct v4l2_ctrl_handler *hdl, + struct v4l2_ctrl_handler *from) +{ + int ret; + + ret = v4l2_ctrl_request_clone(hdl, from); + + if (!ret) { + ret = media_request_object_bind(req, &req_ops, + from, false, &hdl->req_obj); + if (!ret) + list_add_tail(&hdl->requests, &from->requests); + } + return ret; +} /* Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS: @@ -2876,6 +3135,7 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl, if (cs->which && cs->which != V4L2_CTRL_WHICH_DEF_VAL && + cs->which != V4L2_CTRL_WHICH_REQUEST_VAL && V4L2_CTRL_ID2WHICH(id) != cs->which) return -EINVAL; @@ -2886,6 +3146,7 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl, ref = find_ref_lock(hdl, id); if (ref == NULL) return -EINVAL; + h->ref = ref; ctrl = ref->ctrl; if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED) return -EINVAL; @@ -2908,7 +3169,6 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl, } /* Store the ref to the master control of the cluster */ h->mref = ref; - h->ctrl = ctrl; /* Initially set next to 0, meaning that there is no other control in this helper array belonging to the same cluster */ @@ -2955,15 +3215,15 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl, whether there are any controls at all. */ static int class_check(struct v4l2_ctrl_handler *hdl, u32 which) { - if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL) + if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL || + which == V4L2_CTRL_WHICH_REQUEST_VAL) return 0; return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL; } - - /* Get extended controls. Allocates the helpers array if needed. 
*/ -int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs) +static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl, + struct v4l2_ext_controls *cs) { struct v4l2_ctrl_helper helper[4]; struct v4l2_ctrl_helper *helpers = helper; @@ -2993,7 +3253,7 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs cs->error_idx = cs->count; for (i = 0; !ret && i < cs->count; i++) - if (helpers[i].ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY) + if (helpers[i].ref->ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY) ret = -EACCES; for (i = 0; !ret && i < cs->count; i++) { @@ -3027,8 +3287,12 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs u32 idx = i; do { - ret = ctrl_to_user(cs->controls + idx, - helpers[idx].ctrl); + if (helpers[idx].ref->req) + ret = req_to_user(cs->controls + idx, + helpers[idx].ref->req); + else + ret = ctrl_to_user(cs->controls + idx, + helpers[idx].ref->ctrl); idx = helpers[idx].next; } while (!ret && idx); } @@ -3039,6 +3303,91 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs kvfree(helpers); return ret; } + +static struct media_request_object * +v4l2_ctrls_find_req_obj(struct v4l2_ctrl_handler *hdl, + struct media_request *req, bool set) +{ + struct media_request_object *obj; + struct v4l2_ctrl_handler *new_hdl; + int ret; + + if (IS_ERR(req)) + return ERR_CAST(req); + + if (set && WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING)) + return ERR_PTR(-EBUSY); + + obj = media_request_object_find(req, &req_ops, hdl); + if (obj) + return obj; + if (!set) + return ERR_PTR(-ENOENT); + + new_hdl = kzalloc(sizeof(*new_hdl), GFP_KERNEL); + if (!new_hdl) + return ERR_PTR(-ENOMEM); + + obj = &new_hdl->req_obj; + ret = v4l2_ctrl_handler_init(new_hdl, (hdl->nr_of_buckets - 1) * 8); + if (!ret) + ret = v4l2_ctrl_request_bind(req, new_hdl, hdl); + if (ret) { + kfree(new_hdl); + + return ERR_PTR(ret); + } + + media_request_object_get(obj); + return obj; +} + +int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct media_device *mdev, + struct v4l2_ext_controls *cs) +{ + struct media_request_object *obj = NULL; + struct media_request *req = NULL; + int ret; + + if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) { + if (!mdev || cs->request_fd < 0) + return -EINVAL; + + req = media_request_get_by_fd(mdev, cs->request_fd); + if (IS_ERR(req)) + return PTR_ERR(req); + + if (req->state != MEDIA_REQUEST_STATE_COMPLETE) { + media_request_put(req); + return -EACCES; + } + + ret = media_request_lock_for_access(req); + if (ret) { + media_request_put(req); + return ret; + } + + obj = v4l2_ctrls_find_req_obj(hdl, req, false); + if (IS_ERR(obj)) { + media_request_unlock_for_access(req); + media_request_put(req); + return PTR_ERR(obj); + } + + hdl = container_of(obj, struct v4l2_ctrl_handler, + req_obj); + } + + ret = v4l2_g_ext_ctrls_common(hdl, cs); + + if (obj) { + media_request_unlock_for_access(req); + media_request_object_put(obj); + media_request_put(req); + } + return ret; +} EXPORT_SYMBOL(v4l2_g_ext_ctrls); /* Helper function to get a single control */ @@ -3180,7 +3529,7 @@ static int validate_ctrls(struct v4l2_ext_controls *cs, cs->error_idx = cs->count; for (i = 0; i < cs->count; i++) { - struct v4l2_ctrl *ctrl = helpers[i].ctrl; + struct v4l2_ctrl *ctrl = helpers[i].ref->ctrl; union v4l2_ctrl_ptr p_new; cs->error_idx = i; @@ -3227,9 +3576,9 @@ static void update_from_auto_cluster(struct v4l2_ctrl *master) } /* Try or try-and-set controls */ -static int try_set_ext_ctrls(struct 
v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, - struct v4l2_ext_controls *cs, - bool set) +static int try_set_ext_ctrls_common(struct v4l2_fh *fh, + struct v4l2_ctrl_handler *hdl, + struct v4l2_ext_controls *cs, bool set) { struct v4l2_ctrl_helper helper[4]; struct v4l2_ctrl_helper *helpers = helper; @@ -3292,7 +3641,7 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, do { /* Check if the auto control is part of the list, and remember the new value. */ - if (helpers[tmp_idx].ctrl == master) + if (helpers[tmp_idx].ref->ctrl == master) new_auto_val = cs->controls[tmp_idx].value; tmp_idx = helpers[tmp_idx].next; } while (tmp_idx); @@ -3305,7 +3654,7 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, /* Copy the new caller-supplied control values. user_to_new() sets 'is_new' to 1. */ do { - struct v4l2_ctrl *ctrl = helpers[idx].ctrl; + struct v4l2_ctrl *ctrl = helpers[idx].ref->ctrl; ret = user_to_new(cs->controls + idx, ctrl); if (!ret && ctrl->is_ptr) @@ -3314,14 +3663,23 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, } while (!ret && idx); if (!ret) - ret = try_or_set_cluster(fh, master, set, 0); + ret = try_or_set_cluster(fh, master, + !hdl->req_obj.req && set, 0); + if (!ret && hdl->req_obj.req && set) { + for (j = 0; j < master->ncontrols; j++) { + struct v4l2_ctrl_ref *ref = + find_ref(hdl, master->cluster[j]->id); + + new_to_req(ref); + } + } /* Copy the new values back to userspace. */ if (!ret) { idx = i; do { ret = new_to_user(cs->controls + idx, - helpers[idx].ctrl); + helpers[idx].ref->ctrl); idx = helpers[idx].next; } while (!ret && idx); } @@ -3333,16 +3691,60 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, return ret; } -int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs) +static int try_set_ext_ctrls(struct v4l2_fh *fh, + struct v4l2_ctrl_handler *hdl, struct media_device *mdev, + struct v4l2_ext_controls *cs, bool set) +{ + struct media_request_object *obj = NULL; + struct media_request *req = NULL; + int ret; + + if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) { + if (!mdev || cs->request_fd < 0) + return -EINVAL; + + req = media_request_get_by_fd(mdev, cs->request_fd); + if (IS_ERR(req)) + return PTR_ERR(req); + + ret = media_request_lock_for_update(req); + if (ret) { + media_request_put(req); + return ret; + } + + obj = v4l2_ctrls_find_req_obj(hdl, req, set); + if (IS_ERR(obj)) { + media_request_unlock_for_update(req); + media_request_put(req); + return PTR_ERR(obj); + } + hdl = container_of(obj, struct v4l2_ctrl_handler, + req_obj); + } + + ret = try_set_ext_ctrls_common(fh, hdl, cs, set); + + if (obj) { + media_request_unlock_for_update(req); + media_request_object_put(obj); + media_request_put(req); + } + + return ret; +} + +int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct media_device *mdev, + struct v4l2_ext_controls *cs) { - return try_set_ext_ctrls(NULL, hdl, cs, false); + return try_set_ext_ctrls(NULL, hdl, mdev, cs, false); } EXPORT_SYMBOL(v4l2_try_ext_ctrls); int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, - struct v4l2_ext_controls *cs) + struct media_device *mdev, struct v4l2_ext_controls *cs) { - return try_set_ext_ctrls(fh, hdl, cs, true); + return try_set_ext_ctrls(fh, hdl, mdev, cs, true); } EXPORT_SYMBOL(v4l2_s_ext_ctrls); @@ -3441,6 +3843,162 @@ int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s) } EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string); 
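/*
 * Driver-side use of the request helpers added below, following the pattern
 * this series wires into vim2m and vivid: v4l2_ctrl_request_setup() applies
 * the control values carried by a buffer's request before the frame is
 * processed, and v4l2_ctrl_request_complete() marks them done afterwards.
 * This is a minimal sketch, not part of the patch; struct my_ctx, its hdl
 * member and my_process_buffer() are illustrative names only.
 */
#include <media/v4l2-ctrls.h>
#include <media/videobuf2-core.h>

struct my_ctx {
	struct v4l2_ctrl_handler hdl;	/* the driver's request-aware handler */
};

static void my_process_buffer(struct vb2_buffer *vb)
{
	struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	/* Apply per-request controls; both helpers accept a NULL request */
	v4l2_ctrl_request_setup(vb->req_obj.req, &ctx->hdl);

	/* ... program the hardware / process the frame here ... */

	/* Tell the framework this driver is done with the request's controls */
	v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->hdl);
}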
+void v4l2_ctrl_request_complete(struct media_request *req, + struct v4l2_ctrl_handler *main_hdl) +{ + struct media_request_object *obj; + struct v4l2_ctrl_handler *hdl; + struct v4l2_ctrl_ref *ref; + + if (!req || !main_hdl) + return; + + /* + * Note that it is valid if nothing was found. It means + * that this request doesn't have any controls and so just + * wants to leave the controls unchanged. + */ + obj = media_request_object_find(req, &req_ops, main_hdl); + if (!obj) + return; + hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); + + list_for_each_entry(ref, &hdl->ctrl_refs, node) { + struct v4l2_ctrl *ctrl = ref->ctrl; + struct v4l2_ctrl *master = ctrl->cluster[0]; + unsigned int i; + + if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) { + ref->req = ref; + + v4l2_ctrl_lock(master); + /* g_volatile_ctrl will update the current control values */ + for (i = 0; i < master->ncontrols; i++) + cur_to_new(master->cluster[i]); + call_op(master, g_volatile_ctrl); + new_to_req(ref); + v4l2_ctrl_unlock(master); + continue; + } + if (ref->req == ref) + continue; + + v4l2_ctrl_lock(ctrl); + if (ref->req) + ptr_to_ptr(ctrl, ref->req->p_req, ref->p_req); + else + ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req); + v4l2_ctrl_unlock(ctrl); + } + + WARN_ON(!hdl->request_is_queued); + list_del_init(&hdl->requests_queued); + hdl->request_is_queued = false; + media_request_object_complete(obj); + media_request_object_put(obj); +} +EXPORT_SYMBOL(v4l2_ctrl_request_complete); + +void v4l2_ctrl_request_setup(struct media_request *req, + struct v4l2_ctrl_handler *main_hdl) +{ + struct media_request_object *obj; + struct v4l2_ctrl_handler *hdl; + struct v4l2_ctrl_ref *ref; + + if (!req || !main_hdl) + return; + + if (WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED)) + return; + + /* + * Note that it is valid if nothing was found. It means + * that this request doesn't have any controls and so just + * wants to leave the controls unchanged. + */ + obj = media_request_object_find(req, &req_ops, main_hdl); + if (!obj) + return; + if (obj->completed) { + media_request_object_put(obj); + return; + } + hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); + + list_for_each_entry(ref, &hdl->ctrl_refs, node) + ref->req_done = false; + + list_for_each_entry(ref, &hdl->ctrl_refs, node) { + struct v4l2_ctrl *ctrl = ref->ctrl; + struct v4l2_ctrl *master = ctrl->cluster[0]; + bool have_new_data = false; + int i; + + /* + * Skip if this control was already handled by a cluster. + * Skip button controls and read-only controls. + */ + if (ref->req_done || ctrl->type == V4L2_CTRL_TYPE_BUTTON || + (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)) + continue; + + v4l2_ctrl_lock(master); + for (i = 0; i < master->ncontrols; i++) { + if (master->cluster[i]) { + struct v4l2_ctrl_ref *r = + find_ref(hdl, master->cluster[i]->id); + + if (r->req && r == r->req) { + have_new_data = true; + break; + } + } + } + if (!have_new_data) { + v4l2_ctrl_unlock(master); + continue; + } + + for (i = 0; i < master->ncontrols; i++) { + if (master->cluster[i]) { + struct v4l2_ctrl_ref *r = + find_ref(hdl, master->cluster[i]->id); + + req_to_new(r); + master->cluster[i]->is_new = 1; + r->req_done = true; + } + } + /* + * For volatile autoclusters that are currently in auto mode + * we need to discover if it will be set to manual mode. + * If so, then we have to copy the current volatile values + * first since those will become the new manual values (which + * may be overwritten by explicit new values from this set + * of controls). 
+ */ + if (master->is_auto && master->has_volatiles && + !is_cur_manual(master)) { + s32 new_auto_val = *master->p_new.p_s32; + + /* + * If the new value == the manual value, then copy + * the current volatile values. + */ + if (new_auto_val == master->manual_mode_value) + update_from_auto_cluster(master); + } + + try_or_set_cluster(NULL, master, true, 0); + + v4l2_ctrl_unlock(master); + } + + media_request_object_put(obj); +} +EXPORT_SYMBOL(v4l2_ctrl_request_setup); + void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv) { if (ctrl == NULL) diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c index 69e775930fc4..feb749aaaa42 100644 --- a/drivers/media/v4l2-core/v4l2-dev.c +++ b/drivers/media/v4l2-core/v4l2-dev.c @@ -444,8 +444,22 @@ static int v4l2_release(struct inode *inode, struct file *filp) struct video_device *vdev = video_devdata(filp); int ret = 0; - if (vdev->fops->release) - ret = vdev->fops->release(filp); + /* + * We need to serialize the release() with queueing new requests. + * The release() may trigger the cancellation of a streaming + * operation, and that should not be mixed with queueing a new + * request at the same time. + */ + if (vdev->fops->release) { + if (v4l2_device_supports_requests(vdev->v4l2_dev)) { + mutex_lock(&vdev->v4l2_dev->mdev->req_queue_mutex); + ret = vdev->fops->release(filp); + mutex_unlock(&vdev->v4l2_dev->mdev->req_queue_mutex); + } else { + ret = vdev->fops->release(filp); + } + } + if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP) dprintk("%s: release\n", video_device_node_name(vdev)); diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c index 098562901f25..df0ac38c4050 100644 --- a/drivers/media/v4l2-core/v4l2-device.c +++ b/drivers/media/v4l2-core/v4l2-device.c @@ -178,7 +178,8 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, sd->v4l2_dev = v4l2_dev; /* This just returns 0 if either of the two args is NULL */ - err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler, NULL); + err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler, + NULL, true); if (err) goto error_module; diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 7de041bae84f..c63746968fa3 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -474,13 +474,13 @@ static void v4l_print_buffer(const void *arg, bool write_only) const struct v4l2_plane *plane; int i; - pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, flags=0x%08x, field=%s, sequence=%d, memory=%s", + pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, request_fd=%d, flags=0x%08x, field=%s, sequence=%d, memory=%s", p->timestamp.tv_sec / 3600, (int)(p->timestamp.tv_sec / 60) % 60, (int)(p->timestamp.tv_sec % 60), (long)p->timestamp.tv_usec, p->index, - prt_names(p->type, v4l2_type_names), + prt_names(p->type, v4l2_type_names), p->request_fd, p->flags, prt_names(p->field, v4l2_field_names), p->sequence, prt_names(p->memory, v4l2_memory_names)); @@ -590,8 +590,8 @@ static void v4l_print_ext_controls(const void *arg, bool write_only) const struct v4l2_ext_controls *p = arg; int i; - pr_cont("which=0x%x, count=%d, error_idx=%d", - p->which, p->count, p->error_idx); + pr_cont("which=0x%x, count=%d, error_idx=%d, request_fd=%d", + p->which, p->count, p->error_idx, p->request_fd); for (i = 0; i < p->count; i++) { if (!p->controls[i].size) pr_cont(", id/val=0x%x/0x%x", @@ -907,7 +907,7 @@ static int 
check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv) __u32 i; /* zero the reserved fields */ - c->reserved[0] = c->reserved[1] = 0; + c->reserved[0] = 0; for (i = 0; i < c->count; i++) c->controls[i].reserved2[0] = 0; @@ -1309,6 +1309,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_H263: descr = "H.263"; break; case V4L2_PIX_FMT_MPEG1: descr = "MPEG-1 ES"; break; case V4L2_PIX_FMT_MPEG2: descr = "MPEG-2 ES"; break; + case V4L2_PIX_FMT_MPEG2_SLICE: descr = "MPEG-2 Parsed Slice Data"; break; case V4L2_PIX_FMT_MPEG4: descr = "MPEG-4 part 2 ES"; break; case V4L2_PIX_FMT_XVID: descr = "Xvid"; break; case V4L2_PIX_FMT_VC1_ANNEX_G: descr = "VC-1 (SMPTE 412M Annex G)"; break; @@ -1336,6 +1337,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_SE401: descr = "GSPCA SE401"; break; case V4L2_PIX_FMT_S5C_UYVY_JPG: descr = "S5C73MX interleaved UYVY/JPEG"; break; case V4L2_PIX_FMT_MT21C: descr = "Mediatek Compressed Format"; break; + case V4L2_PIX_FMT_SUNXI_TILED_NV12: descr = "Sunxi Tiled NV12 Format"; break; default: WARN(1, "Unknown pixelformat 0x%08x\n", fmt->pixelformat); if (fmt->description[0]) @@ -1877,7 +1879,7 @@ static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops, if (ret) return ret; - CLEAR_AFTER_FIELD(p, memory); + CLEAR_AFTER_FIELD(p, capabilities); return ops->vidioc_reqbufs(file, fh, p); } @@ -1918,7 +1920,7 @@ static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops, if (ret) return ret; - CLEAR_AFTER_FIELD(create, format); + CLEAR_AFTER_FIELD(create, capabilities); v4l_sanitize_format(&create->format); @@ -2109,9 +2111,9 @@ static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops, p->error_idx = p->count; if (vfh && vfh->ctrl_handler) - return v4l2_g_ext_ctrls(vfh->ctrl_handler, p); + return v4l2_g_ext_ctrls(vfh->ctrl_handler, vfd->v4l2_dev->mdev, p); if (vfd->ctrl_handler) - return v4l2_g_ext_ctrls(vfd->ctrl_handler, p); + return v4l2_g_ext_ctrls(vfd->ctrl_handler, vfd->v4l2_dev->mdev, p); if (ops->vidioc_g_ext_ctrls == NULL) return -ENOTTY; return check_ext_ctrls(p, 0) ? ops->vidioc_g_ext_ctrls(file, fh, p) : @@ -2128,9 +2130,9 @@ static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops, p->error_idx = p->count; if (vfh && vfh->ctrl_handler) - return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, p); + return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, vfd->v4l2_dev->mdev, p); if (vfd->ctrl_handler) - return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, p); + return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, vfd->v4l2_dev->mdev, p); if (ops->vidioc_s_ext_ctrls == NULL) return -ENOTTY; return check_ext_ctrls(p, 0) ? ops->vidioc_s_ext_ctrls(file, fh, p) : @@ -2147,9 +2149,9 @@ static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops, p->error_idx = p->count; if (vfh && vfh->ctrl_handler) - return v4l2_try_ext_ctrls(vfh->ctrl_handler, p); + return v4l2_try_ext_ctrls(vfh->ctrl_handler, vfd->v4l2_dev->mdev, p); if (vfd->ctrl_handler) - return v4l2_try_ext_ctrls(vfd->ctrl_handler, p); + return v4l2_try_ext_ctrls(vfd->ctrl_handler, vfd->v4l2_dev->mdev, p); if (ops->vidioc_try_ext_ctrls == NULL) return -ENOTTY; return check_ext_ctrls(p, 0) ? 
ops->vidioc_try_ext_ctrls(file, fh, p) : @@ -2780,6 +2782,7 @@ static long __video_do_ioctl(struct file *file, unsigned int cmd, void *arg) { struct video_device *vfd = video_devdata(file); + struct mutex *req_queue_lock = NULL; struct mutex *lock; /* ioctl serialization mutex */ const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops; bool write_only = false; @@ -2799,10 +2802,27 @@ static long __video_do_ioctl(struct file *file, if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) vfh = file->private_data; + /* + * We need to serialize streamon/off with queueing new requests. + * These ioctls may trigger the cancellation of a streaming + * operation, and that should not be mixed with queueing a new + * request at the same time. + */ + if (v4l2_device_supports_requests(vfd->v4l2_dev) && + (cmd == VIDIOC_STREAMON || cmd == VIDIOC_STREAMOFF)) { + req_queue_lock = &vfd->v4l2_dev->mdev->req_queue_mutex; + + if (mutex_lock_interruptible(req_queue_lock)) + return -ERESTARTSYS; + } + lock = v4l2_ioctl_get_lock(vfd, vfh, cmd, arg); - if (lock && mutex_lock_interruptible(lock)) + if (lock && mutex_lock_interruptible(lock)) { + if (req_queue_lock) + mutex_unlock(req_queue_lock); return -ERESTARTSYS; + } if (!video_is_registered(vfd)) { ret = -ENODEV; @@ -2861,6 +2881,8 @@ done: unlock: if (lock) mutex_unlock(lock); + if (req_queue_lock) + mutex_unlock(req_queue_lock); return ret; } diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c index ce9bd1b91210..d7806db222d8 100644 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -387,7 +387,7 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx) spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); if (m2m_dev->m2m_ops->job_abort) m2m_dev->m2m_ops->job_abort(m2m_ctx->priv); - dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx); + dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx); wait_event(m2m_ctx->finished, !(m2m_ctx->job_flags & TRANS_RUNNING)); } else if (m2m_ctx->job_flags & TRANS_QUEUED) { @@ -473,12 +473,19 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf); int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_buffer *buf) { + struct video_device *vdev = video_devdata(file); struct vb2_queue *vq; int ret; vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); - ret = vb2_qbuf(vq, buf); - if (!ret) + if (!V4L2_TYPE_IS_OUTPUT(vq->type) && + (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) { + dprintk("%s: requests cannot be used with capture buffers\n", + __func__); + return -EPERM; + } + ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf); + if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST)) v4l2_m2m_try_schedule(m2m_ctx); return ret; @@ -498,15 +505,11 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf); int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_buffer *buf) { + struct video_device *vdev = video_devdata(file); struct vb2_queue *vq; - int ret; vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); - ret = vb2_prepare_buf(vq, buf); - if (!ret) - v4l2_m2m_try_schedule(m2m_ctx); - - return ret; + return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf); } EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf); @@ -950,6 +953,52 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, } EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue); +void vb2_m2m_request_queue(struct media_request *req) +{ + struct media_request_object *obj, *obj_safe; + struct v4l2_m2m_ctx *m2m_ctx = NULL; + + /* + * Queue all objects. 
Note that buffer objects are at the end of the + * objects list, after all other object types. Once buffer objects + * are queued, the driver might delete them immediately (if the driver + * processes the buffer at once), so we have to use + * list_for_each_entry_safe() to handle the case where the object we + * queue is deleted. + */ + list_for_each_entry_safe(obj, obj_safe, &req->objects, list) { + struct v4l2_m2m_ctx *m2m_ctx_obj; + struct vb2_buffer *vb; + + if (!obj->ops->queue) + continue; + + if (vb2_request_object_is_buffer(obj)) { + /* Sanity checks */ + vb = container_of(obj, struct vb2_buffer, req_obj); + WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)); + m2m_ctx_obj = container_of(vb->vb2_queue, + struct v4l2_m2m_ctx, + out_q_ctx.q); + WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx); + m2m_ctx = m2m_ctx_obj; + } + + /* + * The buffer we queue here can in theory be immediately + * unbound, hence the use of list_for_each_entry_safe() + * above and why we call the queue op last. + */ + obj->ops->queue(obj); + } + + WARN_ON(!m2m_ctx); + + if (m2m_ctx) + v4l2_m2m_try_schedule(m2m_ctx); +} +EXPORT_SYMBOL_GPL(vb2_m2m_request_queue); + /* Videobuf2 ioctl helpers */ int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv, diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c index 792f41dffe23..f5f0d71ec745 100644 --- a/drivers/media/v4l2-core/v4l2-subdev.c +++ b/drivers/media/v4l2-core/v4l2-subdev.c @@ -222,17 +222,20 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg) case VIDIOC_G_EXT_CTRLS: if (!vfh->ctrl_handler) return -ENOTTY; - return v4l2_g_ext_ctrls(vfh->ctrl_handler, arg); + return v4l2_g_ext_ctrls(vfh->ctrl_handler, + sd->v4l2_dev->mdev, arg); case VIDIOC_S_EXT_CTRLS: if (!vfh->ctrl_handler) return -ENOTTY; - return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, arg); + return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, + sd->v4l2_dev->mdev, arg); case VIDIOC_TRY_EXT_CTRLS: if (!vfh->ctrl_handler) return -ENOTTY; - return v4l2_try_ext_ctrls(vfh->ctrl_handler, arg); + return v4l2_try_ext_ctrls(vfh->ctrl_handler, + sd->v4l2_dev->mdev, arg); case VIDIOC_DQEVENT: if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) |
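
Putting the new pieces together, an m2m driver gets three hooks out of this series: vb2_m2m_request_queue() to queue the objects of a validated request and kick the job scheduler, v4l2_ctrl_request_setup() to apply a request's control values before the hardware runs, and v4l2_ctrl_request_complete() to store the final control state back into the request once the job is done. The sketch below shows one plausible wiring; the my_* names are invented, it is not taken from any driver in this merge, and .req_validate is assumed to point at the vb2_request_validate() helper added elsewhere in this series.

#include <media/media-device.h>
#include <media/media-request.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

/* Invented context/device types, for illustration only. */
struct my_dev {
        struct v4l2_m2m_dev *m2m_dev;
};

struct my_ctx {
        struct my_dev *dev;
        struct v4l2_ctrl_handler hdl;
        struct v4l2_m2m_ctx *m2m_ctx;
};

/* Request hooks on the driver's media_device. */
static const struct media_device_ops my_media_ops = {
        .req_validate   = vb2_request_validate,
        .req_queue      = vb2_m2m_request_queue,
};

/* .device_run: apply the request's controls, then start the hardware. */
static void my_device_run(void *priv)
{
        struct my_ctx *ctx = priv;
        struct vb2_v4l2_buffer *src;

        src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);

        v4l2_ctrl_request_setup(src->vb2_buf.req_obj.req, &ctx->hdl);

        /* ... program the hardware from ctx->hdl and the buffers here ... */
}

/* Completion path, e.g. called from the driver's interrupt handler. */
static void my_job_done(struct my_ctx *ctx, enum vb2_buffer_state state)
{
        struct vb2_v4l2_buffer *src, *dst;

        src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
        dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

        v4l2_ctrl_request_complete(src->vb2_buf.req_obj.req, &ctx->hdl);

        v4l2_m2m_buf_done(src, state);
        v4l2_m2m_buf_done(dst, state);
        v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->m2m_ctx);
}

The sanity checks in vb2_m2m_request_queue() above are consistent with this flow: request buffers are expected to be output-queue buffers that all belong to the same m2m context.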