// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx V4L2 mem2mem driver
 *
 * Copyright (C) 2017-2018 Xilinx, Inc.
 *
 * Author: Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
 */
10 #include <drm/drm_fourcc.h>
11 #include <linux/delay.h>
12 #include <linux/dma/xilinx_frmbuf.h>
13 #include <linux/lcm.h>
14 #include <linux/list.h>
15 #include <linux/module.h>
17 #include <linux/of_graph.h>
18 #include <linux/platform_device.h>
19 #include <linux/slab.h>
21 #include <media/v4l2-async.h>
22 #include <media/v4l2-common.h>
23 #include <media/v4l2-device.h>
24 #include <media/v4l2-fwnode.h>
25 #include <media/v4l2-ioctl.h>
26 #include <media/v4l2-mem2mem.h>
27 #include <media/videobuf2-dma-contig.h>
29 #include "xilinx-vip.h"
31 #define XVIP_M2M_NAME "xilinx-mem2mem"
32 #define XVIP_M2M_DEFAULT_FMT V4L2_PIX_FMT_RGB24
34 /* Minimum and maximum widths are expressed in bytes */
35 #define XVIP_M2M_MIN_WIDTH 1U
36 #define XVIP_M2M_MAX_WIDTH 65535U
37 #define XVIP_M2M_MIN_HEIGHT 1U
38 #define XVIP_M2M_MAX_HEIGHT 8191U
40 #define XVIP_M2M_DEF_WIDTH 1920
41 #define XVIP_M2M_DEF_HEIGHT 1080
43 #define XVIP_M2M_PAD_SINK 1
44 #define XVIP_M2M_PAD_SOURCE 0
47 * struct xvip_graph_entity - Entity in the video graph
48 * @list: list entry in a graph entities list
49 * @node: the entity's DT node
50 * @entity: media entity, from the corresponding V4L2 subdev
51 * @asd: subdev asynchronous registration information
52 * @subdev: V4L2 subdev
53 * @streaming: status of the V4L2 subdev if streaming or not
55 struct xvip_graph_entity {
56 struct list_head list;
57 struct device_node *node;
58 struct media_entity *entity;
60 struct v4l2_async_subdev asd;
61 struct v4l2_subdev *subdev;
66 * struct xvip_pipeline - Xilinx Video IP pipeline structure
67 * @pipe: media pipeline
68 * @lock: protects the pipeline @stream_count
69 * @use_count: number of DMA engines using the pipeline
70 * @stream_count: number of DMA engines currently streaming
71 * @num_dmas: number of DMA engines in the pipeline
72 * @xdev: Composite device the pipe belongs to
74 struct xvip_pipeline {
75 struct media_pipeline pipe;
77 /* protects the pipeline @stream_count */
79 unsigned int use_count;
80 unsigned int stream_count;
82 unsigned int num_dmas;
83 struct xvip_m2m_dev *xdev;
86 struct xventity_list {
87 struct list_head list;
88 struct media_entity *entity;
92 * struct xvip_m2m_dev - Xilinx Video mem2mem device structure
93 * @v4l2_dev: V4L2 device
95 * @media_dev: media device
96 * @notifier: V4L2 asynchronous subdevs notifier
97 * @entities: entities in the graph as a list of xvip_graph_entity
98 * @num_subdevs: number of subdevs in the pipeline
99 * @lock: This is to protect mem2mem context structure data
100 * @queued_lock: This is to protect video buffer information
101 * @dma: Video DMA channels
102 * @m2m_dev: V4L2 mem2mem device structure
103 * @v4l2_caps: V4L2 capabilities of the whole device
105 struct xvip_m2m_dev {
106 struct v4l2_device v4l2_dev;
109 struct media_device media_dev;
110 struct v4l2_async_notifier notifier;
111 struct list_head entities;
112 unsigned int num_subdevs;
114 /* Protects to m2m context data */
117 /* Protects vb2_v4l2_buffer data */
118 spinlock_t queued_lock;
119 struct xvip_m2m_dma *dma;
120 struct v4l2_m2m_dev *m2m_dev;
124 static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e)
126 return container_of(e->pipe, struct xvip_pipeline, pipe);
130 * struct xvip_m2m_dma - Video DMA channel
131 * @video: V4L2 video device associated with the DMA channel
132 * @xdev: composite mem2mem device the DMA channels belongs to
133 * @chan_tx: DMA engine channel for MEM2DEV transfer
134 * @chan_rx: DMA engine channel for DEV2MEM transfer
135 * @outfmt: active V4L2 OUTPUT port pixel format
136 * @capfmt: active V4L2 CAPTURE port pixel format
137 * @r: crop rectangle parameters
138 * @outinfo: format information corresponding to the active @outfmt
139 * @capinfo: format information corresponding to the active @capfmt
140 * @align: transfer alignment required by the DMA channel (in bytes)
141 * @crop: boolean flag to indicate if crop is requested
142 * @pads: media pads for the video M2M device entity
143 * @pipe: pipeline belonging to the DMA channel
145 struct xvip_m2m_dma {
146 struct video_device video;
147 struct xvip_m2m_dev *xdev;
148 struct dma_chan *chan_tx;
149 struct dma_chan *chan_rx;
150 struct v4l2_format outfmt;
151 struct v4l2_format capfmt;
153 const struct xvip_video_format *outinfo;
154 const struct xvip_video_format *capinfo;
158 struct media_pad pads[2];
159 struct xvip_pipeline pipe;
163 * struct xvip_m2m_ctx - VIPP mem2mem context
164 * @fh: V4L2 file handler
165 * @xdev: composite mem2mem device the DMA channels belongs to
166 * @xt: dma interleaved template for dma configuration
167 * @sgl: data chunk structure for dma_interleaved_template
169 struct xvip_m2m_ctx {
171 struct xvip_m2m_dev *xdev;
172 struct dma_interleaved_template xt;
173 struct data_chunk sgl[1];
176 static inline struct xvip_m2m_ctx *file2ctx(struct file *file)
178 return container_of(file->private_data, struct xvip_m2m_ctx, fh);
181 static struct v4l2_subdev *
182 xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
184 struct media_pad *remote;
186 remote = media_entity_remote_pad(local);
187 if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
191 *pad = remote->index;
193 return media_entity_to_v4l2_subdev(remote->entity);
196 static int xvip_dma_verify_format(struct xvip_m2m_dma *dma)
198 struct v4l2_subdev_format fmt;
199 struct v4l2_subdev *subdev;
203 subdev = xvip_dma_remote_subdev(&dma->pads[XVIP_PAD_SOURCE], &fmt.pad);
207 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
208 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
210 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
212 if (dma->outinfo->code != fmt.format.code)
215 if (V4L2_TYPE_IS_MULTIPLANAR(dma->outfmt.type)) {
216 width = dma->outfmt.fmt.pix_mp.width;
217 height = dma->outfmt.fmt.pix_mp.height;
219 width = dma->outfmt.fmt.pix.width;
220 height = dma->outfmt.fmt.pix.height;
223 if (width != fmt.format.width || height != fmt.format.height)
229 #define to_xvip_dma(vdev) container_of(vdev, struct xvip_m2m_dma, video)
230 /* -----------------------------------------------------------------------------
231 * Pipeline Stream Management
235 * xvip_subdev_set_streaming - Find and update streaming status of subdev
236 * @xdev: Composite video device
237 * @subdev: V4L2 sub-device
238 * @enable: enable/disable streaming status
240 * Walk the xvip graph entities list and find if subdev is present. Returns
241 * streaming status of subdev and update the status as requested
243 * Return: streaming status (true or false) if successful or warn_on if subdev
244 * is not present and return false
246 static bool xvip_subdev_set_streaming(struct xvip_m2m_dev *xdev,
247 struct v4l2_subdev *subdev, bool enable)
249 struct xvip_graph_entity *entity;
251 list_for_each_entry(entity, &xdev->entities, list)
252 if (entity->node == subdev->dev->of_node) {
253 bool status = entity->streaming;
255 entity->streaming = enable;
259 WARN(1, "Should never get here\n");
/*
 * Power and stream one subdev entity on or off, tracking shared subdevs so
 * each is toggled only once.
 *
 * NOTE(review): this dump is line-sampled — the declarations of 'ret' and
 * 'is_streaming', the dev_err() call lines, several closing braces, early
 * returns and the final 'return ret;' are missing below. Restore them from
 * the full source before building.
 */
263 static int xvip_entity_start_stop(struct xvip_m2m_dev *xdev,
264 struct media_entity *entity, bool start)
266 struct v4l2_subdev *subdev;
270 dev_dbg(xdev->dev, "%s entity %s\n",
271 start ? "Starting" : "Stopping", entity->name);
272 subdev = media_entity_to_v4l2_subdev(entity);
274 /* This is to maintain list of stream on/off devices */
275 is_streaming = xvip_subdev_set_streaming(xdev, subdev, start);
/*
 * start or stop the subdev only once in case if they are
 * shared between sub-graphs
 */
278 * start or stop the subdev only once in case if they are
279 * shared between sub-graphs
281 if (start && !is_streaming) {
282 /* power-on subdevice */
283 ret = v4l2_subdev_call(subdev, core, s_power, 1);
284 if (ret < 0 && ret != -ENOIOCTLCMD) {
286 "s_power on failed on subdev\n");
287 xvip_subdev_set_streaming(xdev, subdev, 0);
291 /* stream-on subdevice */
292 ret = v4l2_subdev_call(subdev, video, s_stream, 1);
293 if (ret < 0 && ret != -ENOIOCTLCMD) {
295 "s_stream on failed on subdev\n");
296 v4l2_subdev_call(subdev, core, s_power, 0);
297 xvip_subdev_set_streaming(xdev, subdev, 0);
299 } else if (!start && is_streaming) {
300 /* stream-off subdevice */
301 ret = v4l2_subdev_call(subdev, video, s_stream, 0);
302 if (ret < 0 && ret != -ENOIOCTLCMD) {
304 "s_stream off failed on subdev\n");
305 xvip_subdev_set_streaming(xdev, subdev, 1);
308 /* power-off subdevice */
309 ret = v4l2_subdev_call(subdev, core, s_power, 0);
310 if (ret < 0 && ret != -ENOIOCTLCMD)
312 "s_power off failed on subdev\n");
/*
 * NOTE(review): line-sampled dump — declarations of 'ret' and 'ent_list',
 * error-path gotos/labels, kfree() of list nodes, several closing braces and
 * the final return are missing below; restore from the full source.
 */
319 * xvip_pipeline_start_stop - Start or stop streaming on a pipeline
320 * @xdev: Composite video device
322 * @start: Start (when true) or stop (when false) the pipeline
324 * Walk the entities chain starting @dma and start or stop all of them
326 * Return: 0 if successful, or the return value of the failed video::s_stream
327 * operation otherwise.
329 static int xvip_pipeline_start_stop(struct xvip_m2m_dev *xdev,
330 struct xvip_m2m_dma *dma, bool start)
332 struct media_graph graph;
333 struct media_entity *entity = &dma->video.entity;
334 struct media_device *mdev = entity->graph_obj.mdev;
335 struct xventity_list *temp, *_temp;
339 mutex_lock(&mdev->graph_mutex);
341 /* Walk the graph to locate the subdev nodes */
342 ret = media_graph_walk_init(&graph, mdev);
346 media_graph_walk_start(&graph, entity);
348 /* get the list of entities */
349 while ((entity = media_graph_walk_next(&graph))) {
350 struct xventity_list *ele;
352 /* We want to stream on/off only subdevs */
353 if (!is_media_entity_v4l2_subdev(entity))
356 /* Maintain the pipeline sequence in a list */
357 ele = kzalloc(sizeof(*ele), GFP_KERNEL);
363 ele->entity = entity;
364 list_add(&ele->list, &ent_list);
368 list_for_each_entry_safe(temp, _temp, &ent_list, list) {
369 /* Enable all subdevs from sink to source */
370 ret = xvip_entity_start_stop(xdev, temp->entity, start);
372 dev_err(xdev->dev, "ret = %d for entity %s\n",
373 ret, temp->entity->name);
378 list_for_each_entry_safe_reverse(temp, _temp, &ent_list, list)
379 /* Enable all subdevs from source to sink */
380 xvip_entity_start_stop(xdev, temp->entity, start);
383 list_for_each_entry_safe(temp, _temp, &ent_list, list) {
384 list_del(&temp->list);
389 mutex_unlock(&mdev->graph_mutex);
390 media_graph_walk_cleanup(&graph);
/*
 * NOTE(review): line-sampled dump — the 'int ret' declaration, the xdev/dma
 * initializers from pipe, the if (on)/else structure, error goto, closing
 * braces and final return are missing below; restore from the full source.
 */
395 * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
396 * @pipe: The pipeline
397 * @on: Turn the stream on when true or off when false
399 * The pipeline is shared between all DMA engines connected at its input and
400 * output. While the stream state of DMA engines can be controlled
401 * independently, pipelines have a shared stream state that enable or disable
402 * all entities in the pipeline. For this reason the pipeline uses a streaming
403 * counter that tracks the number of DMA engines that have requested the stream
404 * to be enabled. This will walk the graph starting from each DMA and enable or
405 * disable the entities in the path.
407 * When called with the @on argument set to true, this function will increment
408 * the pipeline streaming count. If the streaming count reaches the number of
409 * DMA engines in the pipeline it will enable all entities that belong to the
412 * Similarly, when called with the @on argument set to false, this function will
413 * decrement the pipeline streaming count and disable all entities in the
414 * pipeline when the streaming count reaches zero.
416 * Return: 0 if successful, or the return value of the failed video::s_stream
417 * operation otherwise. Stopping the pipeline never fails. The pipeline state is
418 * not updated when the operation fails.
420 static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
422 struct xvip_m2m_dev *xdev;
423 struct xvip_m2m_dma *dma;
426 mutex_lock(&pipe->lock);
431 ret = xvip_pipeline_start_stop(xdev, dma, true);
434 pipe->stream_count++;
436 if (--pipe->stream_count == 0)
437 xvip_pipeline_start_stop(xdev, dma, false);
441 mutex_unlock(&pipe->lock);
/*
 * Count the V4L DMA video nodes reachable from @start and record the result
 * in @pipe (num_dmas, xdev).
 *
 * NOTE(review): line-sampled dump — the 'int ret' declaration, the
 * input/output counting inside the walk loop, the -EPIPE return and the
 * final 'return 0;' plus several braces are missing below; restore from the
 * full source.
 */
445 static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
446 struct xvip_m2m_dma *start)
448 struct media_graph graph;
449 struct media_entity *entity = &start->video.entity;
450 struct media_device *mdev = entity->graph_obj.mdev;
451 unsigned int num_inputs = 0;
452 unsigned int num_outputs = 0;
455 mutex_lock(&mdev->graph_mutex);
457 /* Walk the graph to locate the video nodes. */
458 ret = media_graph_walk_init(&graph, mdev);
460 mutex_unlock(&mdev->graph_mutex);
464 media_graph_walk_start(&graph, entity);
466 while ((entity = media_graph_walk_next(&graph))) {
467 struct xvip_m2m_dma *dma;
469 if (entity->function != MEDIA_ENT_F_IO_V4L)
472 dma = to_xvip_dma(media_entity_to_video_device(entity));
478 mutex_unlock(&mdev->graph_mutex);
480 media_graph_walk_cleanup(&graph);
482 /* We need at least one DMA to proceed */
483 if (num_outputs == 0 && num_inputs == 0)
486 pipe->num_dmas = num_inputs + num_outputs;
487 pipe->xdev = start->xdev;
/*
 * NOTE(review): only the signature of this helper survived the sampling —
 * its body (presumably resetting the pipeline state fields) is missing from
 * this dump; restore from the full source.
 */
492 static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
498 * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
499 * @pipe: the pipeline
501 * Decrease the pipeline use count and clean it up if we were the last user.
503 static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
505 mutex_lock(&pipe->lock);
507 /* If we're the last user clean up the pipeline. */
508 if (--pipe->use_count == 0)
509 __xvip_pipeline_cleanup(pipe);
511 mutex_unlock(&pipe->lock);
515 * xvip_pipeline_prepare - Prepare the pipeline for streaming
516 * @pipe: the pipeline
517 * @dma: DMA engine at one end of the pipeline
519 * Validate the pipeline if no user exists yet, otherwise just increase the use
522 * Return: 0 if successful or -EPIPE if the pipeline is not valid.
524 static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
525 struct xvip_m2m_dma *dma)
529 mutex_lock(&pipe->lock);
531 /* If we're the first user validate and initialize the pipeline. */
532 if (pipe->use_count == 0) {
533 ret = xvip_pipeline_validate(pipe, dma);
535 __xvip_pipeline_cleanup(pipe);
544 mutex_unlock(&pipe->lock);
/*
 * NOTE(review): only the signature of this DMA-completion callback survived
 * the sampling — its body is missing from this dump.
 */
548 static void xvip_m2m_dma_callback_mem2dev(void *data)
552 static void xvip_m2m_dma_callback(void *data)
554 struct xvip_m2m_ctx *ctx = data;
555 struct xvip_m2m_dev *xdev = ctx->xdev;
556 struct vb2_v4l2_buffer *src_vb, *dst_vb;
558 spin_lock(&xdev->queued_lock);
559 src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
560 dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
562 dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
563 dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
565 src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
566 dst_vb->timecode = src_vb->timecode;
568 v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
569 v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
570 v4l2_m2m_job_finish(xdev->m2m_dev, ctx->fh.m2m_ctx);
571 spin_unlock(&xdev->queued_lock);
/*
 * vb2 queue_setup: validate or report the plane count and per-plane sizes
 * for the queue's active format.
 *
 * NOTE(review): line-sampled dump — the 'u32 i' declaration, the f/info
 * assignments for OUTPUT vs CAPTURE, the early 'return 0/-EINVAL' paths and
 * closing braces are missing below; restore from the full source.
 */
578 static int xvip_m2m_queue_setup(struct vb2_queue *vq,
579 u32 *nbuffers, u32 *nplanes,
580 u32 sizes[], struct device *alloc_devs[])
582 struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(vq);
583 struct xvip_m2m_dma *dma = ctx->xdev->dma;
584 struct v4l2_format *f;
585 const struct xvip_video_format *info;
588 if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
597 if (*nplanes != f->fmt.pix_mp.num_planes)
600 for (i = 0; i < *nplanes; i++) {
601 if (sizes[i] < f->fmt.pix_mp.plane_fmt[i].sizeimage)
605 *nplanes = info->buffers;
606 for (i = 0; i < info->buffers; i++)
607 sizes[i] = f->fmt.pix_mp.plane_fmt[i].sizeimage;
/*
 * vb2 buf_prepare: verify each plane is large enough for the active format
 * and set the plane payloads.
 *
 * NOTE(review): line-sampled dump — the 'u32 i' declaration, the f/info
 * assignments, the '-EINVAL' return, 'return 0;' and closing braces are
 * missing below; restore from the full source.
 */
613 static int xvip_m2m_buf_prepare(struct vb2_buffer *vb)
615 struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
616 struct xvip_m2m_dma *dma = ctx->xdev->dma;
617 struct v4l2_format *f;
618 const struct xvip_video_format *info;
621 if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
629 for (i = 0; i < info->buffers; i++) {
630 if (vb2_plane_size(vb, i) <
631 f->fmt.pix_mp.plane_fmt[i].sizeimage) {
632 dev_err(ctx->xdev->dev,
633 "insufficient plane size (%u < %u)\n",
634 (u32)vb2_plane_size(vb, i),
635 f->fmt.pix_mp.plane_fmt[i].sizeimage);
639 vb2_set_plane_payload(vb, i,
640 f->fmt.pix_mp.plane_fmt[i].sizeimage);
646 static void xvip_m2m_buf_queue(struct vb2_buffer *vb)
648 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
649 struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
651 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
/*
 * vb2 stop_streaming: terminate the DMA channel for this queue, tear the
 * media pipeline down (when subdevs exist) and return all queued buffers
 * with the ERROR state.
 *
 * NOTE(review): line-sampled dump — the 'else' before the chan_rx terminate,
 * the while-loop that drains remaining buffers (with its NULL-check/break)
 * and several closing braces are missing below; restore from the full
 * source.
 */
654 static void xvip_m2m_stop_streaming(struct vb2_queue *q)
656 struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(q);
657 struct xvip_m2m_dma *dma = ctx->xdev->dma;
658 struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
659 struct vb2_v4l2_buffer *vbuf;
662 if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
663 dmaengine_terminate_sync(dma->chan_tx);
665 dmaengine_terminate_sync(dma->chan_rx);
667 if (ctx->xdev->num_subdevs) {
668 /* Stop the pipeline. */
669 xvip_pipeline_set_stream(pipe, false);
671 /* Cleanup the pipeline and mark it as being stopped. */
672 xvip_pipeline_cleanup(pipe);
673 media_pipeline_stop(&dma->video.entity);
677 if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
678 vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
680 vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
685 spin_lock(&ctx->xdev->queued_lock);
686 v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
687 spin_unlock(&ctx->xdev->queued_lock);
/*
 * vb2 start_streaming: when subdevs are present, start the media pipeline,
 * verify the configured format against the connected subdev, prepare the
 * pipeline and enable streaming on it.
 *
 * NOTE(review): line-sampled dump — the 'int ret' declaration, the early
 * 'return 0' for the no-subdev case, every 'if (ret < 0) goto ...' check,
 * the error-unwind labels and the final return are missing below; restore
 * from the full source.
 */
691 static int xvip_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
693 struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(q);
694 struct xvip_m2m_dma *dma = ctx->xdev->dma;
695 struct xvip_m2m_dev *xdev = ctx->xdev;
696 struct xvip_pipeline *pipe;
699 if (!xdev->num_subdevs)
702 pipe = dma->video.entity.pipe
703 ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;
705 ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
709 /* Verify that the configured format matches the output of the
712 ret = xvip_dma_verify_format(dma);
716 ret = xvip_pipeline_prepare(pipe, dma);
720 /* Start the pipeline. */
721 ret = xvip_pipeline_set_stream(pipe, true);
727 media_pipeline_stop(&dma->video.entity);
730 xvip_m2m_stop_streaming(q);
735 static const struct vb2_ops m2m_vb2_ops = {
736 .queue_setup = xvip_m2m_queue_setup,
737 .buf_prepare = xvip_m2m_buf_prepare,
738 .buf_queue = xvip_m2m_buf_queue,
739 .start_streaming = xvip_m2m_start_streaming,
740 .stop_streaming = xvip_m2m_stop_streaming,
741 .wait_prepare = vb2_ops_wait_prepare,
742 .wait_finish = vb2_ops_wait_finish,
745 static int xvip_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
746 struct vb2_queue *dst_vq)
748 struct xvip_m2m_ctx *ctx = priv;
751 src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
752 src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
753 src_vq->drv_priv = ctx;
754 src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
755 src_vq->ops = &m2m_vb2_ops;
756 src_vq->mem_ops = &vb2_dma_contig_memops;
757 src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
758 src_vq->dev = ctx->xdev->v4l2_dev.dev;
760 ret = vb2_queue_init(src_vq);
764 dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
765 dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
766 dst_vq->drv_priv = ctx;
767 dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
768 dst_vq->ops = &m2m_vb2_ops;
769 dst_vq->mem_ops = &vb2_dma_contig_memops;
770 dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
771 dst_vq->dev = ctx->xdev->v4l2_dev.dev;
773 return vb2_queue_init(dst_vq);
776 /* -----------------------------------------------------------------------------
781 xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
783 cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
784 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
786 strlcpy(cap->driver, XVIP_M2M_NAME, sizeof(cap->driver));
787 strlcpy(cap->card, XVIP_M2M_NAME, sizeof(cap->card));
788 strlcpy(cap->bus_info, XVIP_M2M_NAME, sizeof(cap->card));
/*
 * VIDIOC_ENUM_FMT handler: enumerate formats supported by the frame-buffer
 * DMA channel; when subdevs are connected, only the format matching the
 * remote pad's media-bus code is reported.
 *
 * NOTE(review): line-sampled dump — the 'static int' storage line, the 'int
 * ret' declaration, NULL/error checks after the helper calls, the 'return
 * 0/-EINVAL' paths and several braces are missing below; restore from the
 * full source.
 */
794 xvip_m2m_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
796 struct xvip_m2m_ctx *ctx = file2ctx(file);
797 struct xvip_m2m_dma *dma = ctx->xdev->dma;
798 const struct xvip_video_format *fmtinfo;
799 const struct xvip_video_format *fmt;
800 struct v4l2_subdev *subdev;
801 struct v4l2_subdev_format v4l_fmt;
802 struct xvip_m2m_dev *xdev = ctx->xdev;
803 u32 i, fmt_cnt, *fmts;
806 if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
807 ret = xilinx_xdma_get_v4l2_vid_fmts(dma->chan_rx,
810 ret = xilinx_xdma_get_v4l2_vid_fmts(dma->chan_tx,
815 if (f->index >= fmt_cnt)
818 if (!xdev->num_subdevs) {
819 fmt = xvip_get_format_by_fourcc(fmts[f->index]);
823 f->pixelformat = fmt->fourcc;
824 strlcpy(f->description, fmt->description,
825 sizeof(f->description));
832 /* Establish media pad format */
833 if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
834 subdev = xvip_dma_remote_subdev(&dma->pads[XVIP_PAD_SOURCE],
837 subdev = xvip_dma_remote_subdev(&dma->pads[XVIP_PAD_SINK],
842 v4l_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
843 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &v4l_fmt);
845 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
847 for (i = 0; i < fmt_cnt; i++) {
848 fmt = xvip_get_format_by_fourcc(fmts[i]);
852 if (fmt->code == v4l_fmt.format.code)
859 fmtinfo = xvip_get_format_by_fourcc(fmts[i]);
860 f->pixelformat = fmtinfo->fourcc;
861 strlcpy(f->description, fmtinfo->description, sizeof(f->description));
866 static int xvip_m2m_get_fmt(struct file *file, void *fh, struct v4l2_format *f)
868 struct xvip_m2m_ctx *ctx = file2ctx(file);
869 struct xvip_m2m_dma *dma = ctx->xdev->dma;
870 struct vb2_queue *vq;
872 vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
876 if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
877 f->fmt.pix_mp = dma->outfmt.fmt.pix_mp;
879 f->fmt.pix_mp = dma->capfmt.fmt.pix_mp;
/*
 * Validate and correct a requested OUTPUT/CAPTURE multiplanar format:
 * clamp width/height and bytesperline to the DMA alignment and compute the
 * per-plane sizeimage for contiguous and non-contiguous layouts.
 *
 * NOTE(review): line-sampled dump — the 'int ret' declaration, the NULL
 * subdev/get_fmt error returns, the info fallback branch, the '-EINVAL'
 * returns, the height multipliers at the ends of the sizeimage expressions,
 * the final 'return 0;' and many closing braces are missing below; restore
 * from the full source.
 */
884 static int __xvip_m2m_try_fmt(struct xvip_m2m_ctx *ctx, struct v4l2_format *f)
886 struct xvip_m2m_dma *dma = ctx->xdev->dma;
887 const struct xvip_video_format *info;
888 struct v4l2_pix_format_mplane *pix_mp;
889 struct v4l2_plane_pix_format *plane_fmt;
890 u32 align, min_width, max_width;
891 u32 bpl, min_bpl, max_bpl;
892 u32 padding_factor_nume, padding_factor_deno;
893 u32 bpl_nume, bpl_deno;
894 u32 i, plane_width, plane_height;
895 struct v4l2_subdev_format fmt;
896 struct v4l2_subdev *subdev;
897 struct xvip_m2m_dev *xdev = ctx->xdev;
900 if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
901 f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
904 if (xdev->num_subdevs) {
905 if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
906 subdev = xvip_dma_remote_subdev
907 (&dma->pads[XVIP_PAD_SOURCE], &fmt.pad);
909 subdev = xvip_dma_remote_subdev
910 (&dma->pads[XVIP_PAD_SINK], &fmt.pad);
915 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
916 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
921 pix_mp = &f->fmt.pix_mp;
922 plane_fmt = pix_mp->plane_fmt;
923 info = xvip_get_format_by_fourcc(f->fmt.pix_mp.pixelformat);
925 if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
930 info = xvip_get_format_by_fourcc(XVIP_M2M_DEFAULT_FMT);
933 if (xdev->num_subdevs) {
934 if (info->code != fmt.format.code ||
935 fmt.format.width != pix_mp->width ||
936 fmt.format.height != pix_mp->height) {
937 dev_err(xdev->dev, "Failed to set format\n");
939 "Reqed Code = %d, Width = %d, Height = %d\n",
940 info->code, pix_mp->width, pix_mp->height);
942 "Subdev Code = %d, Width = %d, Height = %d",
943 fmt.format.code, fmt.format.width,
949 xvip_width_padding_factor(info->fourcc, &padding_factor_nume,
950 &padding_factor_deno);
951 xvip_bpl_scaling_factor(info->fourcc, &bpl_nume, &bpl_deno);
/*
 * V4L2 specification suggests the driver corrects the format struct
 * if any of the dimensions is unsupported
 */
954 * V4L2 specification suggests the driver corrects the format struct
955 * if any of the dimensions is unsupported
957 align = lcm(dma->align, info->bpp >> 3);
958 min_width = roundup(XVIP_M2M_MIN_WIDTH, align);
959 max_width = rounddown(XVIP_M2M_MAX_WIDTH, align);
960 pix_mp->width = clamp(pix_mp->width, min_width, max_width);
961 pix_mp->height = clamp(pix_mp->height, XVIP_M2M_MIN_HEIGHT,
962 XVIP_M2M_MAX_HEIGHT);
/*
 * Clamp the requested bytes per line value. If the maximum
 * bytes per line value is zero, the module doesn't support
 * user configurable line sizes. Override the requested value
 * with the minimum in that case.
 */
965 * Clamp the requested bytes per line value. If the maximum
966 * bytes per line value is zero, the module doesn't support
967 * user configurable line sizes. Override the requested value
968 * with the minimum in that case.
970 max_bpl = rounddown(XVIP_M2M_MAX_WIDTH, align);
972 if (info->buffers == 1) {
973 /* Handling contiguous data with mplanes */
974 min_bpl = (pix_mp->width * info->bpl_factor *
975 padding_factor_nume * bpl_nume) /
976 (padding_factor_deno * bpl_deno);
977 min_bpl = roundup(min_bpl, align);
978 bpl = roundup(plane_fmt[0].bytesperline, align);
979 plane_fmt[0].bytesperline = clamp(bpl, min_bpl, max_bpl);
981 if (info->num_planes == 1) {
982 /* Single plane formats */
983 plane_fmt[0].sizeimage = plane_fmt[0].bytesperline *
986 /* Multi plane formats in contiguous buffer*/
987 plane_fmt[0].sizeimage =
988 DIV_ROUND_UP(plane_fmt[0].bytesperline *
993 /* Handling non-contiguous data with mplanes */
994 for (i = 0; i < info->num_planes; i++) {
995 plane_width = pix_mp->width / (i ? info->hsub : 1);
996 plane_height = pix_mp->height / (i ? info->vsub : 1);
997 min_bpl = (plane_width * info->bpl_factor *
998 padding_factor_nume * bpl_nume) /
999 (padding_factor_deno * bpl_deno);
1000 min_bpl = roundup(min_bpl, align);
1001 bpl = rounddown(plane_fmt[i].bytesperline, align);
1002 plane_fmt[i].bytesperline = clamp(bpl, min_bpl,
1004 plane_fmt[i].sizeimage = plane_fmt[i].bytesperline *
/* VIDIOC_TRY_FMT handler: delegate all validation to __xvip_m2m_try_fmt(). */
static int xvip_m2m_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	struct xvip_m2m_ctx *ctx = file2ctx(file);

	return __xvip_m2m_try_fmt(ctx, f);
}
1024 static int xvip_m2m_set_fmt(struct file *file, void *fh, struct v4l2_format *f)
1026 struct xvip_m2m_ctx *ctx = file2ctx(file);
1027 struct xvip_m2m_dma *dma = ctx->xdev->dma;
1028 struct vb2_queue *vq;
1031 vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
1035 if (vb2_is_busy(vq)) {
1036 v4l2_err(&ctx->xdev->v4l2_dev, "%s queue busy\n", __func__);
1040 ret = __xvip_m2m_try_fmt(ctx, f);
1044 if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
1045 dma->outfmt.fmt.pix_mp = f->fmt.pix_mp;
1047 dma->capfmt.fmt.pix_mp = f->fmt.pix_mp;
/*
 * VIDIOC_G_SELECTION handler: report the crop rectangle stored in dma->r.
 *
 * NOTE(review): line-sampled dump — the 'static int' storage line, the
 * '-EINVAL' returns, the COMPOSE/default case bodies, s->r.left/top
 * assignments, 'return 0;' and closing braces are missing below; restore
 * from the full source.
 */
1053 xvip_m2m_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
1055 struct xvip_m2m_ctx *ctx = file2ctx(file);
1056 struct xvip_m2m_dma *dma = ctx->xdev->dma;
1059 if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
1060 s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1063 switch (s->target) {
1064 case V4L2_SEL_TGT_COMPOSE:
1067 case V4L2_SEL_TGT_CROP:
1070 s->r.width = dma->r.width;
1071 s->r.height = dma->r.height;
/*
 * VIDIOC_S_SELECTION handler: validate and store the crop rectangle (must
 * fit inside the active OUTPUT format, origin fixed at 0,0); width is
 * clamped to the DMA alignment.
 *
 * NOTE(review): line-sampled dump — the 'static int' storage line, the
 * '-EINVAL' returns, the COMPOSE/default case bodies, the setting of the
 * crop flag, 'return 0;' and closing braces are missing below; restore from
 * the full source.
 */
1081 xvip_m2m_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
1083 struct xvip_m2m_ctx *ctx = file2ctx(file);
1084 struct xvip_m2m_dma *dma = ctx->xdev->dma;
1085 u32 min_width, max_width;
1088 if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
1089 s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1092 switch (s->target) {
1093 case V4L2_SEL_TGT_COMPOSE:
1096 case V4L2_SEL_TGT_CROP:
1097 if (s->r.width > dma->outfmt.fmt.pix_mp.width ||
1098 s->r.height > dma->outfmt.fmt.pix_mp.height ||
1099 s->r.top != 0 || s->r.left != 0)
1103 min_width = roundup(XVIP_M2M_MIN_WIDTH, dma->align);
1104 max_width = rounddown(XVIP_M2M_MAX_WIDTH, dma->align);
1105 dma->r.width = clamp(s->r.width, min_width, max_width);
1106 dma->r.height = s->r.height;
1115 static const struct v4l2_ioctl_ops xvip_m2m_ioctl_ops = {
1116 .vidioc_querycap = xvip_dma_querycap,
1118 .vidioc_enum_fmt_vid_cap_mplane = xvip_m2m_enum_fmt,
1119 .vidioc_g_fmt_vid_cap_mplane = xvip_m2m_get_fmt,
1120 .vidioc_try_fmt_vid_cap_mplane = xvip_m2m_try_fmt,
1121 .vidioc_s_fmt_vid_cap_mplane = xvip_m2m_set_fmt,
1123 .vidioc_enum_fmt_vid_out_mplane = xvip_m2m_enum_fmt,
1124 .vidioc_g_fmt_vid_out_mplane = xvip_m2m_get_fmt,
1125 .vidioc_try_fmt_vid_out_mplane = xvip_m2m_try_fmt,
1126 .vidioc_s_fmt_vid_out_mplane = xvip_m2m_set_fmt,
1127 .vidioc_s_selection = xvip_m2m_s_selection,
1128 .vidioc_g_selection = xvip_m2m_g_selection,
1130 .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
1131 .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
1132 .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
1133 .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
1134 .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
1135 .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
1136 .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
1138 .vidioc_streamon = v4l2_m2m_ioctl_streamon,
1139 .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
/*
 * File open handler: allocate a per-open context, initialize its V4L2 file
 * handle and m2m context.
 *
 * NOTE(review): line-sampled dump — the 'int ret' declaration, the -ENOMEM
 * check after devm_kzalloc(), any locking around v4l2_m2m_ctx_init(), the
 * error return path and the final 'return 0;' are missing below; restore
 * from the full source. Also worth confirming: devm_kzalloc() per open
 * means the context lives until device teardown, not file release.
 */
1145 static int xvip_m2m_open(struct file *file)
1147 struct xvip_m2m_dev *xdev = video_drvdata(file);
1148 struct xvip_m2m_ctx *ctx = NULL;
1151 ctx = devm_kzalloc(xdev->dev, sizeof(*ctx), GFP_KERNEL);
1155 v4l2_fh_init(&ctx->fh, video_devdata(file));
1156 file->private_data = &ctx->fh;
1159 ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(xdev->m2m_dev, ctx,
1160 &xvip_m2m_queue_init);
1161 if (IS_ERR(ctx->fh.m2m_ctx)) {
1162 ret = PTR_ERR(ctx->fh.m2m_ctx);
1163 v4l2_fh_exit(&ctx->fh);
1167 v4l2_fh_add(&ctx->fh);
1168 dev_info(xdev->dev, "Created instance %p, m2m_ctx: %p\n", ctx,
/*
 * File release handler: tear down the m2m context.
 *
 * NOTE(review): line-sampled dump — the v4l2_fh del/exit calls and the
 * final 'return 0;' expected here are missing; restore from the full
 * source.
 */
1173 static int xvip_m2m_release(struct file *file)
1175 struct xvip_m2m_ctx *ctx = file->private_data;
1177 v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
1181 static u32 xvip_m2m_poll(struct file *file,
1182 struct poll_table_struct *wait)
1184 struct xvip_m2m_ctx *ctx = file->private_data;
1187 mutex_lock(&ctx->xdev->lock);
1188 ret = v4l2_m2m_poll(file, ctx->fh.m2m_ctx, wait);
1189 mutex_unlock(&ctx->xdev->lock);
1194 static int xvip_m2m_mmap(struct file *file, struct vm_area_struct *vma)
1196 struct xvip_m2m_ctx *ctx = file->private_data;
1198 return v4l2_m2m_mmap(file, ctx->fh.m2m_ctx, vma);
1205 static int xvip_m2m_job_ready(void *priv)
1207 struct xvip_m2m_ctx *ctx = priv;
1209 if ((v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0) &&
1210 (v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) > 0))
1216 static void xvip_m2m_job_abort(void *priv)
1218 struct xvip_m2m_ctx *ctx = priv;
1220 /* Will cancel the transaction in the next interrupt handler */
1221 v4l2_m2m_job_finish(ctx->xdev->m2m_dev, ctx->fh.m2m_ctx);
/*
 * Prepare and submit the DEV2MEM (capture) interleaved DMA descriptor for
 * @dst_buf, programming the frame-buffer channel with the active CAPTURE
 * format and handling contiguous vs two-buffer (luma/chroma) layouts.
 *
 * NOTE(review): line-sampled dump — the declarations of 'p_out' and
 * 'luma_size', the dev_err()+return on a NULL dma address, the trailing
 * multiplier of the luma_size expression, the '- luma_size' continuation,
 * the NULL-desc check body and several closing braces are missing below;
 * restore from the full source.
 */
1224 static void xvip_m2m_prep_submit_dev2mem_desc(struct xvip_m2m_ctx *ctx,
1225 struct vb2_v4l2_buffer *dst_buf)
1227 struct xvip_m2m_dma *dma = ctx->xdev->dma;
1228 struct xvip_m2m_dev *xdev = ctx->xdev;
1229 struct dma_async_tx_descriptor *desc;
1231 const struct xvip_video_format *info;
1232 struct v4l2_pix_format_mplane *pix_mp;
1233 u32 padding_factor_nume, padding_factor_deno;
1234 u32 bpl_nume, bpl_deno;
1236 u32 flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
1237 enum operation_mode mode = DEFAULT;
1239 p_out = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
1243 "Acquiring kernel pointer to buffer failed\n");
1247 ctx->xt.dir = DMA_DEV_TO_MEM;
1248 ctx->xt.src_sgl = false;
1249 ctx->xt.dst_sgl = true;
1250 ctx->xt.dst_start = p_out;
1252 pix_mp = &dma->capfmt.fmt.pix_mp;
1253 info = dma->capinfo;
1254 xilinx_xdma_set_mode(dma->chan_rx, mode);
1255 xilinx_xdma_v4l2_config(dma->chan_rx, pix_mp->pixelformat);
1256 xvip_width_padding_factor(pix_mp->pixelformat, &padding_factor_nume,
1257 &padding_factor_deno);
1258 xvip_bpl_scaling_factor(pix_mp->pixelformat, &bpl_nume, &bpl_deno);
1260 ctx->xt.frame_size = info->num_planes;
1261 ctx->sgl[0].size = (pix_mp->width * info->bpl_factor *
1262 padding_factor_nume * bpl_nume) /
1263 (padding_factor_deno * bpl_deno);
1264 ctx->sgl[0].icg = pix_mp->plane_fmt[0].bytesperline - ctx->sgl[0].size;
1265 ctx->xt.numf = pix_mp->height;
/*
 * dst_icg is the number of bytes to jump after last luma addr
 * and before first chroma addr
 */
1268 * dst_icg is the number of bytes to jump after last luma addr
1269 * and before first chroma addr
1271 ctx->sgl[0].src_icg = 0;
1273 if (info->buffers == 1) {
1274 /* Handling contiguous data with mplanes */
1275 ctx->sgl[0].dst_icg = 0;
1277 /* Handling non-contiguous data with mplanes */
1278 if (info->buffers == 2) {
1279 dma_addr_t chroma_cap =
1280 vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 1);
1281 luma_size = pix_mp->plane_fmt[0].bytesperline *
1283 if (chroma_cap > p_out)
1284 ctx->sgl[0].dst_icg = chroma_cap - p_out -
1289 desc = dmaengine_prep_interleaved_dma(dma->chan_rx, &ctx->xt, flags);
1291 dev_err(xdev->dev, "Failed to prepare DMA rx transfer\n");
1295 desc->callback = xvip_m2m_dma_callback;
1296 desc->callback_param = ctx;
1297 dmaengine_submit(desc);
1298 dma_async_issue_pending(dma->chan_rx);
/*
 * xvip_m2m_prep_submit_mem2dev_desc - Prepare and submit the memory-to-device
 * (tx) interleaved DMA transfer for one OUTPUT (source) vb2 buffer.
 * @ctx: driver context for this m2m instance
 * @src_buf: source buffer whose plane(s) feed the tx DMA channel
 *
 * Builds ctx->xt / ctx->sgl[0] from the negotiated output format (and the
 * crop rectangle dma->r when smaller than the full frame), then submits the
 * descriptor on dma->chan_tx and kicks the engine.
 */
1301 static void xvip_m2m_prep_submit_mem2dev_desc(struct xvip_m2m_ctx *ctx,
1302 struct vb2_v4l2_buffer *src_buf)
1304 struct xvip_m2m_dma *dma = ctx->xdev->dma;
1305 struct xvip_m2m_dev *xdev = ctx->xdev;
1306 struct dma_async_tx_descriptor *desc;
1308 const struct xvip_video_format *info;
1309 struct v4l2_pix_format_mplane *pix_mp;
1310 u32 padding_factor_nume, padding_factor_deno;
1311 u32 bpl_nume, bpl_deno;
1313 u32 flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
1314 enum operation_mode mode = DEFAULT;
1315 u32 bpl, src_width, src_height;
/* DMA address of the first (luma) plane of the source buffer. */
1317 p_in = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
1321 "Acquiring kernel pointer to buffer failed\n");
/* Interleaved template: read scattered from memory, write to device. */
1325 ctx->xt.dir = DMA_MEM_TO_DEV;
1326 ctx->xt.src_sgl = true;
1327 ctx->xt.dst_sgl = false;
1328 ctx->xt.src_start = p_in;
1330 pix_mp = &dma->outfmt.fmt.pix_mp;
1331 bpl = pix_mp->plane_fmt[0].bytesperline;
/*
 * Source geometry: either the crop rectangle (dma->r) or the full
 * frame — the selecting condition sits between these assignments.
 */
1333 src_width = dma->r.width;
1334 src_height = dma->r.height;
1336 src_width = pix_mp->width;
1337 src_height = pix_mp->height;
1340 info = dma->outinfo;
1341 xilinx_xdma_set_mode(dma->chan_tx, mode);
1342 xilinx_xdma_v4l2_config(dma->chan_tx, pix_mp->pixelformat);
1343 xvip_width_padding_factor(pix_mp->pixelformat, &padding_factor_nume,
1344 &padding_factor_deno);
1345 xvip_bpl_scaling_factor(pix_mp->pixelformat, &bpl_nume, &bpl_deno);
1347 ctx->xt.frame_size = info->num_planes;
/* Payload bytes per line, scaled by format padding/bpl factors. */
1348 ctx->sgl[0].size = (src_width * info->bpl_factor *
1349 padding_factor_nume * bpl_nume) /
1350 (padding_factor_deno * bpl_deno);
/* icg = inter-chunk gap: stride minus payload within one line. */
1351 ctx->sgl[0].icg = bpl - ctx->sgl[0].size;
1352 ctx->xt.numf = src_height;
1355 * src_icg is the number of bytes to jump after last luma addr
1356 * and before first chroma addr
1358 ctx->sgl[0].dst_icg = 0;
1360 if (info->buffers == 1) {
1361 /* Handling contiguous data with mplanes */
1362 ctx->sgl[0].src_icg = 0;
/* Cropped height: skip the unread tail of the luma plane. */
1364 ctx->sgl[0].src_icg = bpl *
1365 (pix_mp->height - src_height);
1367 /* Handling non-contiguous data with mplanes */
1368 if (info->buffers == 2) {
1369 dma_addr_t chroma_out =
1370 vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 1);
1371 luma_size = bpl * ctx->xt.numf;
/* Gap between end of luma and start of the chroma plane. */
1372 if (chroma_out > p_in)
1373 ctx->sgl[0].src_icg = chroma_out - p_in -
1378 desc = dmaengine_prep_interleaved_dma(dma->chan_tx, &ctx->xt, flags);
1380 dev_err(xdev->dev, "Failed to prepare DMA tx transfer\n");
1384 desc->callback = xvip_m2m_dma_callback_mem2dev;
1385 desc->callback_param = ctx;
1386 dmaengine_submit(desc);
1387 dma_async_issue_pending(dma->chan_tx);
1391 * xvip_m2m_device_run - prepares and starts the device
1393 * @priv: Instance private data
1395 * This simulates all the immediate preparations required before starting
1396 * a device. This will be called by the framework when it decides to schedule
1397 * a particular instance.
1399 static void xvip_m2m_device_run(void *priv)
1401 struct xvip_m2m_ctx *ctx = priv;
1402 struct vb2_v4l2_buffer *src_buf, *dst_buf;
/* Peek (don't remove) the next ready source/destination buffers. */
1404 src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
1405 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
1407 /* Prepare and submit mem2dev transaction */
1408 xvip_m2m_prep_submit_mem2dev_desc(ctx, src_buf);
1410 /* Prepare and submit dev2mem transaction */
1411 xvip_m2m_prep_submit_dev2mem_desc(ctx, dst_buf);
/* V4L2 file operations for the mem2mem video node. */
1414 static const struct v4l2_file_operations xvip_m2m_fops = {
1415 .owner = THIS_MODULE,
1416 .open = xvip_m2m_open,
1417 .release = xvip_m2m_release,
1418 .poll = xvip_m2m_poll,
1419 .unlocked_ioctl = video_ioctl2,
1420 .mmap = xvip_m2m_mmap,
/*
 * Template video_device; copied into each xvip_m2m_dma in
 * xvip_m2m_dma_init() before registration (hence not const).
 */
1423 static struct video_device xvip_m2m_videodev = {
1424 .name = XVIP_M2M_NAME,
1425 .fops = &xvip_m2m_fops,
1426 .ioctl_ops = &xvip_m2m_ioctl_ops,
1427 .release = video_device_release_empty,
1428 .vfl_dir = VFL_DIR_M2M,
1429 .device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING,
1430 .vfl_type = VFL_TYPE_GRABBER,
/* mem2mem framework hooks; device_run is invoked per scheduled job. */
1433 static const struct v4l2_m2m_ops xvip_m2m_ops = {
1434 .device_run = xvip_m2m_device_run,
1435 .job_ready = xvip_m2m_job_ready,
1436 .job_abort = xvip_m2m_job_abort,
/*
 * xvip_m2m_dma_init - Initialize the mem2mem DMA object
 * @dma: DMA object to initialize
 *
 * Sets up locking, programs the default capture and output formats
 * (XVIP_M2M_DEFAULT_FMT), requests the tx/rx DMA channels and registers
 * the mem2mem video device.
 *
 * Return: 0 on success or a negative error code on failure.
 */
1439 static int xvip_m2m_dma_init(struct xvip_m2m_dma *dma)
1441 struct xvip_m2m_dev *xdev;
1442 struct v4l2_pix_format_mplane *pix_mp;
/* NOTE(review): xdev must be set (presumably xdev = dma->xdev) before use. */
1446 mutex_init(&xdev->lock);
1447 mutex_init(&dma->pipe.lock);
1448 spin_lock_init(&xdev->queued_lock);
1450 /* Format info on capture port - XVIP_M2M_DEFAULT_FMT (RGB24) is the default */
1451 dma->capinfo = xvip_get_format_by_fourcc(XVIP_M2M_DEFAULT_FMT);
1452 pix_mp = &dma->capfmt.fmt.pix_mp;
1453 pix_mp->pixelformat = dma->capinfo->fourcc;
1455 pix_mp->field = V4L2_FIELD_NONE;
1456 pix_mp->width = XVIP_M2M_DEF_WIDTH;
1457 pix_mp->height = XVIP_M2M_DEF_HEIGHT;
1458 pix_mp->plane_fmt[0].bytesperline = pix_mp->width *
1459 dma->capinfo->bpl_factor;
1460 pix_mp->plane_fmt[0].sizeimage =
1461 DIV_ROUND_UP(pix_mp->plane_fmt[0].bytesperline *
1462 pix_mp->height * dma->capinfo->bpp, 8);
1464 /* Format info on output port - XVIP_M2M_DEFAULT_FMT (RGB24) is the default */
1465 dma->outinfo = xvip_get_format_by_fourcc(XVIP_M2M_DEFAULT_FMT);
/* Fix: program the OUTPUT format struct, not the capture one again. */
1466 pix_mp = &dma->outfmt.fmt.pix_mp;
1467 pix_mp->pixelformat = dma->outinfo->fourcc;
1468 pix_mp->field = V4L2_FIELD_NONE;
1469 pix_mp->width = XVIP_M2M_DEF_WIDTH;
1470 pix_mp->height = XVIP_M2M_DEF_HEIGHT;
1471 pix_mp->plane_fmt[0].bytesperline = pix_mp->width *
1472 dma->outinfo->bpl_factor;
1473 pix_mp->plane_fmt[0].sizeimage =
1474 DIV_ROUND_UP(pix_mp->plane_fmt[0].bytesperline *
1475 pix_mp->height * dma->outinfo->bpp, 8);
1477 /* DMA channels for mem2mem */
1478 dma->chan_tx = dma_request_chan(xdev->dev, "tx");
1479 if (IS_ERR(dma->chan_tx)) {
1480 ret = PTR_ERR(dma->chan_tx);
/* Probe deferral is expected; only log real failures. */
1481 if (ret != -EPROBE_DEFER)
1482 dev_err(xdev->dev, "mem2mem DMA tx channel not found");
1487 dma->chan_rx = dma_request_chan(xdev->dev, "rx");
1488 if (IS_ERR(dma->chan_rx)) {
1489 ret = PTR_ERR(dma->chan_rx);
1490 if (ret != -EPROBE_DEFER)
1491 dev_err(xdev->dev, "mem2mem DMA rx channel not found");
/* Buffer alignment requirement reported by the DMA engine. */
1496 dma->align = BIT(dma->chan_tx->device->copy_align);
/* Clone the template video device and bind it to this instance. */
1499 dma->video = xvip_m2m_videodev;
1500 dma->video.v4l2_dev = &xdev->v4l2_dev;
1501 dma->video.lock = &xdev->lock;
1503 dma->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
1504 dma->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
1506 ret = media_entity_pads_init(&dma->video.entity, 2, dma->pads);
1510 ret = video_register_device(&dma->video, VFL_TYPE_GRABBER, -1);
1512 dev_err(xdev->dev, "Failed to register mem2mem video device\n");
1516 video_set_drvdata(&dma->video, dma->xdev);
/* Error unwinding: release channels acquired above. */
1520 dma_release_channel(dma->chan_rx);
1522 dma_release_channel(dma->chan_tx);
/*
 * xvip_m2m_dma_deinit - Undo xvip_m2m_dma_init()
 * @dma: DMA object to tear down
 *
 * Unregisters the video node (if registered), destroys the mutexes and
 * releases both DMA channels.
 */
1527 static void xvip_m2m_dma_deinit(struct xvip_m2m_dma *dma)
1529 if (video_is_registered(&dma->video))
1530 video_unregister_device(&dma->video);
1532 mutex_destroy(&dma->pipe.lock);
1533 mutex_destroy(&dma->xdev->lock);
1534 dma_release_channel(dma->chan_tx);
1535 dma_release_channel(dma->chan_rx);
/*
 * xvip_m2m_dma_alloc_init - Allocate and initialize the device's DMA object
 * @xdev: composite mem2mem device
 *
 * Allocates the single xvip_m2m_dma instance (device-managed), runs
 * xvip_m2m_dma_init() on it and records the V4L2 capabilities.
 *
 * Return: 0 on success or a negative error code on failure.
 */
1538 static int xvip_m2m_dma_alloc_init(struct xvip_m2m_dev *xdev)
1540 struct xvip_m2m_dma *dma = NULL;
1543 dma = devm_kzalloc(xdev->dev, sizeof(*dma), GFP_KERNEL);
1550 ret = xvip_m2m_dma_init(xdev->dma);
1552 dev_err(xdev->dev, "DMA initialization failed\n");
1556 xdev->v4l2_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
1560 /* -----------------------------------------------------------------------------
1561 * Platform Device Driver
/* Unregister and clean up the V4L2 and media devices (reverse of init). */
1563 static void xvip_composite_v4l2_cleanup(struct xvip_m2m_dev *xdev)
1565 v4l2_device_unregister(&xdev->v4l2_dev);
1566 media_device_unregister(&xdev->media_dev);
1567 media_device_cleanup(&xdev->media_dev);
/*
 * xvip_composite_v4l2_init - Initialize the media and V4L2 devices
 * @xdev: composite mem2mem device
 *
 * Initializes the media controller device and registers the V4L2 device
 * on top of it; cleans up the media device on registration failure.
 *
 * Return: 0 on success or a negative error code on failure.
 */
1570 static int xvip_composite_v4l2_init(struct xvip_m2m_dev *xdev)
1574 xdev->media_dev.dev = xdev->dev;
/* Fix typo in user-visible model string: "Videoi" -> "Video". */
1575 strlcpy(xdev->media_dev.model, "Xilinx Video M2M Composite Device",
1576 sizeof(xdev->media_dev.model));
1577 xdev->media_dev.hw_revision = 0;
1579 media_device_init(&xdev->media_dev);
1581 xdev->v4l2_dev.mdev = &xdev->media_dev;
1582 ret = v4l2_device_register(xdev->dev, &xdev->v4l2_dev);
1584 dev_err(xdev->dev, "V4L2 device registration failed (%d)\n",
1586 media_device_cleanup(&xdev->media_dev);
/*
 * xvip_graph_find_entity - Look up a graph entity by its DT node
 * @xdev: composite mem2mem device
 * @node: device-tree node to match against entity->node
 *
 * Return: the matching entity, or NULL if @node is not in the list.
 */
1593 static struct xvip_graph_entity *
1594 xvip_graph_find_entity(struct xvip_m2m_dev *xdev,
1595 const struct device_node *node)
1597 struct xvip_graph_entity *entity;
1599 list_for_each_entry(entity, &xdev->entities, list) {
1600 if (entity->node == node)
/*
 * xvip_graph_build_one - Create media links for one graph entity
 * @xdev: composite mem2mem device
 * @entity: entity whose OF-graph endpoints are walked
 *
 * Iterates over every endpoint of @entity's DT node, parses the fwnode
 * link, and creates an enabled media pad link from each local source pad
 * to the matching remote entity pad. Sink ports and links back to the
 * composite (DMA) node are skipped — they are handled elsewhere.
 *
 * Return: 0 on success or a negative error code on failure.
 */
1607 static int xvip_graph_build_one(struct xvip_m2m_dev *xdev,
1608 struct xvip_graph_entity *entity)
1610 u32 link_flags = MEDIA_LNK_FL_ENABLED;
1611 struct media_entity *local = entity->entity;
1612 struct media_entity *remote;
1613 struct media_pad *local_pad;
1614 struct media_pad *remote_pad;
1615 struct xvip_graph_entity *ent;
1616 struct v4l2_fwnode_link link;
1617 struct device_node *ep = NULL;
1618 struct device_node *next;
1621 dev_dbg(xdev->dev, "creating links for entity %s\n", local->name);
1624 /* Get the next endpoint and parse its link. */
1625 next = of_graph_get_next_endpoint(entity->node, ep);
1631 dev_dbg(xdev->dev, "processing endpoint %pOF\n", ep);
1633 ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link);
1635 dev_err(xdev->dev, "failed to parse link for %pOF\n",
1640 /* Skip sink ports, they will be processed from the other end of
1643 if (link.local_port >= local->num_pads) {
1644 dev_err(xdev->dev, "invalid port number %u for %pOF\n",
1646 to_of_node(link.local_node));
/* Drop the fwnode references held by the parsed link. */
1647 v4l2_fwnode_put_link(&link);
1652 local_pad = &local->pads[link.local_port];
1654 if (local_pad->flags & MEDIA_PAD_FL_SINK) {
1655 dev_dbg(xdev->dev, "skipping sink port %pOF:%u\n",
1656 to_of_node(link.local_node),
1658 v4l2_fwnode_put_link(&link);
1662 /* Skip DMA engines, they will be processed separately. */
1663 if (link.remote_node == of_fwnode_handle(xdev->dev->of_node)) {
1664 dev_dbg(xdev->dev, "skipping DMA port %pOF:%u\n",
1665 to_of_node(link.local_node),
1667 v4l2_fwnode_put_link(&link);
1671 /* Find the remote entity. */
1672 ent = xvip_graph_find_entity(xdev,
1673 to_of_node(link.remote_node));
1675 dev_err(xdev->dev, "no entity found for %pOF\n",
1676 to_of_node(link.remote_node));
1677 v4l2_fwnode_put_link(&link);
1682 remote = ent->entity;
1684 if (link.remote_port >= remote->num_pads) {
1685 dev_err(xdev->dev, "invalid port number %u on %pOF\n",
1686 link.remote_port, to_of_node(link.remote_node));
1687 v4l2_fwnode_put_link(&link);
1692 remote_pad = &remote->pads[link.remote_port];
1694 v4l2_fwnode_put_link(&link);
1696 /* Create the media link. */
1697 dev_dbg(xdev->dev, "creating %s:%u -> %s:%u link\n",
1698 local->name, local_pad->index,
1699 remote->name, remote_pad->index);
1701 ret = media_create_pad_link(local, local_pad->index,
1702 remote, remote_pad->index,
1706 "failed to create %s:%u -> %s:%u link\n",
1707 local->name, local_pad->index,
1708 remote->name, remote_pad->index);
/*
 * xvip_graph_parse_one - Collect remote entities reachable from one DT node
 * @xdev: composite mem2mem device
 * @node: DT node whose endpoints are scanned
 *
 * For each endpoint of @node, resolves the remote port parent and, if it
 * has not been seen before (and is not the composite node itself), adds a
 * new xvip_graph_entity to xdev->entities for async subdev matching.
 *
 * Return: 0 on success or a negative error code on failure.
 */
1716 static int xvip_graph_parse_one(struct xvip_m2m_dev *xdev,
1717 struct device_node *node)
1719 struct xvip_graph_entity *entity;
1720 struct device_node *remote;
1721 struct device_node *ep = NULL;
1724 dev_dbg(xdev->dev, "parsing node %pOF\n", node);
1727 ep = of_graph_get_next_endpoint(node, ep);
1731 dev_dbg(xdev->dev, "handling endpoint %pOF %s\n",
1734 remote = of_graph_get_remote_port_parent(ep);
1739 dev_dbg(xdev->dev, "Remote endpoint %pOF %s\n",
1740 remote, remote->name);
1742 /* Skip entities that we have already processed. */
1743 if (remote == xdev->dev->of_node ||
1744 xvip_graph_find_entity(xdev, remote)) {
1745 of_node_put(remote);
1749 entity = devm_kzalloc(xdev->dev, sizeof(*entity), GFP_KERNEL);
/* Allocation failed: release the DT node reference taken above. */
1751 of_node_put(remote);
/* The entity keeps the reference on @remote until xvip_graph_cleanup(). */
1756 entity->node = remote;
1757 entity->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
1758 entity->asd.match.fwnode = of_fwnode_handle(remote);
1759 list_add_tail(&entity->list, &xdev->entities);
1760 xdev->num_subdevs++;
/*
 * xvip_graph_parse - Walk the whole OF graph starting at the composite node
 * @xdev: composite mem2mem device
 *
 * Return: 0 on success or a negative error code on failure.
 */
1767 static int xvip_graph_parse(struct xvip_m2m_dev *xdev)
1769 struct xvip_graph_entity *entity;
1773 * Walk the links to parse the full graph. Start by parsing the
1774 * composite node and then parse entities in turn. The list_for_each
1775 * loop will handle entities added at the end of the list while walking
1778 ret = xvip_graph_parse_one(xdev, xdev->dev->of_node);
1782 list_for_each_entry(entity, &xdev->entities, list) {
1783 ret = xvip_graph_parse_one(xdev, entity->node);
/*
 * xvip_graph_build_dma - Create media links between the DMA video node and
 * the remote subdev entities
 * @xdev: composite mem2mem device
 *
 * Walks the composite node's endpoints; for each parsed link, picks link
 * direction from the local port (XVIP_PAD_SOURCE: video node is the source,
 * otherwise the remote entity feeds the video node) and creates an enabled
 * media pad link.
 *
 * Return: 0 on success or a negative error code on failure.
 */
1791 static int xvip_graph_build_dma(struct xvip_m2m_dev *xdev)
1793 u32 link_flags = MEDIA_LNK_FL_ENABLED;
1794 struct device_node *node = xdev->dev->of_node;
1795 struct media_entity *source;
1796 struct media_entity *sink;
1797 struct media_pad *source_pad;
1798 struct media_pad *sink_pad;
1799 struct xvip_graph_entity *ent;
1800 struct v4l2_fwnode_link link;
1801 struct device_node *ep = NULL;
1802 struct device_node *next;
1803 struct xvip_m2m_dma *dma = xdev->dma;
1806 dev_dbg(xdev->dev, "creating links for DMA engines\n");
1809 /* Get the next endpoint and parse its link. */
1810 next = of_graph_get_next_endpoint(node, ep);
1816 dev_dbg(xdev->dev, "processing endpoint %pOF\n", ep);
1818 ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link);
1820 dev_err(xdev->dev, "failed to parse link for %pOF\n",
1825 dev_dbg(xdev->dev, "creating link for DMA engine %s\n",
1828 /* Find the remote entity. */
1829 ent = xvip_graph_find_entity(xdev,
1830 to_of_node(link.remote_node));
1832 dev_err(xdev->dev, "no entity found for %pOF\n",
1833 to_of_node(link.remote_node));
1834 v4l2_fwnode_put_link(&link);
1838 if (link.remote_port >= ent->entity->num_pads) {
1839 dev_err(xdev->dev, "invalid port number %u on %pOF\n",
1841 to_of_node(link.remote_node));
1842 v4l2_fwnode_put_link(&link);
1847 dev_dbg(xdev->dev, "Entity %s %s\n", ent->node->name,
1848 ent->node->full_name);
1849 dev_dbg(xdev->dev, "port number %u on %pOF\n",
1850 link.remote_port, to_of_node(link.remote_node));
1851 dev_dbg(xdev->dev, "local port number %u on %pOF\n",
1852 link.local_port, to_of_node(link.local_node));
/* Local source port: video node drives the remote entity's sink. */
1854 if (link.local_port == XVIP_PAD_SOURCE) {
1855 source = &dma->video.entity;
1856 source_pad = &dma->pads[XVIP_PAD_SOURCE];
1858 sink_pad = &sink->pads[XVIP_PAD_SINK];
/* Otherwise the remote entity feeds the video node's sink pad. */
1861 source = ent->entity;
1862 source_pad = &source->pads[XVIP_PAD_SOURCE];
1863 sink = &dma->video.entity;
1864 sink_pad = &dma->pads[XVIP_PAD_SINK];
1867 v4l2_fwnode_put_link(&link);
1869 /* Create the media link. */
1870 dev_dbg(xdev->dev, "creating %s:%u -> %s:%u link\n",
1871 source->name, source_pad->index,
1872 sink->name, sink_pad->index);
1874 ret = media_create_pad_link(source, source_pad->index,
1875 sink, sink_pad->index,
1879 "failed to create %s:%u -> %s:%u link\n",
1880 source->name, source_pad->index,
1881 sink->name, sink_pad->index);
/*
 * xvip_graph_notify_complete - Async notifier "complete" callback
 * @notifier: embedded notifier, container of xvip_m2m_dev
 *
 * Called once every subdev has been bound: builds the media links for all
 * entities and the DMA node, registers the subdev device nodes, then
 * registers the media device.
 *
 * Return: 0 on success or a negative error code on failure.
 */
1889 static int xvip_graph_notify_complete(struct v4l2_async_notifier *notifier)
1891 struct xvip_m2m_dev *xdev =
1892 container_of(notifier, struct xvip_m2m_dev, notifier);
1893 struct xvip_graph_entity *entity;
1896 dev_dbg(xdev->dev, "notify complete, all subdevs registered\n");
1898 /* Create links for every entity. */
1899 list_for_each_entry(entity, &xdev->entities, list) {
1900 ret = xvip_graph_build_one(xdev, entity);
1905 /* Create links for DMA channels. */
1906 ret = xvip_graph_build_dma(xdev);
1910 ret = v4l2_device_register_subdev_nodes(&xdev->v4l2_dev);
1912 dev_err(xdev->dev, "failed to register subdev nodes\n");
1914 return media_device_register(&xdev->media_dev);
/*
 * xvip_graph_notify_bound - Async notifier "bound" callback
 * @notifier: embedded notifier, container of xvip_m2m_dev
 * @subdev: the just-bound V4L2 subdev
 * @asd: matching async subdev descriptor
 *
 * Finds the graph entity whose DT node matches @subdev and stores the
 * subdev/media-entity pointers on it; rejects duplicate bindings.
 */
1917 static int xvip_graph_notify_bound(struct v4l2_async_notifier *notifier,
1918 struct v4l2_subdev *subdev,
1919 struct v4l2_async_subdev *asd)
1921 struct xvip_m2m_dev *xdev =
1922 container_of(notifier, struct xvip_m2m_dev, notifier);
1923 struct xvip_graph_entity *entity;
1925 /* Locate the entity corresponding to the bound subdev and store the
1928 list_for_each_entry(entity, &xdev->entities, list) {
1929 if (entity->node != subdev->dev->of_node)
1932 if (entity->subdev) {
1933 dev_err(xdev->dev, "duplicate subdev for node %pOF\n",
1938 dev_dbg(xdev->dev, "subdev %s bound\n", subdev->name);
1939 entity->entity = &subdev->entity;
1940 entity->subdev = subdev;
/* No entity matched the bound subdev's DT node. */
1944 dev_err(xdev->dev, "no entity for subdev %s\n", subdev->name);
/* Async notifier callbacks used during subdev discovery. */
1948 static const struct v4l2_async_notifier_operations xvip_graph_notify_ops = {
1949 .bound = xvip_graph_notify_bound,
1950 .complete = xvip_graph_notify_complete,
/*
 * xvip_graph_cleanup - Tear down the graph state
 * @xdev: composite mem2mem device
 *
 * Unregisters the async notifier and releases every collected entity's
 * DT node reference while unlinking it from the list (safe iteration,
 * entries are removed during the walk).
 */
1953 static void xvip_graph_cleanup(struct xvip_m2m_dev *xdev)
1955 struct xvip_graph_entity *entityp;
1956 struct xvip_graph_entity *entity;
1958 v4l2_async_notifier_unregister(&xdev->notifier);
1960 list_for_each_entry_safe(entity, entityp, &xdev->entities, list) {
1961 of_node_put(entity->node);
1962 list_del(&entity->list);
/*
 * xvip_graph_init - Initialize DMA, parse the OF graph, register notifier
 * @xdev: composite mem2mem device
 *
 * Initializes the DMA channels, parses the device-tree graph into a list
 * of subdev entities, then builds the async subdev array and registers
 * the notifier so the graph is completed when all subdevs bind.
 *
 * Return: 0 on success or a negative error code on failure.
 */
1966 static int xvip_graph_init(struct xvip_m2m_dev *xdev)
1968 struct xvip_graph_entity *entity;
1969 struct v4l2_async_subdev **subdevs = NULL;
1970 unsigned int num_subdevs;
1974 /* Init the DMA channels. */
1975 ret = xvip_m2m_dma_alloc_init(xdev);
1977 dev_err(xdev->dev, "DMA initialization failed\n");
1981 /* Parse the graph to extract a list of subdevice DT nodes. */
1982 ret = xvip_graph_parse(xdev);
1984 dev_err(xdev->dev, "graph parsing failed\n");
1987 dev_dbg(xdev->dev, "Number of subdev = %d\n", xdev->num_subdevs);
1989 if (!xdev->num_subdevs) {
1990 dev_err(xdev->dev, "no subdev found in graph\n");
1994 /* Register the subdevices notifier. */
1995 num_subdevs = xdev->num_subdevs;
/* Use devm_kcalloc: overflow-checked array allocation. */
1996 subdevs = devm_kcalloc(xdev->dev, num_subdevs, sizeof(*subdevs),
2004 list_for_each_entry(entity, &xdev->entities, list)
2005 subdevs[i++] = &entity->asd;
2007 xdev->notifier.subdevs = subdevs;
2008 xdev->notifier.num_subdevs = num_subdevs;
2009 xdev->notifier.ops = &xvip_graph_notify_ops;
2011 ret = v4l2_async_notifier_register(&xdev->v4l2_dev, &xdev->notifier);
2013 dev_err(xdev->dev, "notifier registration failed\n");
/* Error path: release graph resources collected so far. */
2021 xvip_graph_cleanup(xdev);
/* Remove helper: undo graph and V4L2/media registration. */
2026 static int xvip_composite_remove(struct platform_device *pdev)
2028 struct xvip_m2m_dev *xdev = platform_get_drvdata(pdev);
2030 xvip_graph_cleanup(xdev);
2031 xvip_composite_v4l2_cleanup(xdev);
/*
 * xvip_m2m_probe - Platform driver probe
 * @pdev: platform device
 *
 * Allocates the composite device, registers the V4L2/media devices,
 * initializes the graph, sets the 64-bit coherent DMA mask and creates
 * the mem2mem framework instance.
 *
 * Return: 0 on success or a negative error code on failure.
 */
2036 static int xvip_m2m_probe(struct platform_device *pdev)
2038 struct xvip_m2m_dev *xdev = NULL;
2041 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
2045 xdev->dev = &pdev->dev;
2046 INIT_LIST_HEAD(&xdev->entities);
2048 ret = xvip_composite_v4l2_init(xdev);
2052 ret = xvip_graph_init(xdev);
/* NOTE(review): mask is set after graph/DMA init — confirm ordering is intended. */
2056 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
2058 dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret);
2062 platform_set_drvdata(pdev, xdev);
2064 xdev->m2m_dev = v4l2_m2m_init(&xvip_m2m_ops);
2065 if (IS_ERR(xdev->m2m_dev)) {
2066 dev_err(xdev->dev, "Failed to init mem2mem device\n");
2067 ret = PTR_ERR(xdev->m2m_dev);
2071 dev_info(xdev->dev, "mem2mem device registered\n");
/* Error unwinding labels: DMA teardown, then V4L2 unregistration. */
2075 xvip_m2m_dma_deinit(xdev->dma);
2078 v4l2_device_unregister(&xdev->v4l2_dev);
/* Platform driver remove: delegates to the composite teardown helper. */
2082 static int xvip_m2m_remove(struct platform_device *pdev)
2084 xvip_composite_remove(pdev);
/* Device-tree match table; exported for module autoloading. */
2088 static const struct of_device_id xvip_m2m_of_id_table[] = {
2089 { .compatible = "xlnx,mem2mem" },
2092 MODULE_DEVICE_TABLE(of, xvip_m2m_of_id_table);
/* Platform driver glue for the mem2mem composite device. */
2094 static struct platform_driver xvip_m2m_driver = {
2096 .name = XVIP_M2M_NAME,
2097 .of_match_table = xvip_m2m_of_id_table,
2099 .probe = xvip_m2m_probe,
2100 .remove = xvip_m2m_remove,
2103 module_platform_driver(xvip_m2m_driver);
2105 MODULE_AUTHOR("Xilinx Inc.");
2106 MODULE_DESCRIPTION("Xilinx V4L2 mem2mem driver");
2107 MODULE_LICENSE("GPL v2");