4 * Copyright (C) 2013-2015 Ideas on Board
5 * Copyright (C) 2013-2015 Xilinx, Inc.
7 * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
8 * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
15 #include <linux/dma/xilinx_dma.h>
16 #include <linux/dma/xilinx_frmbuf.h>
17 #include <linux/lcm.h>
18 #include <linux/list.h>
19 #include <linux/module.h>
21 #include <linux/slab.h>
23 #include <media/v4l2-dev.h>
24 #include <media/v4l2-fh.h>
25 #include <media/v4l2-ioctl.h>
26 #include <media/videobuf2-v4l2.h>
27 #include <media/videobuf2-dma-contig.h>
29 #include "xilinx-dma.h"
30 #include "xilinx-vip.h"
31 #include "xilinx-vipp.h"
33 #define XVIP_DMA_DEF_FORMAT V4L2_PIX_FMT_YUYV
34 #define XVIP_DMA_DEF_WIDTH 1920
35 #define XVIP_DMA_DEF_HEIGHT 1080
37 /* Minimum and maximum widths are expressed in bytes */
38 #define XVIP_DMA_MIN_WIDTH 1U
39 #define XVIP_DMA_MAX_WIDTH 65535U
40 #define XVIP_DMA_MIN_HEIGHT 1U
41 #define XVIP_DMA_MAX_HEIGHT 8191U
43 /* -----------------------------------------------------------------------------
/*
 * xvip_dma_remote_subdev - Return the subdev connected to a DMA pad
 *
 * Find the entity at the far end of the link from @local and return it as a
 * V4L2 subdevice, or NULL when there is no remote pad or the remote entity is
 * not a subdev.
 * NOTE(review): @pad is presumably filled with the remote pad index in lines
 * elided from this view - confirm against the full source.
 */
47 static struct v4l2_subdev *
48 xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
50 struct media_pad *remote;
52 remote = media_entity_remote_pad(local);
53 if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
59 return media_entity_to_v4l2_subdev(remote->entity);
/*
 * xvip_dma_verify_format - Check the DMA format against the remote subdev
 *
 * Query the active pad format of the connected subdev and compare its media
 * bus code, width and height with the DMA engine's configured V4L2 format
 * (pix_mp or pix depending on the queue type). Failure returns are in lines
 * elided from this view.
 */
62 static int xvip_dma_verify_format(struct xvip_dma *dma)
64 struct v4l2_subdev_format fmt;
65 struct v4l2_subdev *subdev;
69 subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
73 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
74 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
/* -ENOIOCTLCMD means the subdev has no get_fmt op; report that as -EINVAL */
76 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
78 if (dma->fmtinfo->code != fmt.format.code)
/* Pick width/height from the multi- or single-planar format variant */
81 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
82 width = dma->format.fmt.pix_mp.width;
83 height = dma->format.fmt.pix_mp.height;
85 width = dma->format.fmt.pix.width;
86 height = dma->format.fmt.pix.height;
89 if (width != fmt.format.width || height != fmt.format.height)
95 /* -----------------------------------------------------------------------------
96 * Pipeline Stream Management
100 * xvip_pipeline_start_stop - Start or stop streaming on a pipeline
101 * @xdev: Composite video device
103 * @start: Start (when true) or stop (when false) the pipeline
105 * Walk the entities chain starting at @dma and start or stop all of them
107 * Return: 0 if successful, or the return value of the failed video::s_stream
108 * operation otherwise.
/*
 * Walk the media graph from the DMA's video entity and power + stream
 * on/off every subdev found, under mdev->graph_mutex. The per-subdev
 * streaming bookkeeping (xvip_subdev_set_streaming) ensures a subdev shared
 * between sub-graphs is only toggled once. Error-path lines (goto targets,
 * returns) are elided from this view.
 */
110 static int xvip_pipeline_start_stop(struct xvip_composite_device *xdev,
111 struct xvip_dma *dma, bool start)
113 struct media_graph graph;
114 struct media_entity *entity = &dma->video.entity;
115 struct media_device *mdev = entity->graph_obj.mdev;
116 struct v4l2_subdev *subdev;
120 mutex_lock(&mdev->graph_mutex);
122 /* Walk the graph to locate the subdev nodes */
123 ret = media_graph_walk_init(&graph, mdev);
127 media_graph_walk_start(&graph, entity);
129 while ((entity = media_graph_walk_next(&graph))) {
130 /* We want to stream on/off only subdevs */
131 if (!is_media_entity_v4l2_subdev(entity))
134 subdev = media_entity_to_v4l2_subdev(entity);
136 /* This is to maintain list of stream on/off devices */
137 is_streaming = xvip_subdev_set_streaming(xdev, subdev, start);
140 * start or stop the subdev only once in case if they are
141 * shared between sub-graphs
143 if (start && !is_streaming) {
144 /* power-on subdevice */
145 ret = v4l2_subdev_call(subdev, core, s_power, 1);
146 if (ret < 0 && ret != -ENOIOCTLCMD) {
/* Roll back the streaming flag when power-on fails */
148 "s_power on failed on subdev\n");
149 xvip_subdev_set_streaming(xdev, subdev, 0);
153 /* stream-on subdevice */
154 ret = v4l2_subdev_call(subdev, video, s_stream, 1);
155 if (ret < 0 && ret != -ENOIOCTLCMD) {
/* Stream-on failed: power the subdev back off and clear the flag */
157 "s_stream on failed on subdev\n");
158 v4l2_subdev_call(subdev, core, s_power, 0);
159 xvip_subdev_set_streaming(xdev, subdev, 0);
161 } else if (!start && is_streaming) {
162 /* stream-off subdevice */
163 ret = v4l2_subdev_call(subdev, video, s_stream, 0);
164 if (ret < 0 && ret != -ENOIOCTLCMD) {
/* Stream-off failed: mark the subdev as still streaming */
166 "s_stream off failed on subdev\n");
167 xvip_subdev_set_streaming(xdev, subdev, 1);
170 /* power-off subdevice */
171 ret = v4l2_subdev_call(subdev, core, s_power, 0);
172 if (ret < 0 && ret != -ENOIOCTLCMD)
174 "s_power off failed on subdev\n");
180 mutex_unlock(&mdev->graph_mutex);
181 media_graph_walk_cleanup(&graph);
186 * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
187 * @pipe: The pipeline
188 * @on: Turn the stream on when true or off when false
190 * The pipeline is shared between all DMA engines connected at its input and
191 * output. While the stream state of DMA engines can be controlled
192 * independently, pipelines have a shared stream state that enable or disable
193 * all entities in the pipeline. For this reason the pipeline uses a streaming
194 * counter that tracks the number of DMA engines that have requested the stream
195 * to be enabled. This will walk the graph starting from each DMA and enable or
196 * disable the entities in the path.
198 * When called with the @on argument set to true, this function will increment
199 * the pipeline streaming count. If the streaming count reaches the number of
200 * DMA engines in the pipeline it will enable all entities that belong to the
203 * Similarly, when called with the @on argument set to false, this function will
204 * decrement the pipeline streaming count and disable all entities in the
205 * pipeline when the streaming count reaches zero.
207 * Return: 0 if successful, or the return value of the failed video::s_stream
208 * operation otherwise. Stopping the pipeline never fails. The pipeline state is
209 * not updated when the operation fails.
211 static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
213 struct xvip_composite_device *xdev;
214 struct xvip_dma *dma;
/*
 * Serialize the shared stream_count under pipe->lock; the last DMA to
 * request start (count == num_dmas - 1) triggers the actual start of all
 * DMAs, and the last one to stop (count reaches 0) stops them all.
 * NOTE(review): xdev is presumably assigned from pipe->xdev in a line
 * elided from this view - confirm.
 */
217 mutex_lock(&pipe->lock);
221 if (pipe->stream_count == pipe->num_dmas - 1) {
223 * This will iterate the DMAs and the stream-on of
224 * subdevs may not be sequential due to multiple
227 list_for_each_entry(dma, &xdev->dmas, list) {
228 ret = xvip_pipeline_start_stop(xdev, dma, true);
233 pipe->stream_count++;
/* Stop path: disable every DMA only when the last user goes away */
235 if (--pipe->stream_count == 0)
236 list_for_each_entry(dma, &xdev->dmas, list)
237 xvip_pipeline_start_stop(xdev, dma, false);
241 mutex_unlock(&pipe->lock);
/*
 * xvip_pipeline_validate - Count the DMA video nodes reachable from @start
 *
 * Walk the media graph from @start's video entity, count V4L2 I/O entities
 * by direction (sink pad => input, otherwise output), and record the total
 * in pipe->num_dmas. Fails (in elided lines) when no DMA is found.
 */
245 static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
246 struct xvip_dma *start)
248 struct media_graph graph;
249 struct media_entity *entity = &start->video.entity;
250 struct media_device *mdev = entity->graph_obj.mdev;
251 unsigned int num_inputs = 0;
252 unsigned int num_outputs = 0;
255 mutex_lock(&mdev->graph_mutex);
257 /* Walk the graph to locate the video nodes. */
258 ret = media_graph_walk_init(&graph, mdev);
260 mutex_unlock(&mdev->graph_mutex);
264 media_graph_walk_start(&graph, entity);
266 while ((entity = media_graph_walk_next(&graph))) {
267 struct xvip_dma *dma;
/* Only V4L2 I/O entities (video nodes) are of interest here */
269 if (entity->function != MEDIA_ENT_F_IO_V4L)
272 dma = to_xvip_dma(media_entity_to_video_device(entity));
274 if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
281 mutex_unlock(&mdev->graph_mutex);
283 media_graph_walk_cleanup(&graph);
285 /* We need at least one DMA to proceed */
286 if (num_outputs == 0 && num_inputs == 0)
289 pipe->num_dmas = num_inputs + num_outputs;
290 pipe->xdev = start->xdev;
295 static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
301 * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
302 * @pipe: the pipeline
304 * Decrease the pipeline use count and clean it up if we were the last user.
306 static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
/* use_count is protected by pipe->lock; only the last user cleans up */
308 mutex_lock(&pipe->lock);
310 /* If we're the last user clean up the pipeline. */
311 if (--pipe->use_count == 0)
312 __xvip_pipeline_cleanup(pipe);
314 mutex_unlock(&pipe->lock);
318 * xvip_pipeline_prepare - Prepare the pipeline for streaming
319 * @pipe: the pipeline
320 * @dma: DMA engine at one end of the pipeline
322 * Validate the pipeline if no user exists yet, otherwise just increase the use
325 * Return: 0 if successful or -EPIPE if the pipeline is not valid.
327 static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
328 struct xvip_dma *dma)
332 mutex_lock(&pipe->lock);
334 /* If we're the first user validate and initialize the pipeline. */
335 if (pipe->use_count == 0) {
336 ret = xvip_pipeline_validate(pipe, dma);
/* Validation failed: undo any partial pipeline state */
338 __xvip_pipeline_cleanup(pipe);
347 mutex_unlock(&pipe->lock);
351 /* -----------------------------------------------------------------------------
352 * videobuf2 queue operations
356 * struct xvip_dma_buffer - Video DMA buffer
357 * @buf: vb2 buffer base object
358 * @queue: buffer list entry in the DMA engine queued buffers list
359 * @dma: DMA channel that uses the buffer
360 * @desc: Descriptor associated with this structure
362 struct xvip_dma_buffer {
363 struct vb2_v4l2_buffer buf;
364 struct list_head queue;
365 struct xvip_dma *dma;
366 struct dma_async_tx_descriptor *desc;
/* Map a vb2_v4l2_buffer pointer back to its containing xvip_dma_buffer */
369 #define to_xvip_dma_buffer(vb) container_of(vb, struct xvip_dma_buffer, buf)
/*
 * xvip_dma_complete - dmaengine completion callback for one frame
 *
 * Remove the buffer from the queued list, stamp field/sequence/timestamp,
 * set the plane payload(s) from the configured format, and hand the buffer
 * back to videobuf2 as DONE. Runs in dmaengine callback context, hence the
 * plain spin_lock on queued_lock.
 */
371 static void xvip_dma_complete(void *param)
373 struct xvip_dma_buffer *buf = param;
374 struct xvip_dma *dma = buf->dma;
379 spin_lock(&dma->queued_lock);
380 list_del(&buf->queue);
381 spin_unlock(&dma->queued_lock);
383 buf->buf.field = V4L2_FIELD_NONE;
384 buf->buf.sequence = dma->sequence++;
385 buf->buf.vb2_buf.timestamp = ktime_get_ns();
/* Query the field ID from the frmbuf DMA for interlaced streams */
387 status = xilinx_xdma_get_fid(dma->dma, buf->desc, &fid);
389 if (((V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) &&
390 dma->format.fmt.pix_mp.field == V4L2_FIELD_ALTERNATE) ||
391 dma->format.fmt.pix.field == V4L2_FIELD_ALTERNATE) {
393 * fid = 1 is odd field i.e. V4L2_FIELD_TOP.
394 * fid = 0 is even field i.e. V4L2_FIELD_BOTTOM.
396 buf->buf.field = fid ?
397 V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
/* Repeated field id: bump the sequence so frame pairing stays correct */
399 if (fid == dma->prev_fid)
400 buf->buf.sequence = dma->sequence++;
/* Two fields make one frame, so halve the sequence number */
402 buf->buf.sequence >>= 1;
407 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
408 for (i = 0; i < dma->fmtinfo->buffers; i++) {
410 dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
411 vb2_set_plane_payload(&buf->buf.vb2_buf, i, sizeimage);
414 sizeimage = dma->format.fmt.pix.sizeimage;
415 vb2_set_plane_payload(&buf->buf.vb2_buf, 0, sizeimage);
418 vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
/*
 * xvip_dma_queue_setup - vb2 .queue_setup: negotiate plane count and sizes
 *
 * When *nplanes is already set (CREATE_BUFS path) validate the caller's
 * sizes against the configured format; otherwise report the driver's plane
 * count and per-plane sizeimage.
 */
422 xvip_dma_queue_setup(struct vb2_queue *vq,
423 unsigned int *nbuffers, unsigned int *nplanes,
424 unsigned int sizes[], struct device *alloc_devs[])
426 struct xvip_dma *dma = vb2_get_drv_priv(vq);
430 /* Multi planar case: Make sure the image size is large enough */
431 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
433 if (*nplanes != dma->format.fmt.pix_mp.num_planes)
436 for (i = 0; i < *nplanes; i++) {
438 dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
439 if (sizes[i] < sizeimage)
/* REQBUFS path: report the driver's plane layout */
443 *nplanes = dma->fmtinfo->buffers;
444 for (i = 0; i < dma->fmtinfo->buffers; i++) {
446 dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
447 sizes[i] = sizeimage;
453 /* Single planar case: Make sure the image size is large enough */
454 sizeimage = dma->format.fmt.pix.sizeimage;
456 return sizes[0] < sizeimage ? -EINVAL : 0;
459 sizes[0] = sizeimage;
/*
 * xvip_dma_buffer_prepare - vb2 .buf_prepare: bind the buffer to its DMA
 * engine. Remainder of the body (presumably buf->dma assignment and return)
 * is elided from this view.
 */
464 static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
466 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
467 struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
468 struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
/*
 * xvip_dma_buffer_queue - vb2 .buf_queue: build and submit a DMA descriptor
 *
 * Program the interleaved transfer template (dma->xt / dma->sgl[0]) from the
 * configured V4L2 format, prepare a dmaengine descriptor for the buffer,
 * record the buffer on the queued list and submit it. The transfer is only
 * issued immediately if streaming has already started.
 */
475 static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
477 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
478 struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
479 struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
480 struct dma_async_tx_descriptor *desc;
481 dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
484 u32 padding_factor_nume, padding_factor_deno, bpl_nume, bpl_deno;
/* Capture: device writes to memory; Output: device reads from memory */
487 if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
488 dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
489 flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
490 dma->xt.dir = DMA_DEV_TO_MEM;
491 dma->xt.src_sgl = false;
492 dma->xt.dst_sgl = true;
493 dma->xt.dst_start = addr;
494 } else if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
495 dma->queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
496 flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
497 dma->xt.dir = DMA_MEM_TO_DEV;
498 dma->xt.src_sgl = true;
499 dma->xt.dst_sgl = false;
500 dma->xt.src_start = addr;
504 * DMA IP supports only 2 planes, so one datachunk is sufficient
505 * to get start address of 2nd plane
507 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
508 struct v4l2_pix_format_mplane *pix_mp;
510 pix_mp = &dma->format.fmt.pix_mp;
511 xilinx_xdma_v4l2_config(dma->dma, pix_mp->pixelformat);
512 xvip_width_padding_factor(pix_mp->pixelformat,
513 &padding_factor_nume,
514 &padding_factor_deno);
515 xvip_bpl_scaling_factor(pix_mp->pixelformat, &bpl_nume,
/* sgl[0].size is the active bytes per line after format scaling;
 * icg is the gap up to bytesperline (the per-line padding) */
517 dma->xt.frame_size = dma->fmtinfo->num_planes;
518 dma->sgl[0].size = (pix_mp->width * dma->fmtinfo->bpl_factor *
519 padding_factor_nume * bpl_nume) /
520 (padding_factor_deno * bpl_deno);
521 dma->sgl[0].icg = pix_mp->plane_fmt[0].bytesperline -
523 dma->xt.numf = pix_mp->height;
526 * dst_icg is the number of bytes to jump after last luma addr
527 * and before first chroma addr
530 /* Handling contiguous data with mplanes */
531 if (dma->fmtinfo->buffers == 1) {
532 dma->sgl[0].dst_icg = 0;
534 /* Handling non-contiguous data with mplanes */
535 if (dma->fmtinfo->buffers == 2) {
536 dma_addr_t chroma_addr =
537 vb2_dma_contig_plane_dma_addr(vb, 1);
538 luma_size = pix_mp->plane_fmt[0].bytesperline *
/* Gap between end of luma and start of chroma plane */
540 if (chroma_addr > addr)
541 dma->sgl[0].dst_icg = chroma_addr -
546 struct v4l2_pix_format *pix;
548 pix = &dma->format.fmt.pix;
549 xilinx_xdma_v4l2_config(dma->dma, pix->pixelformat);
550 xvip_width_padding_factor(pix->pixelformat,
551 &padding_factor_nume,
552 &padding_factor_deno);
553 xvip_bpl_scaling_factor(pix->pixelformat, &bpl_nume,
555 dma->xt.frame_size = dma->fmtinfo->num_planes;
556 dma->sgl[0].size = (pix->width * dma->fmtinfo->bpl_factor *
557 padding_factor_nume * bpl_nume) /
558 (padding_factor_deno * bpl_deno);
559 dma->sgl[0].icg = pix->bytesperline - dma->sgl[0].size;
560 dma->xt.numf = pix->height;
561 dma->sgl[0].dst_icg = 0;
564 desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
/* Descriptor preparation failed: return the buffer as errored */
566 dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
567 vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
570 desc->callback = xvip_dma_complete;
571 desc->callback_param = buf;
/* Translate the V4L2 field into a frmbuf field ID (values elided) */
574 if (buf->buf.field == V4L2_FIELD_TOP)
576 else if (buf->buf.field == V4L2_FIELD_BOTTOM)
578 else if (buf->buf.field == V4L2_FIELD_NONE)
581 xilinx_xdma_set_fid(dma->dma, desc, fid);
583 spin_lock_irq(&dma->queued_lock);
584 list_add_tail(&buf->queue, &dma->queued_bufs);
585 spin_unlock_irq(&dma->queued_lock);
587 dmaengine_submit(desc);
/* Kick the engine only once streaming has started */
589 if (vb2_is_streaming(&dma->queue))
590 dma_async_issue_pending(dma->dma);
/*
 * xvip_dma_start_streaming - vb2 .start_streaming
 *
 * Acquire (or join) the shared pipeline object, start the media pipeline,
 * verify the DMA format against the connected subdev, prepare the pipeline,
 * kick the DMA engine and finally enable streaming on the whole pipeline.
 * On failure (error labels elided from this view) all queued buffers are
 * returned to videobuf2 in the QUEUED state.
 */
593 static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
595 struct xvip_dma *dma = vb2_get_drv_priv(vq);
596 struct xvip_dma_buffer *buf, *nbuf;
597 struct xvip_pipeline *pipe;
604 * Start streaming on the pipeline. No link touching an entity in the
605 * pipeline can be activated or deactivated once streaming is started.
607 * Use the pipeline object embedded in the first DMA object that starts
610 mutex_lock(&dma->xdev->lock);
611 pipe = dma->video.entity.pipe
612 ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;
614 ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
615 mutex_unlock(&dma->xdev->lock);
619 /* Verify that the configured format matches the output of the
622 ret = xvip_dma_verify_format(dma);
626 ret = xvip_pipeline_prepare(pipe, dma);
630 /* Start the DMA engine. This must be done before starting the blocks
631 * in the pipeline to avoid DMA synchronization issues.
633 dma_async_issue_pending(dma->dma);
635 /* Start the pipeline. */
636 ret = xvip_pipeline_set_stream(pipe, true);
/* Error paths: unwind pipeline start and terminate the DMA engine */
643 media_pipeline_stop(&dma->video.entity);
646 dmaengine_terminate_all(dma->dma);
647 /* Give back all queued buffers to videobuf2. */
648 spin_lock_irq(&dma->queued_lock);
649 list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
650 vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
651 list_del(&buf->queue);
653 spin_unlock_irq(&dma->queued_lock);
/*
 * xvip_dma_stop_streaming - vb2 .stop_streaming
 *
 * Disable streaming on the pipeline, terminate the DMA engine, release the
 * pipeline, and return every still-queued buffer to videobuf2 as errored.
 */
658 static void xvip_dma_stop_streaming(struct vb2_queue *vq)
660 struct xvip_dma *dma = vb2_get_drv_priv(vq);
661 struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
662 struct xvip_dma_buffer *buf, *nbuf;
664 /* Stop the pipeline. */
665 xvip_pipeline_set_stream(pipe, false);
667 /* Stop and reset the DMA engine. */
668 dmaengine_terminate_all(dma->dma);
670 /* Cleanup the pipeline and mark it as being stopped. */
671 xvip_pipeline_cleanup(pipe);
672 media_pipeline_stop(&dma->video.entity);
674 /* Give back all queued buffers to videobuf2. */
675 spin_lock_irq(&dma->queued_lock);
676 list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
677 vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
678 list_del(&buf->queue);
680 spin_unlock_irq(&dma->queued_lock);
/* videobuf2 queue operations for the Xilinx video DMA engines */
683 static const struct vb2_ops xvip_dma_queue_qops = {
684 .queue_setup = xvip_dma_queue_setup,
685 .buf_prepare = xvip_dma_buffer_prepare,
686 .buf_queue = xvip_dma_buffer_queue,
687 .wait_prepare = vb2_ops_wait_prepare,
688 .wait_finish = vb2_ops_wait_finish,
689 .start_streaming = xvip_dma_start_streaming,
690 .stop_streaming = xvip_dma_stop_streaming,
693 /* -----------------------------------------------------------------------------
/*
 * xvip_dma_querycap - VIDIOC_QUERYCAP handler
 *
 * Report the composite device capabilities plus a device_caps value derived
 * from this node's buffer type. break statements between the switch cases
 * are in lines elided from this view.
 */
698 xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
700 struct v4l2_fh *vfh = file->private_data;
701 struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
703 cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
704 | dma->xdev->v4l2_caps;
706 cap->device_caps = V4L2_CAP_STREAMING;
707 switch (dma->queue.type) {
708 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
709 cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE_MPLANE;
711 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
712 cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
714 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
715 cap->device_caps |= V4L2_CAP_VIDEO_OUTPUT_MPLANE;
717 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
718 cap->device_caps |= V4L2_CAP_VIDEO_OUTPUT;
722 strlcpy(cap->driver, "xilinx-vipp", sizeof(cap->driver));
723 strlcpy(cap->card, dma->video.name, sizeof(cap->card));
724 snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s:%u",
725 dma->xdev->dev->of_node->name, dma->port);
/*
 * xvip_xdma_enum_fmt - Enumerate fourccs supported by the frmbuf DMA channel
 *
 * Query the frmbuf driver for its supported V4L2 formats, then filter them
 * down to those whose media bus code matches the connected subdev's active
 * format. The filtered list is cached in dma->poss_v4l2_fmts and only
 * regenerated when the remote media bus code changes. Finally return the
 * entry selected by f->index.
 */
730 static int xvip_xdma_enum_fmt(struct xvip_dma *dma, struct v4l2_fmtdesc *f,
731 struct v4l2_subdev_format *v4l_fmt)
733 const struct xvip_video_format *fmt;
735 u32 i, fmt_cnt, *fmts;
737 ret = xilinx_xdma_get_v4l2_vid_fmts(dma->dma, &fmt_cnt, &fmts);
741 /* Has media pad value changed? */
742 if (v4l_fmt->format.code != dma->remote_subdev_med_bus ||
743 !dma->remote_subdev_med_bus) {
744 /* Re-generate legal list of fourcc codes */
745 dma->poss_v4l2_fmt_cnt = 0;
746 dma->remote_subdev_med_bus = v4l_fmt->format.code;
/* Lazily allocate the cache large enough for every reported format */
748 if (!dma->poss_v4l2_fmts) {
749 dma->poss_v4l2_fmts =
750 devm_kzalloc(&dma->video.dev,
751 sizeof(u32) * fmt_cnt,
753 if (!dma->poss_v4l2_fmts)
757 for (i = 0; i < fmt_cnt; i++) {
758 fmt = xvip_get_format_by_fourcc(fmts[i]);
/* Keep only fourccs matching the remote pad's media bus code */
762 if (fmt->code != dma->remote_subdev_med_bus)
765 dma->poss_v4l2_fmts[dma->poss_v4l2_fmt_cnt++] = fmts[i];
769 /* Return err if index is greater than count of legal values */
770 if (f->index >= dma->poss_v4l2_fmt_cnt)
773 /* Else return pix format in table */
774 fmt = xvip_get_format_by_fourcc(dma->poss_v4l2_fmts[f->index]);
778 f->pixelformat = fmt->fourcc;
779 strlcpy(f->description, fmt->description,
780 sizeof(f->description));
785 /* FIXME: without this callback function, some applications are not configured
786 * with correct formats, and it results in frames in wrong format. Whether this
787 * callback needs to be required is not clearly defined, so it should be
788 * clarified through the mailing list.
/*
 * xvip_dma_enum_format - VIDIOC_ENUM_FMT handler
 *
 * Fetch the connected subdev's active media bus format, then either
 * enumerate formats through the frmbuf DMA helper, or fall back to the
 * single pix format matching the subdev's media bus code.
 */
791 xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
793 struct v4l2_fh *vfh = file->private_data;
794 struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
795 struct v4l2_subdev *subdev;
796 struct v4l2_subdev_format v4l_fmt;
797 const struct xvip_video_format *fmt;
800 /* Establish media pad format */
801 subdev = xvip_dma_remote_subdev(&dma->pad, &v4l_fmt.pad);
805 v4l_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
806 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &v4l_fmt);
808 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
811 * In case of frmbuf DMA, this will invoke frmbuf driver specific APIs
812 * to enumerate formats otherwise return the pix format corresponding
813 * to subdev's media bus format. This kind of separation would be
814 * helpful for clean up and upstreaming.
816 err = xvip_xdma_enum_fmt(dma, f, &v4l_fmt);
821 * This logic will just return one pix format based on subdev's
827 fmt = xvip_get_format_by_code(v4l_fmt.format.code);
831 f->pixelformat = fmt->fourcc;
832 strlcpy(f->description, fmt->description,
833 sizeof(f->description));
/*
 * xvip_dma_get_format - VIDIOC_G_FMT handler
 *
 * Copy the currently configured format (pix_mp or pix variant) to userspace.
 */
839 xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
841 struct v4l2_fh *vfh = file->private_data;
842 struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
844 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
845 format->fmt.pix_mp = dma->format.fmt.pix_mp;
847 format->fmt.pix = dma->format.fmt.pix;
/*
 * __xvip_dma_try_format - Clamp and complete a requested V4L2 format
 *
 * Resolve the requested pixelformat (falling back to the driver default),
 * derive the field from the connected subdev, clamp width/height to the DMA
 * limits respecting the engine's byte alignment, and compute bytesperline
 * and sizeimage for each plane. When @fmtinfo is non-NULL the resolved
 * format info is returned through it (assignment line elided from this
 * view - confirm).
 */
__xvip_dma_try_format(struct xvip_dma *dma,
854 struct v4l2_format *format,
855 const struct xvip_video_format **fmtinfo)
857 const struct xvip_video_format *info;
858 unsigned int min_width;
859 unsigned int max_width;
860 unsigned int min_bpl;
861 unsigned int max_bpl;
865 unsigned int i, hsub, vsub, plane_width, plane_height;
867 unsigned int padding_factor_nume, padding_factor_deno;
868 unsigned int bpl_nume, bpl_deno;
869 struct v4l2_subdev_format fmt;
870 struct v4l2_subdev *subdev;
873 subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
877 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
878 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
/* Propagate the subdev's field mode (interlaced alternate or none) */
882 if (fmt.format.field == V4L2_FIELD_ALTERNATE) {
883 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
884 dma->format.fmt.pix_mp.field = V4L2_FIELD_ALTERNATE;
886 dma->format.fmt.pix.field = V4L2_FIELD_ALTERNATE;
888 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
889 dma->format.fmt.pix_mp.field = V4L2_FIELD_NONE;
891 dma->format.fmt.pix.field = V4L2_FIELD_NONE;
894 /* Retrieve format information and select the default format if the
895 * requested format isn't supported.
897 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
898 fourcc = format->fmt.pix_mp.pixelformat;
900 fourcc = format->fmt.pix.pixelformat;
902 info = xvip_get_format_by_fourcc(fourcc);
905 info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
907 xvip_width_padding_factor(info->fourcc, &padding_factor_nume,
908 &padding_factor_deno);
909 xvip_bpl_scaling_factor(info->fourcc, &bpl_nume, &bpl_deno);
911 /* The transfer alignment requirements are expressed in bytes. Compute
912 * the minimum and maximum values, clamp the requested width and convert
915 align = lcm(dma->align, info->bpp >> 3);
916 min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
917 max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);
919 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
920 struct v4l2_pix_format_mplane *pix_mp;
921 struct v4l2_plane_pix_format *plane_fmt;
923 pix_mp = &format->fmt.pix_mp;
924 plane_fmt = pix_mp->plane_fmt;
925 pix_mp->field = dma->format.fmt.pix_mp.field;
926 width = rounddown(pix_mp->width * info->bpl_factor, align);
927 pix_mp->width = clamp(width, min_width, max_width) /
929 pix_mp->height = clamp(pix_mp->height, XVIP_DMA_MIN_HEIGHT,
930 XVIP_DMA_MAX_HEIGHT);
933 * Clamp the requested bytes per line value. If the maximum
934 * bytes per line value is zero, the module doesn't support
935 * user configurable line sizes. Override the requested value
936 * with the minimum in that case.
939 max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
941 /* Handling contiguous data with mplanes */
942 if (info->buffers == 1) {
943 min_bpl = (pix_mp->width * info->bpl_factor *
944 padding_factor_nume * bpl_nume) /
945 (padding_factor_deno * bpl_deno);
946 min_bpl = roundup(min_bpl, dma->align);
947 bpl = roundup(plane_fmt[0].bytesperline, dma->align);
948 plane_fmt[0].bytesperline = clamp(bpl, min_bpl,
951 if (info->num_planes == 1) {
952 /* Single plane formats */
953 plane_fmt[0].sizeimage =
954 plane_fmt[0].bytesperline *
957 /* Multi plane formats */
958 plane_fmt[0].sizeimage =
959 DIV_ROUND_UP(plane_fmt[0].bytesperline *
964 /* Handling non-contiguous data with mplanes */
/* Per-plane: apply chroma subsampling to planes beyond the first */
967 for (i = 0; i < info->num_planes; i++) {
968 plane_width = pix_mp->width / (i ? hsub : 1);
969 plane_height = pix_mp->height / (i ? vsub : 1);
970 min_bpl = (plane_width * info->bpl_factor *
971 padding_factor_nume * bpl_nume) /
972 (padding_factor_deno * bpl_deno);
973 min_bpl = roundup(min_bpl, dma->align);
974 bpl = rounddown(plane_fmt[i].bytesperline,
976 plane_fmt[i].bytesperline =
977 clamp(bpl, min_bpl, max_bpl);
978 plane_fmt[i].sizeimage =
979 plane_fmt[i].bytesperline *
984 struct v4l2_pix_format *pix;
986 pix = &format->fmt.pix;
987 pix->field = dma->format.fmt.pix.field;
988 width = rounddown(pix->width * info->bpl_factor, align);
989 pix->width = clamp(width, min_width, max_width) /
991 pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
992 XVIP_DMA_MAX_HEIGHT);
994 min_bpl = (pix->width * info->bpl_factor *
995 padding_factor_nume * bpl_nume) /
996 (padding_factor_deno * bpl_deno);
997 min_bpl = roundup(min_bpl, dma->align);
998 max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
999 bpl = rounddown(pix->bytesperline, dma->align);
1000 pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
1001 pix->sizeimage = pix->width * pix->height * info->bpp / 8;
/*
 * xvip_dma_try_format - VIDIOC_TRY_FMT handler: adjust the requested format
 * in place without applying it (fmtinfo output not needed here).
 */
1009 xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
1011 struct v4l2_fh *vfh = file->private_data;
1012 struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
1014 __xvip_dma_try_format(dma, format, NULL);
/*
 * xvip_dma_set_format - VIDIOC_S_FMT handler
 *
 * Adjust the requested format, refuse the change while buffers are in use,
 * then store the adjusted format and its format info on the DMA engine.
 */
1019 xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
1021 struct v4l2_fh *vfh = file->private_data;
1022 struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
1023 const struct xvip_video_format *info;
1025 __xvip_dma_try_format(dma, format, &info);
/* The format cannot change while the vb2 queue owns buffers */
1027 if (vb2_is_busy(&dma->queue))
1030 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
1031 dma->format.fmt.pix_mp = format->fmt.pix_mp;
1033 dma->format.fmt.pix = format->fmt.pix;
1035 dma->fmtinfo = info;
/* V4L2 ioctl handlers; buffer ioctls are delegated to the vb2 helpers */
1040 static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
1041 .vidioc_querycap = xvip_dma_querycap,
1042 .vidioc_enum_fmt_vid_cap = xvip_dma_enum_format,
1043 .vidioc_enum_fmt_vid_cap_mplane = xvip_dma_enum_format,
1044 .vidioc_enum_fmt_vid_out = xvip_dma_enum_format,
1045 .vidioc_enum_fmt_vid_out_mplane = xvip_dma_enum_format,
1046 .vidioc_g_fmt_vid_cap = xvip_dma_get_format,
1047 .vidioc_g_fmt_vid_cap_mplane = xvip_dma_get_format,
1048 .vidioc_g_fmt_vid_out = xvip_dma_get_format,
1049 .vidioc_g_fmt_vid_out_mplane = xvip_dma_get_format,
1050 .vidioc_s_fmt_vid_cap = xvip_dma_set_format,
1051 .vidioc_s_fmt_vid_cap_mplane = xvip_dma_set_format,
1052 .vidioc_s_fmt_vid_out = xvip_dma_set_format,
1053 .vidioc_s_fmt_vid_out_mplane = xvip_dma_set_format,
1054 .vidioc_try_fmt_vid_cap = xvip_dma_try_format,
1055 .vidioc_try_fmt_vid_cap_mplane = xvip_dma_try_format,
1056 .vidioc_try_fmt_vid_out = xvip_dma_try_format,
1057 .vidioc_try_fmt_vid_out_mplane = xvip_dma_try_format,
1058 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1059 .vidioc_querybuf = vb2_ioctl_querybuf,
1060 .vidioc_qbuf = vb2_ioctl_qbuf,
1061 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1062 .vidioc_create_bufs = vb2_ioctl_create_bufs,
1063 .vidioc_expbuf = vb2_ioctl_expbuf,
1064 .vidioc_streamon = vb2_ioctl_streamon,
1065 .vidioc_streamoff = vb2_ioctl_streamoff,
1068 /* -----------------------------------------------------------------------------
1069 * V4L2 file operations
/* File operations; all mmap/poll/release plumbing comes from vb2 helpers */
1072 static const struct v4l2_file_operations xvip_dma_fops = {
1073 .owner = THIS_MODULE,
1074 .unlocked_ioctl = video_ioctl2,
1075 .open = v4l2_fh_open,
1076 .release = vb2_fop_release,
1077 .poll = vb2_fop_poll,
1078 .mmap = vb2_fop_mmap,
1081 /* -----------------------------------------------------------------------------
1082 * Xilinx Video DMA Core
/*
 * xvip_dma_init - Initialize one DMA engine and its video device node
 *
 * Set up locks and buffer lists, program a default format (pix or pix_mp),
 * initialize the media pad and video node, create the vb2 queue, request the
 * named DMA channel ("port%u") and register the video device. On failure
 * (error labels elided from this view) xvip_dma_cleanup() unwinds.
 */
1085 int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
1086 enum v4l2_buf_type type, unsigned int port)
1090 u32 i, hsub, vsub, width, height;
1094 mutex_init(&dma->lock);
1095 mutex_init(&dma->pipe.lock);
1096 INIT_LIST_HEAD(&dma->queued_bufs);
1097 spin_lock_init(&dma->queued_lock);
1099 dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
1100 dma->format.type = type;
1102 if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
1103 struct v4l2_pix_format_mplane *pix_mp;
1105 pix_mp = &dma->format.fmt.pix_mp;
1106 pix_mp->pixelformat = dma->fmtinfo->fourcc;
1107 pix_mp->colorspace = V4L2_COLORSPACE_SRGB;
1108 pix_mp->field = V4L2_FIELD_NONE;
1109 pix_mp->width = XVIP_DMA_DEF_WIDTH;
1111 /* Handling contiguous data with mplanes */
1112 if (dma->fmtinfo->buffers == 1) {
1113 pix_mp->plane_fmt[0].bytesperline =
1114 pix_mp->width * dma->fmtinfo->bpl_factor;
1115 pix_mp->plane_fmt[0].sizeimage =
1116 pix_mp->width * pix_mp->height *
1117 dma->fmtinfo->bpp / 8;
1119 /* Handling non-contiguous data with mplanes */
1120 hsub = dma->fmtinfo->hsub;
1121 vsub = dma->fmtinfo->vsub;
1122 for (i = 0; i < dma->fmtinfo->buffers; i++) {
/* Chroma subsampling applies to planes after the first */
1123 width = pix_mp->width / (i ? hsub : 1);
1124 height = pix_mp->height / (i ? vsub : 1);
1125 pix_mp->plane_fmt[i].bytesperline =
1126 width * dma->fmtinfo->bpl_factor;
1127 pix_mp->plane_fmt[i].sizeimage = width * height;
1131 struct v4l2_pix_format *pix;
1133 pix = &dma->format.fmt.pix;
1134 pix->pixelformat = dma->fmtinfo->fourcc;
1135 pix->colorspace = V4L2_COLORSPACE_SRGB;
1136 pix->field = V4L2_FIELD_NONE;
1137 pix->width = XVIP_DMA_DEF_WIDTH;
1138 pix->height = XVIP_DMA_DEF_HEIGHT;
1139 pix->bytesperline = pix->width * dma->fmtinfo->bpl_factor;
1141 pix->width * pix->height * dma->fmtinfo->bpp / 8;
1144 /* Initialize the media entity... */
1145 if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1146 type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1147 dma->pad.flags = MEDIA_PAD_FL_SINK;
1149 dma->pad.flags = MEDIA_PAD_FL_SOURCE;
1151 ret = media_entity_pads_init(&dma->video.entity, 1, &dma->pad);
1155 /* ... and the video node... */
1156 dma->video.fops = &xvip_dma_fops;
1157 dma->video.v4l2_dev = &xdev->v4l2_dev;
1158 dma->video.queue = &dma->queue;
1159 snprintf(dma->video.name, sizeof(dma->video.name), "%s %s %u",
1160 xdev->dev->of_node->name,
1161 (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1162 type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1163 ? "output" : "input",
1166 dma->video.vfl_type = VFL_TYPE_GRABBER;
1167 if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1168 type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1169 dma->video.vfl_dir = VFL_DIR_RX;
1171 dma->video.vfl_dir = VFL_DIR_TX;
1173 dma->video.release = video_device_release_empty;
1174 dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
1175 dma->video.lock = &dma->lock;
1177 video_set_drvdata(&dma->video, dma);
1179 /* ... and the buffers queue... */
1180 /* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
1181 * V4L2 APIs would be inefficient. Testing on the command line with a
1182 * 'cat /dev/video?' thus won't be possible, but given that the driver
1183 * anyway requires a test tool to setup the pipeline before any video
1184 * stream can be started, requiring a specific V4L2 test tool as well
1185 * instead of 'cat' isn't really a drawback.
1187 dma->queue.type = type;
1188 dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
1189 dma->queue.lock = &dma->lock;
1190 dma->queue.drv_priv = dma;
1191 dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
1192 dma->queue.ops = &xvip_dma_queue_qops;
1193 dma->queue.mem_ops = &vb2_dma_contig_memops;
1194 dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
1195 | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
1196 dma->queue.dev = dma->xdev->dev;
1197 ret = vb2_queue_init(&dma->queue);
1199 dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
1203 /* ... and the DMA channel. */
1204 snprintf(name, sizeof(name), "port%u", port);
1205 dma->dma = dma_request_chan(dma->xdev->dev, name);
1206 if (IS_ERR(dma->dma)) {
/* -EPROBE_DEFER is expected while the DMA provider is not ready yet */
1207 ret = PTR_ERR(dma->dma);
1208 if (ret != -EPROBE_DEFER)
1209 dev_err(dma->xdev->dev,
1210 "No Video DMA channel found");
/* Cache the engine's address alignment for format negotiation */
1214 dma->align = 1 << dma->dma->device->copy_align;
1216 ret = video_register_device(&dma->video, VFL_TYPE_GRABBER, -1);
1218 dev_err(dma->xdev->dev, "failed to register video device\n");
1225 xvip_dma_cleanup(dma);
/*
 * xvip_dma_cleanup - Tear down a DMA engine initialized by xvip_dma_init()
 *
 * Safe to call from the init error path: each resource is only released
 * when it was actually acquired (registered node, valid channel pointer).
 */
1229 void xvip_dma_cleanup(struct xvip_dma *dma)
1231 if (video_is_registered(&dma->video))
1232 video_unregister_device(&dma->video);
1234 if (!IS_ERR(dma->dma))
1235 dma_release_channel(dma->dma);
1237 media_entity_cleanup(&dma->video.entity);
1239 mutex_destroy(&dma->lock);
1240 mutex_destroy(&dma->pipe.lock);