/*
 * Copyright (C) 2013-2015 Ideas on Board
 * Copyright (C) 2013-2015 Xilinx, Inc.
 *
 * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
 *           Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
15 #include <linux/dma/xilinx_dma.h>
16 #include <linux/dma/xilinx_frmbuf.h>
17 #include <linux/lcm.h>
18 #include <linux/list.h>
19 #include <linux/module.h>
21 #include <linux/slab.h>
23 #include <media/v4l2-dev.h>
24 #include <media/v4l2-fh.h>
25 #include <media/v4l2-ioctl.h>
26 #include <media/videobuf2-v4l2.h>
27 #include <media/videobuf2-dma-contig.h>
29 #include "xilinx-dma.h"
30 #include "xilinx-vip.h"
31 #include "xilinx-vipp.h"
/* Default format and resolution: YUYV at 1080p. */
#define XVIP_DMA_DEF_FORMAT	V4L2_PIX_FMT_YUYV
#define XVIP_DMA_DEF_WIDTH	1920
#define XVIP_DMA_DEF_HEIGHT	1080

/* Minimum and maximum widths are expressed in bytes */
#define XVIP_DMA_MIN_WIDTH	1U
#define XVIP_DMA_MAX_WIDTH	65535U
#define XVIP_DMA_MIN_HEIGHT	1U
#define XVIP_DMA_MAX_HEIGHT	8191U
43 /* -----------------------------------------------------------------------------
/*
 * xvip_dma_remote_subdev - Get the subdev at the remote end of @local's link
 * @local: local media pad of the DMA video node
 * @pad: output; presumably set to the remote pad index — the assignment is
 *       not visible in this chunk, TODO confirm
 *
 * Return: the V4L2 subdevice owning the remote pad. The early-exit branch
 * taken when there is no remote pad, or the remote entity is not a V4L2
 * subdev, is not visible here (presumably returns NULL).
 */
static struct v4l2_subdev *
xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
	struct media_pad *remote;

	remote = media_entity_remote_pad(local);
	/* Only a V4L2 subdev entity is a usable remote for the DMA pad. */
	if (!remote || !is_media_entity_v4l2_subdev(remote->entity))

	return media_entity_to_v4l2_subdev(remote->entity);
/*
 * xvip_dma_verify_format - Check the DMA format against the remote subdev
 * @dma: the DMA channel
 *
 * Query the active format on the connected subdev pad and compare its media
 * bus code and frame size with the format configured on the video node
 * (single- or multi-planar). -ENOIOCTLCMD from get_fmt is mapped to -EINVAL.
 * The mismatch return statements are not visible in this chunk (presumably
 * -EINVAL).
 */
static int xvip_dma_verify_format(struct xvip_dma *dma)
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;

	subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);

	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	/* The media bus code must match the configured pixel format. */
	if (dma->fmtinfo->code != fmt.format.code)

	if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
		width = dma->format.fmt.pix_mp.width;
		height = dma->format.fmt.pix_mp.height;
		width = dma->format.fmt.pix.width;
		height = dma->format.fmt.pix.height;

	/* The frame size must match as well. */
	if (width != fmt.format.width || height != fmt.format.height)
/* -----------------------------------------------------------------------------
 * Pipeline Stream Management
 */
/* Get the sink pad internally connected to a source pad in the given entity. */
static struct media_pad *xvip_get_entity_sink(struct media_entity *entity,
					      struct media_pad *source)
	/* The source pad can be NULL when the entity has no source pad. Return
	 * the first pad in that case, guaranteed to be a sink pad.
	 */
		return &entity->pads[0];

	/* Iterates through the pads to find a connected sink pad. */
	for (i = 0; i < entity->num_pads; ++i) {
		struct media_pad *sink = &entity->pads[i];

		/* Skip pads that are not sinks. */
		if (!(sink->flags & MEDIA_PAD_FL_SINK))

		/* A sink qualifies when the entity routes it to @source. */
		if (media_entity_has_route(entity, sink->index, source->index))
/**
 * xvip_pipeline_start_stop - Start or stop streaming on a pipeline
 * @xdev: Composite video device
 * @dma: DMA engine the entity walk starts from (the @dma kernel-doc line of
 *       the original is not visible in this chunk)
 * @start: Start (when true) or stop (when false) the pipeline
 *
 * Walk the entities chain starting @dma and start or stop all of them
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise.
 */
static int xvip_pipeline_start_stop(struct xvip_composite_device *xdev,
				    struct xvip_dma *dma, bool start)
	struct media_entity *entity;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;

	entity = &dma->video.entity;

	/* Hop to the internally routed sink pad of the current entity. */
	pad = xvip_get_entity_sink(entity, pad);

	if (!(pad->flags & MEDIA_PAD_FL_SINK))

	/* Follow the link to the next entity in the chain. */
	pad = media_entity_remote_pad(pad);
	if (!pad || !is_media_entity_v4l2_subdev(pad->entity))

	entity = pad->entity;
	subdev = media_entity_to_v4l2_subdev(entity);

	/*
	 * Record the new streaming state; presumably returns the previous
	 * state so shared subdevs are toggled only once — TODO confirm.
	 */
	is_streaming = xvip_subdev_set_streaming(xdev, subdev, start);

	/*
	 * start or stop the subdev only once in case if they are
	 * shared between sub-graphs
	 */
	if (start != is_streaming) {
		ret = v4l2_subdev_call(subdev, video, s_stream,
		if (start && ret < 0 && ret != -ENOIOCTLCMD) {
			dev_err(xdev->dev, "s_stream is failed on subdev\n");
			/* Roll back the recorded streaming state. */
			xvip_subdev_set_streaming(xdev, subdev, !start);
/**
 * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
 * @pipe: The pipeline
 * @on: Turn the stream on when true or off when false
 *
 * The pipeline is shared between all DMA engines connect at its input and
 * output. While the stream state of DMA engines can be controlled
 * independently, pipelines have a shared stream state that enable or disable
 * all entities in the pipeline. For this reason the pipeline uses a streaming
 * counter that tracks the number of DMA engines that have requested the stream
 * to be enabled. This will walk the graph starting from each DMA and enable or
 * disable the entities in the path.
 *
 * When called with the @on argument set to true, this function will increment
 * the pipeline streaming count. If the streaming count reaches the number of
 * DMA engines in the pipeline it will enable all entities that belong to the
 *
 * Similarly, when called with the @on argument set to false, this function will
 * decrement the pipeline streaming count and disable all entities in the
 * pipeline when the streaming count reaches zero.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise. Stopping the pipeline never fails. The pipeline state is
 * not updated when the operation fails.
 */
static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
	struct xvip_composite_device *xdev;
	struct xvip_dma *dma;

	/* Serialize stream_count updates against concurrent start/stop. */
	mutex_lock(&pipe->lock);

	/* The last DMA to start turns on the whole pipeline. */
	if (pipe->stream_count == pipe->num_dmas - 1) {
		/*
		 * This will iterate the DMAs and the stream-on of
		 * subdevs may not be sequential due to multiple
		 */
		list_for_each_entry(dma, &xdev->dmas, list) {
			ret = xvip_pipeline_start_stop(xdev, dma, true);

	pipe->stream_count++;

	/* The first DMA to stop turns the whole pipeline off. */
	if (--pipe->stream_count == 0)
		list_for_each_entry(dma, &xdev->dmas, list)
			xvip_pipeline_start_stop(xdev, dma, false);

	mutex_unlock(&pipe->lock);
/*
 * xvip_pipeline_validate - Validate the pipeline and count its DMA engines
 * @pipe: the pipeline to populate
 * @start: DMA engine the media graph walk starts from
 *
 * Walk the media graph from @start, count the V4L2 I/O (DMA) entities and
 * record the total in @pipe. Return: presumably 0 on success and an error
 * code when no DMA is found — the return statements are not visible in this
 * chunk.
 */
static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
				  struct xvip_dma *start)
	struct media_graph graph;
	struct media_entity *entity = &start->video.entity;
	struct media_device *mdev = entity->graph_obj.mdev;
	unsigned int num_inputs = 0;
	unsigned int num_outputs = 0;

	/* The graph walk requires the media device graph lock. */
	mutex_lock(&mdev->graph_mutex);

	/* Walk the graph to locate the video nodes. */
	ret = media_graph_walk_init(&graph, mdev);
		mutex_unlock(&mdev->graph_mutex);

	media_graph_walk_start(&graph, entity);

	while ((entity = media_graph_walk_next(&graph))) {
		struct xvip_dma *dma;

		/* Only V4L2 I/O entities (DMA video nodes) are of interest. */
		if (entity->function != MEDIA_ENT_F_IO_V4L)

		dma = to_xvip_dma(media_entity_to_video_device(entity));

		/* A sink pad on the video node means it receives video data. */
		if (dma->pad.flags & MEDIA_PAD_FL_SINK) {

	mutex_unlock(&mdev->graph_mutex);

	media_graph_walk_cleanup(&graph);

	/* We need at least one DMA to proceed */
	if (num_outputs == 0 && num_inputs == 0)

	pipe->num_dmas = num_inputs + num_outputs;
	pipe->xdev = start->xdev;
/* Reset the pipeline state; the function body is not visible in this chunk. */
static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
/**
 * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
 * @pipe: the pipeline
 *
 * Decrease the pipeline use count and clean it up if we were the last user.
 */
static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
	mutex_lock(&pipe->lock);

	/* If we're the last user clean up the pipeline. */
	if (--pipe->use_count == 0)
		__xvip_pipeline_cleanup(pipe);

	mutex_unlock(&pipe->lock);
/**
 * xvip_pipeline_prepare - Prepare the pipeline for streaming
 * @pipe: the pipeline
 * @dma: DMA engine at one end of the pipeline
 *
 * Validate the pipeline if no user exists yet, otherwise just increase the use
 * count. (The use-count increment itself is not visible in this chunk.)
 *
 * Return: 0 if successful or -EPIPE if the pipeline is not valid.
 */
static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
				 struct xvip_dma *dma)
	mutex_lock(&pipe->lock);

	/* If we're the first user validate and initialize the pipeline. */
	if (pipe->use_count == 0) {
		ret = xvip_pipeline_validate(pipe, dma);
			/* Validation failed: reset partially-filled state. */
			__xvip_pipeline_cleanup(pipe);

	mutex_unlock(&pipe->lock);
/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */
/**
 * struct xvip_dma_buffer - Video DMA buffer
 * @buf: vb2 buffer base object
 * @queue: buffer list entry in the DMA engine queued buffers list
 * @dma: DMA channel that uses the buffer
 * @desc: Descriptor associated with this structure
 */
struct xvip_dma_buffer {
	struct vb2_v4l2_buffer buf;
	struct list_head queue;
	struct xvip_dma *dma;
	struct dma_async_tx_descriptor *desc;

/* Map an embedded vb2_v4l2_buffer back to its xvip_dma_buffer container. */
#define to_xvip_dma_buffer(vb)	container_of(vb, struct xvip_dma_buffer, buf)
/*
 * xvip_dma_complete - dmaengine transfer-completion callback
 * @param: the &struct xvip_dma_buffer whose transfer finished
 *
 * Remove the buffer from the queued list, fill in the vb2 metadata (field,
 * sequence, timestamp, plane payloads) and hand the buffer back to videobuf2
 * in the DONE state.
 */
static void xvip_dma_complete(void *param)
	struct xvip_dma_buffer *buf = param;
	struct xvip_dma *dma = buf->dma;

	spin_lock(&dma->queued_lock);
	list_del(&buf->queue);
	spin_unlock(&dma->queued_lock);

	buf->buf.field = V4L2_FIELD_NONE;
	buf->buf.sequence = dma->sequence++;
	buf->buf.vb2_buf.timestamp = ktime_get_ns();

	/* Query the field ID of the completed transfer from the DMA. */
	status = xilinx_xdma_get_fid(dma->dma, buf->desc, &fid);

	if (((V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) &&
	     dma->format.fmt.pix_mp.field == V4L2_FIELD_ALTERNATE) ||
	    dma->format.fmt.pix.field == V4L2_FIELD_ALTERNATE) {
		/*
		 * fid = 1 is odd field i.e. V4L2_FIELD_TOP.
		 * fid = 0 is even field i.e. V4L2_FIELD_BOTTOM.
		 */
		buf->buf.field = fid ?
				 V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;

		/* Same field twice in a row: advance an extra step. */
		if (fid == dma->prev_fid)
			buf->buf.sequence = dma->sequence++;

		/* Halve: both fields of a frame share a sequence number. */
		buf->buf.sequence >>= 1;

	/* Report the configured image size as the payload of each plane. */
	if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
		for (i = 0; i < dma->fmtinfo->buffers; i++) {
				dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
			vb2_set_plane_payload(&buf->buf.vb2_buf, i, sizeimage);

		sizeimage = dma->format.fmt.pix.sizeimage;
		vb2_set_plane_payload(&buf->buf.vb2_buf, 0, sizeimage);

	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
/*
 * xvip_dma_queue_setup - vb2 .queue_setup operation
 *
 * Validate or fill in the plane count and per-plane sizes for buffer
 * allocation. When *nplanes is already set (VIDIOC_CREATE_BUFS path) the
 * requested sizes are checked against the configured format; otherwise the
 * sizes are derived from the format.
 */
xvip_dma_queue_setup(struct vb2_queue *vq,
		     unsigned int *nbuffers, unsigned int *nplanes,
		     unsigned int sizes[], struct device *alloc_devs[])
	struct xvip_dma *dma = vb2_get_drv_priv(vq);

	/* Multi planar case: Make sure the image size is large enough */
	if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
		/* Caller-provided plane count must match the format. */
		if (*nplanes != dma->format.fmt.pix_mp.num_planes)

		for (i = 0; i < *nplanes; i++) {
				dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
			if (sizes[i] < sizeimage)

		/* No caller request: report the format's plane layout. */
		*nplanes = dma->fmtinfo->buffers;
		for (i = 0; i < dma->fmtinfo->buffers; i++) {
				dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
			sizes[i] = sizeimage;

	/* Single planar case: Make sure the image size is large enough */
	sizeimage = dma->format.fmt.pix.sizeimage;
		return sizes[0] < sizeimage ? -EINVAL : 0;

	sizes[0] = sizeimage;
/*
 * xvip_dma_buffer_prepare - vb2 .buf_prepare operation
 *
 * Resolve the driver buffer for the vb2 buffer. The rest of the body
 * (presumably buf->dma assignment and the return) is not visible in this
 * chunk.
 */
static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
/*
 * xvip_dma_buffer_queue - vb2 .buf_queue operation
 *
 * Build an interleaved dmaengine transfer template for the buffer according
 * to the queue direction and the configured (single- or multi-planar) format,
 * attach the completion callback, add the buffer to the queued list and
 * submit the descriptor. The transfer is issued immediately only when the
 * queue is already streaming.
 */
static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
	struct dma_async_tx_descriptor *desc;
	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
	u32 padding_factor_nume, padding_factor_deno, bpl_nume, bpl_deno;

	/* Configure the transfer direction from the queue type. */
	if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
	    dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
		/* Capture: device writes into memory. */
		flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
		dma->xt.dir = DMA_DEV_TO_MEM;
		dma->xt.src_sgl = false;
		dma->xt.dst_sgl = true;
		dma->xt.dst_start = addr;
	} else if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
		   dma->queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		/* Output: device reads from memory. */
		flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
		dma->xt.dir = DMA_MEM_TO_DEV;
		dma->xt.src_sgl = true;
		dma->xt.dst_sgl = false;
		dma->xt.src_start = addr;

	/*
	 * DMA IP supports only 2 planes, so one datachunk is sufficient
	 * to get start address of 2nd plane
	 */
	if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
		struct v4l2_pix_format_mplane *pix_mp;

		pix_mp = &dma->format.fmt.pix_mp;
		xilinx_xdma_v4l2_config(dma->dma, pix_mp->pixelformat);
		xvip_width_padding_factor(pix_mp->pixelformat,
					  &padding_factor_nume,
					  &padding_factor_deno);
		xvip_bpl_scaling_factor(pix_mp->pixelformat, &bpl_nume,
		dma->xt.frame_size = dma->fmtinfo->num_planes;
		/* Payload bytes per line, scaled by padding/bpl factors. */
		dma->sgl[0].size = (pix_mp->width * dma->fmtinfo->bpl_factor *
				    padding_factor_nume * bpl_nume) /
				    (padding_factor_deno * bpl_deno);
		/* Inter-chunk gap: line stride minus payload. */
		dma->sgl[0].icg = pix_mp->plane_fmt[0].bytesperline -
		dma->xt.numf = pix_mp->height;

		/*
		 * dst_icg is the number of bytes to jump after last luma addr
		 * and before first chroma addr
		 */

		/* Handling contiguous data with mplanes */
		if (dma->fmtinfo->buffers == 1) {
			dma->sgl[0].dst_icg = 0;

		/* Handling non-contiguous data with mplanes */
		if (dma->fmtinfo->buffers == 2) {
			dma_addr_t chroma_addr =
				vb2_dma_contig_plane_dma_addr(vb, 1);
			luma_size = pix_mp->plane_fmt[0].bytesperline *
			/* Only a forward gap can be programmed. */
			if (chroma_addr > addr)
				dma->sgl[0].dst_icg = chroma_addr -

		struct v4l2_pix_format *pix;

		pix = &dma->format.fmt.pix;
		xilinx_xdma_v4l2_config(dma->dma, pix->pixelformat);
		xvip_width_padding_factor(pix->pixelformat,
					  &padding_factor_nume,
					  &padding_factor_deno);
		xvip_bpl_scaling_factor(pix->pixelformat, &bpl_nume,
		dma->xt.frame_size = dma->fmtinfo->num_planes;
		dma->sgl[0].size = (pix->width * dma->fmtinfo->bpl_factor *
				    padding_factor_nume * bpl_nume) /
				    (padding_factor_deno * bpl_deno);
		dma->sgl[0].icg = pix->bytesperline - dma->sgl[0].size;
		dma->xt.numf = pix->height;
		dma->sgl[0].dst_icg = 0;

	desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
		dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);

	desc->callback = xvip_dma_complete;
	desc->callback_param = buf;

	/* Translate the buffer field into a field ID for the DMA. */
	if (buf->buf.field == V4L2_FIELD_TOP)
	else if (buf->buf.field == V4L2_FIELD_BOTTOM)
	else if (buf->buf.field == V4L2_FIELD_NONE)

	xilinx_xdma_set_fid(dma->dma, desc, fid);

	spin_lock_irq(&dma->queued_lock);
	list_add_tail(&buf->queue, &dma->queued_bufs);
	spin_unlock_irq(&dma->queued_lock);

	dmaengine_submit(desc);

	/* Kick the engine only once streaming has actually started. */
	if (vb2_is_streaming(&dma->queue))
		dma_async_issue_pending(dma->dma);
/*
 * xvip_dma_start_streaming - vb2 .start_streaming operation
 *
 * Start the media pipeline, verify the configured format against the
 * connected subdev, prepare the shared pipeline object, kick the DMA engine
 * and finally start the pipeline entities. On failure the already-queued
 * buffers are returned to videobuf2 in the QUEUED state.
 */
static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
	struct xvip_dma *dma = vb2_get_drv_priv(vq);
	struct xvip_dma_buffer *buf, *nbuf;
	struct xvip_pipeline *pipe;

	/*
	 * Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 *
	 * Use the pipeline object embedded in the first DMA object that starts
	 */
	pipe = dma->video.entity.pipe
	     ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;

	ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);

	/* Verify that the configured format matches the output of the
	 */
	ret = xvip_dma_verify_format(dma);

	ret = xvip_pipeline_prepare(pipe, dma);

	/* Start the DMA engine. This must be done before starting the blocks
	 * in the pipeline to avoid DMA synchronization issues.
	 */
	dma_async_issue_pending(dma->dma);

	/* Start the pipeline. */
	ret = xvip_pipeline_set_stream(pipe, true);

	/* Error path: unwind in reverse order of initialization. */
	media_pipeline_stop(&dma->video.entity);

	dmaengine_terminate_all(dma->dma);

	/* Give back all queued buffers to videobuf2. */
	spin_lock_irq(&dma->queued_lock);
	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
		list_del(&buf->queue);
	spin_unlock_irq(&dma->queued_lock);
/*
 * xvip_dma_stop_streaming - vb2 .stop_streaming operation
 *
 * Stop the pipeline entities, terminate the DMA engine, release the pipeline
 * object and return all still-queued buffers to videobuf2 in the ERROR state.
 */
static void xvip_dma_stop_streaming(struct vb2_queue *vq)
	struct xvip_dma *dma = vb2_get_drv_priv(vq);
	struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
	struct xvip_dma_buffer *buf, *nbuf;

	/* Stop the pipeline. */
	xvip_pipeline_set_stream(pipe, false);

	/* Stop and reset the DMA engine. */
	dmaengine_terminate_all(dma->dma);

	/* Cleanup the pipeline and mark it as being stopped. */
	xvip_pipeline_cleanup(pipe);
	media_pipeline_stop(&dma->video.entity);

	/* Give back all queued buffers to videobuf2. */
	spin_lock_irq(&dma->queued_lock);
	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
		list_del(&buf->queue);
	spin_unlock_irq(&dma->queued_lock);
/* videobuf2 queue operations for the Xilinx video DMA. */
static const struct vb2_ops xvip_dma_queue_qops = {
	.queue_setup = xvip_dma_queue_setup,
	.buf_prepare = xvip_dma_buffer_prepare,
	.buf_queue = xvip_dma_buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = xvip_dma_start_streaming,
	.stop_streaming = xvip_dma_stop_streaming,
692 /* -----------------------------------------------------------------------------
/*
 * xvip_dma_querycap - VIDIOC_QUERYCAP handler
 *
 * Report driver, card and bus information. device_caps reflects the queue
 * type of this particular video node; the break statements between the
 * switch cases are not visible in this chunk.
 */
xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
			  | dma->xdev->v4l2_caps;

	cap->device_caps = V4L2_CAP_STREAMING;
	switch (dma->queue.type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
		cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE_MPLANE;
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		cap->device_caps |= V4L2_CAP_VIDEO_OUTPUT_MPLANE;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		cap->device_caps |= V4L2_CAP_VIDEO_OUTPUT;

	strlcpy(cap->driver, "xilinx-vipp", sizeof(cap->driver));
	strlcpy(cap->card, dma->video.name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s:%u",
		 dma->xdev->dev->of_node->name, dma->port);
/* FIXME: without this callback function, some applications are not configured
 * with correct formats, and it results in frames in wrong format. Whether this
 * callback needs to be required is not clearly defined, so it should be
 * clarified through the mailing list.
 *
 * VIDIOC_ENUM_FMT handler. For multi-planar nodes the list of valid fourccs
 * is derived from the remote subdev's active media bus code and cached;
 * single-planar nodes report only the currently configured format.
 */
xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
	struct v4l2_subdev *subdev;
	struct v4l2_subdev_format v4l_fmt;
	const struct xvip_video_format *fmt;

	if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
		/* establish media pad format */
		subdev = xvip_dma_remote_subdev(&dma->pad, &v4l_fmt.pad);

		v4l_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &v4l_fmt);
			return ret == -ENOIOCTLCMD ? -EINVAL : ret;

		/* has media pad value changed? */
		if (v4l_fmt.format.code != dma->remote_subdev_med_bus ||
		    !dma->remote_subdev_med_bus) {
			u32 i, fmt_cnt, *fmts;

			/* re-generate legal list of fourcc codes */
			dma->poss_v4l2_fmt_cnt = 0;
			dma->remote_subdev_med_bus = v4l_fmt.format.code;
			err = xilinx_xdma_get_v4l2_vid_fmts(dma->dma, &fmt_cnt,

			/* Allocate the cache once per video device. */
			if (!dma->poss_v4l2_fmts) {
				dma->poss_v4l2_fmts =
					devm_kzalloc(&dma->video.dev,
						     sizeof(u32) * fmt_cnt,
				if (!dma->poss_v4l2_fmts)

			/* Keep only fourccs matching the active bus code. */
			for (i = 0; i < fmt_cnt; i++) {
				fmt = xvip_get_format_by_fourcc(fmts[i]);

				if (fmt->code != dma->remote_subdev_med_bus)

				dma->poss_v4l2_fmts[dma->poss_v4l2_fmt_cnt++] =

		/* Return err if index is greater than count of legal values */
		if (f->index >= dma->poss_v4l2_fmt_cnt)

		/* Else return pix format in table */
		fmt = xvip_get_format_by_fourcc(dma->poss_v4l2_fmts[f->index]);

		f->pixelformat = fmt->fourcc;
		strlcpy(f->description, fmt->description,
			sizeof(f->description));

	/* Single plane formats */
	f->pixelformat = dma->format.fmt.pix.pixelformat;
	strlcpy(f->description, dma->fmtinfo->description,
		sizeof(f->description));
/*
 * xvip_dma_get_format - VIDIOC_G_FMT handler
 *
 * Copy the currently configured (single- or multi-planar) format to the
 * caller.
 */
xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
		format->fmt.pix_mp = dma->format.fmt.pix_mp;
		format->fmt.pix = dma->format.fmt.pix;
/*
 * __xvip_dma_try_format - Adjust a requested format to hardware constraints
 * @dma: the DMA channel
 * @format: in/out requested format, clamped and aligned in place
 * @fmtinfo: optional output; presumably set to the matched format info —
 *           the assignment is not visible in this chunk, TODO confirm
 *
 * Fall back to XVIP_DMA_DEF_FORMAT when the requested fourcc is unsupported,
 * clamp width/height to the XVIP_DMA_* limits, align bytesperline to the DMA
 * alignment and compute sizeimage for both single- and multi-planar formats.
 * The field order is propagated from the remote subdev's active format.
 */
__xvip_dma_try_format(struct xvip_dma *dma,
		      struct v4l2_format *format,
		      const struct xvip_video_format **fmtinfo)
	const struct xvip_video_format *info;
	unsigned int min_width;
	unsigned int max_width;
	unsigned int min_bpl;
	unsigned int max_bpl;
	unsigned int i, hsub, vsub, plane_width, plane_height;
	unsigned int padding_factor_nume, padding_factor_deno;
	unsigned int bpl_nume, bpl_deno;
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;

	/* Query the subdev's active format to learn the field order. */
	subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);

	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);

	/* Propagate the subdev's interlacing mode to the DMA format. */
	if (fmt.format.field == V4L2_FIELD_ALTERNATE) {
		if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
			dma->format.fmt.pix_mp.field = V4L2_FIELD_ALTERNATE;
			dma->format.fmt.pix.field = V4L2_FIELD_ALTERNATE;
		if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
			dma->format.fmt.pix_mp.field = V4L2_FIELD_NONE;
			dma->format.fmt.pix.field = V4L2_FIELD_NONE;

	/* Retrieve format information and select the default format if the
	 * requested format isn't supported.
	 */
	if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
		fourcc = format->fmt.pix_mp.pixelformat;
		fourcc = format->fmt.pix.pixelformat;

	info = xvip_get_format_by_fourcc(fourcc);
		info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);

	xvip_width_padding_factor(info->fourcc, &padding_factor_nume,
				  &padding_factor_deno);
	xvip_bpl_scaling_factor(info->fourcc, &bpl_nume, &bpl_deno);

	/* The transfer alignment requirements are expressed in bytes. Compute
	 * the minimum and maximum values, clamp the requested width and convert
	 */
	align = lcm(dma->align, info->bpp >> 3);
	min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
	max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);

	if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
		struct v4l2_pix_format_mplane *pix_mp;
		struct v4l2_plane_pix_format *plane_fmt;

		pix_mp = &format->fmt.pix_mp;
		plane_fmt = pix_mp->plane_fmt;
		pix_mp->field = dma->format.fmt.pix_mp.field;
		/* Clamp the width in bytes, then convert back to pixels. */
		width = rounddown(pix_mp->width * info->bpl_factor, align);
		pix_mp->width = clamp(width, min_width, max_width) /
		pix_mp->height = clamp(pix_mp->height, XVIP_DMA_MIN_HEIGHT,
				       XVIP_DMA_MAX_HEIGHT);

		/* ALTERNATE carries one field per buffer: half the height. */
		if (pix_mp->field == V4L2_FIELD_ALTERNATE)
			pix_mp->height = pix_mp->height / 2;

		/*
		 * Clamp the requested bytes per line value. If the maximum
		 * bytes per line value is zero, the module doesn't support
		 * user configurable line sizes. Override the requested value
		 * with the minimum in that case.
		 */
		max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);

		/* Handling contiguous data with mplanes */
		if (info->buffers == 1) {
			min_bpl = (pix_mp->width * info->bpl_factor *
				   padding_factor_nume * bpl_nume) /
				   (padding_factor_deno * bpl_deno);
			min_bpl = roundup(min_bpl, dma->align);
			bpl = roundup(plane_fmt[0].bytesperline, dma->align);
			plane_fmt[0].bytesperline = clamp(bpl, min_bpl,

			if (info->num_planes == 1) {
				/* Single plane formats */
				plane_fmt[0].sizeimage =
					plane_fmt[0].bytesperline *

				/* Multi plane formats */
				plane_fmt[0].sizeimage =
					DIV_ROUND_UP(plane_fmt[0].bytesperline *

		/* Handling non-contiguous data with mplanes */
		for (i = 0; i < info->num_planes; i++) {
			/* Chroma planes are subsampled by hsub/vsub. */
			plane_width = pix_mp->width / (i ? hsub : 1);
			plane_height = pix_mp->height / (i ? vsub : 1);
			min_bpl = (plane_width * info->bpl_factor *
				   padding_factor_nume * bpl_nume) /
				   (padding_factor_deno * bpl_deno);
			min_bpl = roundup(min_bpl, dma->align);
			bpl = rounddown(plane_fmt[i].bytesperline,
			plane_fmt[i].bytesperline =
				clamp(bpl, min_bpl, max_bpl);
			plane_fmt[i].sizeimage =
				plane_fmt[i].bytesperline *

		struct v4l2_pix_format *pix;

		pix = &format->fmt.pix;
		pix->field = dma->format.fmt.pix.field;
		width = rounddown(pix->width * info->bpl_factor, align);
		pix->width = clamp(width, min_width, max_width) /
		pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
				    XVIP_DMA_MAX_HEIGHT);

		if (pix->field == V4L2_FIELD_ALTERNATE)
			pix->height = pix->height / 2;

		min_bpl = (pix->width * info->bpl_factor *
			   padding_factor_nume * bpl_nume) /
			   (padding_factor_deno * bpl_deno);
		min_bpl = roundup(min_bpl, dma->align);
		max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
		bpl = rounddown(pix->bytesperline, dma->align);
		pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
		pix->sizeimage = pix->width * pix->height * info->bpp / 8;
/* VIDIOC_TRY_FMT handler: adjust @format without applying it. */
xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	__xvip_dma_try_format(dma, format, NULL);
/*
 * xvip_dma_set_format - VIDIOC_S_FMT handler
 *
 * Adjust the requested format, refuse the change while vb2 owns buffers,
 * then store the new format and its matching format info on the channel.
 */
xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
	const struct xvip_video_format *info;

	__xvip_dma_try_format(dma, format, &info);

	/* The format cannot change while the queue owns buffers. */
	if (vb2_is_busy(&dma->queue))

	if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
		dma->format.fmt.pix_mp = format->fmt.pix_mp;
		dma->format.fmt.pix = format->fmt.pix;

	dma->fmtinfo = info;
/* V4L2 ioctl operations; buffer ioctls are delegated to vb2 helpers. */
static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
	.vidioc_querycap = xvip_dma_querycap,
	.vidioc_enum_fmt_vid_cap = xvip_dma_enum_format,
	.vidioc_enum_fmt_vid_cap_mplane = xvip_dma_enum_format,
	.vidioc_g_fmt_vid_cap = xvip_dma_get_format,
	.vidioc_g_fmt_vid_cap_mplane = xvip_dma_get_format,
	.vidioc_g_fmt_vid_out = xvip_dma_get_format,
	.vidioc_s_fmt_vid_cap = xvip_dma_set_format,
	.vidioc_s_fmt_vid_cap_mplane = xvip_dma_set_format,
	.vidioc_s_fmt_vid_out = xvip_dma_set_format,
	.vidioc_try_fmt_vid_cap = xvip_dma_try_format,
	.vidioc_try_fmt_vid_cap_mplane = xvip_dma_try_format,
	.vidioc_try_fmt_vid_out = xvip_dma_try_format,
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */
/* V4L2 file operations; I/O paths are delegated to vb2 helpers. */
static const struct v4l2_file_operations xvip_dma_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
/* -----------------------------------------------------------------------------
 * Xilinx Video DMA Core
 */
/*
 * xvip_dma_init - Initialize one Xilinx video DMA channel
 * @xdev: the composite video device
 * @dma: the DMA channel to initialize
 * @type: V4L2 buffer type (capture/output, single-/multi-planar)
 * @port: port number, used to name and request the dmaengine channel
 *
 * Set up the default format, the media pad, the video device, the vb2 queue
 * and request the dmaengine channel. On failure, xvip_dma_cleanup() unwinds
 * whatever has been initialized.
 *
 * Return: 0 on success or a negative error code.
 */
int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
		  enum v4l2_buf_type type, unsigned int port)
	u32 i, hsub, vsub, width, height;

	mutex_init(&dma->lock);
	mutex_init(&dma->pipe.lock);
	INIT_LIST_HEAD(&dma->queued_bufs);
	spin_lock_init(&dma->queued_lock);

	/* Default to YUYV at the default resolution. */
	dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
	dma->format.type = type;

	if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
		struct v4l2_pix_format_mplane *pix_mp;

		pix_mp = &dma->format.fmt.pix_mp;
		pix_mp->pixelformat = dma->fmtinfo->fourcc;
		pix_mp->colorspace = V4L2_COLORSPACE_SRGB;
		pix_mp->field = V4L2_FIELD_NONE;
		pix_mp->width = XVIP_DMA_DEF_WIDTH;

		/* Handling contiguous data with mplanes */
		if (dma->fmtinfo->buffers == 1) {
			pix_mp->plane_fmt[0].bytesperline =
				pix_mp->width * dma->fmtinfo->bpl_factor;
			pix_mp->plane_fmt[0].sizeimage =
				pix_mp->width * pix_mp->height *
				dma->fmtinfo->bpp / 8;

		/* Handling non-contiguous data with mplanes */
		hsub = dma->fmtinfo->hsub;
		vsub = dma->fmtinfo->vsub;
		for (i = 0; i < dma->fmtinfo->buffers; i++) {
			/* Chroma planes are subsampled by hsub/vsub. */
			width = pix_mp->width / (i ? hsub : 1);
			height = pix_mp->height / (i ? vsub : 1);
			pix_mp->plane_fmt[i].bytesperline =
				width * dma->fmtinfo->bpl_factor;
			pix_mp->plane_fmt[i].sizeimage = width * height;

		struct v4l2_pix_format *pix;

		pix = &dma->format.fmt.pix;
		pix->pixelformat = dma->fmtinfo->fourcc;
		pix->colorspace = V4L2_COLORSPACE_SRGB;
		pix->field = V4L2_FIELD_NONE;
		pix->width = XVIP_DMA_DEF_WIDTH;
		pix->height = XVIP_DMA_DEF_HEIGHT;
		pix->bytesperline = pix->width * dma->fmtinfo->bpl_factor;
			pix->width * pix->height * dma->fmtinfo->bpp / 8;

	/* Initialize the media entity... */
	if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
	    type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		dma->pad.flags = MEDIA_PAD_FL_SINK;
		dma->pad.flags = MEDIA_PAD_FL_SOURCE;

	ret = media_entity_pads_init(&dma->video.entity, 1, &dma->pad);

	/* ... and the video node... */
	dma->video.fops = &xvip_dma_fops;
	dma->video.v4l2_dev = &xdev->v4l2_dev;
	dma->video.queue = &dma->queue;
	snprintf(dma->video.name, sizeof(dma->video.name), "%s %s %u",
		 xdev->dev->of_node->name,
		 (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
		  type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
					? "output" : "input",
	dma->video.vfl_type = VFL_TYPE_GRABBER;
	if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
	    type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		dma->video.vfl_dir = VFL_DIR_RX;
		dma->video.vfl_dir = VFL_DIR_TX;

	dma->video.release = video_device_release_empty;
	dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
	dma->video.lock = &dma->lock;

	video_set_drvdata(&dma->video, dma);

	/* ... and the buffers queue... */
	/* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
	 * V4L2 APIs would be inefficient. Testing on the command line with a
	 * 'cat /dev/video?' thus won't be possible, but given that the driver
	 * anyway requires a test tool to setup the pipeline before any video
	 * stream can be started, requiring a specific V4L2 test tool as well
	 * instead of 'cat' isn't really a drawback.
	 */
	dma->queue.type = type;
	dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	dma->queue.lock = &dma->lock;
	dma->queue.drv_priv = dma;
	dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
	dma->queue.ops = &xvip_dma_queue_qops;
	dma->queue.mem_ops = &vb2_dma_contig_memops;
	dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
				   | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
	dma->queue.dev = dma->xdev->dev;
	ret = vb2_queue_init(&dma->queue);
		dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");

	/* ... and the DMA channel. */
	snprintf(name, sizeof(name), "port%u", port);
	dma->dma = dma_request_chan(dma->xdev->dev, name);
	if (IS_ERR(dma->dma)) {
		ret = PTR_ERR(dma->dma);
		/* Probe deferral is expected; stay silent in that case. */
		if (ret != -EPROBE_DEFER)
			dev_err(dma->xdev->dev,
				"No Video DMA channel found");

	/* Transfer alignment in bytes, from the dmaengine capabilities. */
	dma->align = 1 << dma->dma->device->copy_align;

	ret = video_register_device(&dma->video, VFL_TYPE_GRABBER, -1);
		dev_err(dma->xdev->dev, "failed to register video device\n");

	/* Error path: unwind partial initialization. */
	xvip_dma_cleanup(dma);
/*
 * xvip_dma_cleanup - Undo xvip_dma_init
 * @dma: the DMA channel to tear down
 *
 * Safe on a partially-initialized channel: the video device is unregistered
 * only if registration succeeded, and the dmaengine channel is released only
 * if it was successfully requested.
 */
void xvip_dma_cleanup(struct xvip_dma *dma)
	if (video_is_registered(&dma->video))
		video_unregister_device(&dma->video);

	if (!IS_ERR(dma->dma))
		dma_release_channel(dma->dma);

	media_entity_cleanup(&dma->video.entity);

	mutex_destroy(&dma->lock);
	mutex_destroy(&dma->pipe.lock);