4 * Copyright (C) 2013-2015 Ideas on Board
5 * Copyright (C) 2013-2015 Xilinx, Inc.
7 * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
8 * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
15 #include <linux/dma/xilinx_dma.h>
16 #include <linux/dma/xilinx_frmbuf.h>
17 #include <linux/lcm.h>
18 #include <linux/list.h>
19 #include <linux/module.h>
21 #include <linux/slab.h>
23 #include <media/v4l2-dev.h>
24 #include <media/v4l2-fh.h>
25 #include <media/v4l2-ioctl.h>
26 #include <media/videobuf2-v4l2.h>
27 #include <media/videobuf2-dma-contig.h>
29 #include "xilinx-dma.h"
30 #include "xilinx-vip.h"
31 #include "xilinx-vipp.h"
33 #define XVIP_DMA_DEF_FORMAT V4L2_PIX_FMT_YUYV
34 #define XVIP_DMA_DEF_WIDTH 1920
35 #define XVIP_DMA_DEF_HEIGHT 1080
37 /* Minimum and maximum widths are expressed in bytes */
38 #define XVIP_DMA_MIN_WIDTH 1U
39 #define XVIP_DMA_MAX_WIDTH 65535U
40 #define XVIP_DMA_MIN_HEIGHT 1U
41 #define XVIP_DMA_MAX_HEIGHT 8191U
43 /* -----------------------------------------------------------------------------
/*
 * xvip_dma_remote_subdev - Return the subdev at the remote end of @local.
 *
 * Looks up the pad linked to @local and, when that pad belongs to a V4L2
 * subdevice entity, returns the corresponding struct v4l2_subdev.
 * NOTE(review): this extract is truncated — the failure return path and the
 * assignment of *pad are not visible here.
 */
47 static struct v4l2_subdev *
48 xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
50 struct media_pad *remote;
52 remote = media_entity_remote_pad(local);
53 if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
59 return media_entity_to_v4l2_subdev(remote->entity);
/*
 * xvip_dma_verify_format - Check that the DMA format matches the source pad.
 *
 * Queries the active format on the connected remote subdev pad and compares
 * its media bus code, width and height against the format configured on the
 * DMA video node (single- or multi-planar).
 * NOTE(review): truncated extract — the error returns for the mismatch cases
 * are not visible here.
 */
62 static int xvip_dma_verify_format(struct xvip_dma *dma)
64 struct v4l2_subdev_format fmt;
65 struct v4l2_subdev *subdev;
69 subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
73 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
74 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
/* -ENOIOCTLCMD means the subdev has no get_fmt op: treat as invalid. */
76 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
78 if (dma->fmtinfo->code != fmt.format.code)
81 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
82 width = dma->format.fmt.pix_mp.width;
83 height = dma->format.fmt.pix_mp.height;
85 width = dma->format.fmt.pix.width;
86 height = dma->format.fmt.pix.height;
89 if (width != fmt.format.width || height != fmt.format.height)
95 /* -----------------------------------------------------------------------------
96 * Pipeline Stream Management
100 * xvip_pipeline_start_stop - Start or stop streaming on a pipeline
101 * @xdev: Composite video device
103 * @start: Start (when true) or stop (when false) the pipeline
105 * Walk the entities chain starting @dma and start or stop all of them
107 * Return: 0 if successful, or the return value of the failed video::s_stream
108 * operation otherwise.
/*
 * Walk the media graph starting at @dma's video entity and call s_stream on
 * every subdev found, tracking per-subdev streaming state via
 * xvip_subdev_set_streaming() so shared subdevs are toggled only once.
 * Graph walk init/start runs under mdev->graph_mutex.
 */
110 static int xvip_pipeline_start_stop(struct xvip_composite_device *xdev,
111 struct xvip_dma *dma, bool start)
113 struct media_graph graph;
114 struct media_entity *entity = &dma->video.entity;
115 struct media_device *mdev = entity->graph_obj.mdev;
116 struct v4l2_subdev *subdev;
120 mutex_lock(&mdev->graph_mutex);
122 /* Walk the graph to locate the subdev nodes */
123 ret = media_graph_walk_init(&graph, mdev);
125 mutex_unlock(&mdev->graph_mutex);
129 media_graph_walk_start(&graph, entity);
131 while ((entity = media_graph_walk_next(&graph))) {
132 /* We want to stream on/off only subdevs */
133 if (!is_media_entity_v4l2_subdev(entity))
136 subdev = media_entity_to_v4l2_subdev(entity);
138 /* This is to maintain list of stream on/off devices */
139 is_streaming = xvip_subdev_set_streaming(xdev, subdev, start);
142 * start or stop the subdev only once in case if they are
143 * shared between sub-graphs
145 if (start != is_streaming) {
146 ret = v4l2_subdev_call(subdev, video, s_stream,
148 if (start && ret < 0 && ret != -ENOIOCTLCMD) {
149 dev_err(xdev->dev, "s_stream is failed on subdev\n");
/* Roll back the streaming state recorded above on failure. */
150 xvip_subdev_set_streaming(xdev, subdev, !start);
156 mutex_unlock(&mdev->graph_mutex);
157 media_graph_walk_cleanup(&graph);
163 * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
164 * @pipe: The pipeline
165 * @on: Turn the stream on when true or off when false
167 * The pipeline is shared between all DMA engines connect at its input and
168 * output. While the stream state of DMA engines can be controlled
169 * independently, pipelines have a shared stream state that enable or disable
170 * all entities in the pipeline. For this reason the pipeline uses a streaming
171 * counter that tracks the number of DMA engines that have requested the stream
172 * to be enabled. This will walk the graph starting from each DMA and enable or
173 * disable the entities in the path.
175 * When called with the @on argument set to true, this function will increment
176 * the pipeline streaming count. If the streaming count reaches the number of
177 * DMA engines in the pipeline it will enable all entities that belong to the
180 * Similarly, when called with the @on argument set to false, this function will
181 * decrement the pipeline streaming count and disable all entities in the
182 * pipeline when the streaming count reaches zero.
184 * Return: 0 if successful, or the return value of the failed video::s_stream
185 * operation otherwise. Stopping the pipeline never fails. The pipeline state is
186 * not updated when the operation fails.
/*
 * Enable or disable streaming on the whole pipeline, guarded by pipe->lock.
 * On enable: once the last DMA requests streaming (stream_count reaches
 * num_dmas - 1), start all DMAs' sub-graphs, then bump the counter.
 * On disable: when the counter drops to zero, stop every DMA's sub-graph.
 */
188 static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
190 struct xvip_composite_device *xdev;
191 struct xvip_dma *dma;
194 mutex_lock(&pipe->lock);
198 if (pipe->stream_count == pipe->num_dmas - 1) {
200 * This will iterate the DMAs and the stream-on of
201 * subdevs may not be sequential due to multiple
204 list_for_each_entry(dma, &xdev->dmas, list) {
205 ret = xvip_pipeline_start_stop(xdev, dma, true);
210 pipe->stream_count++;
212 if (--pipe->stream_count == 0)
213 list_for_each_entry(dma, &xdev->dmas, list)
214 xvip_pipeline_start_stop(xdev, dma, false);
218 mutex_unlock(&pipe->lock);
/*
 * Validate the pipeline rooted at @start by walking the media graph and
 * counting the V4L2 I/O (DMA) entities on sink and source pads. Records the
 * total DMA count and the owning composite device in @pipe.
 * NOTE(review): truncated extract — the num_inputs/num_outputs increments and
 * the error return for the "no DMA" case are not visible here.
 */
222 static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
223 struct xvip_dma *start)
225 struct media_graph graph;
226 struct media_entity *entity = &start->video.entity;
227 struct media_device *mdev = entity->graph_obj.mdev;
228 unsigned int num_inputs = 0;
229 unsigned int num_outputs = 0;
232 mutex_lock(&mdev->graph_mutex);
234 /* Walk the graph to locate the video nodes. */
235 ret = media_graph_walk_init(&graph, mdev);
237 mutex_unlock(&mdev->graph_mutex);
241 media_graph_walk_start(&graph, entity);
243 while ((entity = media_graph_walk_next(&graph))) {
244 struct xvip_dma *dma;
246 if (entity->function != MEDIA_ENT_F_IO_V4L)
249 dma = to_xvip_dma(media_entity_to_video_device(entity));
251 if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
258 mutex_unlock(&mdev->graph_mutex);
260 media_graph_walk_cleanup(&graph);
262 /* We need at least one DMA to proceed */
263 if (num_outputs == 0 && num_inputs == 0)
266 pipe->num_dmas = num_inputs + num_outputs;
267 pipe->xdev = start->xdev;
/*
 * Reset the pipeline's internal state. Called with pipe->lock held by the
 * xvip_pipeline_cleanup()/xvip_pipeline_prepare() paths below.
 * NOTE(review): function body is not visible in this extract.
 */
272 static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
278 * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
279 * @pipe: the pipeline
281 * Decrease the pipeline use count and clean it up if we were the last user.
/* Drop one pipeline user; tear the pipeline down when the count hits zero. */
283 static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
285 mutex_lock(&pipe->lock);
287 /* If we're the last user clean up the pipeline. */
288 if (--pipe->use_count == 0)
289 __xvip_pipeline_cleanup(pipe);
291 mutex_unlock(&pipe->lock);
295 * xvip_pipeline_prepare - Prepare the pipeline for streaming
296 * @pipe: the pipeline
297 * @dma: DMA engine at one end of the pipeline
299 * Validate the pipeline if no user exists yet, otherwise just increase the use
302 * Return: 0 if successful or -EPIPE if the pipeline is not valid.
/*
 * Prepare the pipeline for use: the first user validates it (cleaning up on
 * failure); later users only bump the use count (increment not visible in
 * this truncated extract). Serialized by pipe->lock.
 */
304 static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
305 struct xvip_dma *dma)
309 mutex_lock(&pipe->lock);
311 /* If we're the first user validate and initialize the pipeline. */
312 if (pipe->use_count == 0) {
313 ret = xvip_pipeline_validate(pipe, dma);
315 __xvip_pipeline_cleanup(pipe);
324 mutex_unlock(&pipe->lock);
328 /* -----------------------------------------------------------------------------
329 * videobuf2 queue operations
333 * struct xvip_dma_buffer - Video DMA buffer
334 * @buf: vb2 buffer base object
335 * @queue: buffer list entry in the DMA engine queued buffers list
336 * @dma: DMA channel that uses the buffer
337 * @desc: Descriptor associated with this structure
/* Per-buffer bookkeeping wrapping the vb2 buffer; see kernel-doc above. */
339 struct xvip_dma_buffer {
340 struct vb2_v4l2_buffer buf;
341 struct list_head queue;
342 struct xvip_dma *dma;
343 struct dma_async_tx_descriptor *desc;
/* Recover the driver buffer from an embedded vb2_v4l2_buffer pointer. */
346 #define to_xvip_dma_buffer(vb) container_of(vb, struct xvip_dma_buffer, buf)
/*
 * DMA completion callback (runs in the dmaengine callback context): unlink
 * the buffer from the queued list, stamp sequence/timestamp, derive the field
 * (TOP/BOTTOM) from the hardware field ID when the format is ALTERNATE, set
 * the plane payload(s) and hand the buffer back to vb2 as DONE.
 */
348 static void xvip_dma_complete(void *param)
350 struct xvip_dma_buffer *buf = param;
351 struct xvip_dma *dma = buf->dma;
356 spin_lock(&dma->queued_lock);
357 list_del(&buf->queue);
358 spin_unlock(&dma->queued_lock);
360 buf->buf.field = V4L2_FIELD_NONE;
361 buf->buf.sequence = dma->sequence++;
362 buf->buf.vb2_buf.timestamp = ktime_get_ns();
364 status = xilinx_xdma_get_fid(dma->dma, buf->desc, &fid);
366 if (((V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) &&
367 dma->format.fmt.pix_mp.field == V4L2_FIELD_ALTERNATE) ||
368 dma->format.fmt.pix.field == V4L2_FIELD_ALTERNATE) {
370 * fid = 1 is odd field i.e. V4L2_FIELD_TOP.
371 * fid = 0 is even field i.e. V4L2_FIELD_BOTTOM.
373 buf->buf.field = fid ?
374 V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
/* Repeated field ID: count it as a new frame, then halve the
 * sequence so two fields map to one frame number. */
376 if (fid == dma->prev_fid)
377 buf->buf.sequence = dma->sequence++;
379 buf->buf.sequence >>= 1;
384 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
385 for (i = 0; i < dma->fmtinfo->buffers; i++) {
387 dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
388 vb2_set_plane_payload(&buf->buf.vb2_buf, i, sizeimage);
391 sizeimage = dma->format.fmt.pix.sizeimage;
392 vb2_set_plane_payload(&buf->buf.vb2_buf, 0, sizeimage);
395 vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
/*
 * vb2 .queue_setup op: validate caller-provided plane count/sizes against the
 * configured format, or fill in the driver defaults when the caller passed
 * none. Handles both multi-planar and single-planar queues.
 */
399 xvip_dma_queue_setup(struct vb2_queue *vq,
400 unsigned int *nbuffers, unsigned int *nplanes,
401 unsigned int sizes[], struct device *alloc_devs[])
403 struct xvip_dma *dma = vb2_get_drv_priv(vq);
407 /* Multi planar case: Make sure the image size is large enough */
408 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
410 if (*nplanes != dma->format.fmt.pix_mp.num_planes)
413 for (i = 0; i < *nplanes; i++) {
415 dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
416 if (sizes[i] < sizeimage)
/* Caller did not constrain planes: report the format's own layout. */
420 *nplanes = dma->fmtinfo->buffers;
421 for (i = 0; i < dma->fmtinfo->buffers; i++) {
423 dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
424 sizes[i] = sizeimage;
430 /* Single planar case: Make sure the image size is large enough */
431 sizeimage = dma->format.fmt.pix.sizeimage;
433 return sizes[0] < sizeimage ? -EINVAL : 0;
436 sizes[0] = sizeimage;
/*
 * vb2 .buf_prepare op: bind the driver buffer wrapper to its DMA channel.
 * NOTE(review): truncated extract — the buf->dma assignment and return are
 * not visible here.
 */
441 static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
443 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
444 struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
445 struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
/*
 * vb2 .buf_queue op: build an interleaved dmaengine transfer for the buffer
 * and submit it. Transfer direction and scatter/gather flags follow the queue
 * type (capture = DEV_TO_MEM, output = MEM_TO_DEV); line size and inter-chunk
 * gaps come from the configured pixel format via the padding/bpl scaling
 * helpers. The buffer is then queued, submitted, and — if the queue is
 * already streaming — the DMA engine is kicked.
 */
452 static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
454 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
455 struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
456 struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
457 struct dma_async_tx_descriptor *desc;
458 dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
461 u32 padding_factor_nume, padding_factor_deno, bpl_nume, bpl_deno;
/* Capture: device writes to memory; memory side is the SG side. */
464 if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
465 dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
466 flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
467 dma->xt.dir = DMA_DEV_TO_MEM;
468 dma->xt.src_sgl = false;
469 dma->xt.dst_sgl = true;
470 dma->xt.dst_start = addr;
/* Output: memory is read and streamed out to the device. */
471 } else if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
472 dma->queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
473 flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
474 dma->xt.dir = DMA_MEM_TO_DEV;
475 dma->xt.src_sgl = true;
476 dma->xt.dst_sgl = false;
477 dma->xt.src_start = addr;
481 * DMA IP supports only 2 planes, so one datachunk is sufficient
482 * to get start address of 2nd plane
484 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
485 struct v4l2_pix_format_mplane *pix_mp;
487 pix_mp = &dma->format.fmt.pix_mp;
488 xilinx_xdma_v4l2_config(dma->dma, pix_mp->pixelformat);
489 xvip_width_padding_factor(pix_mp->pixelformat,
490 &padding_factor_nume,
491 &padding_factor_deno);
492 xvip_bpl_scaling_factor(pix_mp->pixelformat, &bpl_nume,
494 dma->xt.frame_size = dma->fmtinfo->num_planes;
495 dma->sgl[0].size = (pix_mp->width * dma->fmtinfo->bpl_factor *
496 padding_factor_nume * bpl_nume) /
497 (padding_factor_deno * bpl_deno);
498 dma->sgl[0].icg = pix_mp->plane_fmt[0].bytesperline -
500 dma->xt.numf = pix_mp->height;
503 * dst_icg is the number of bytes to jump after last luma addr
504 * and before first chroma addr
507 /* Handling contiguous data with mplanes */
508 if (dma->fmtinfo->buffers == 1) {
509 dma->sgl[0].dst_icg = 0;
511 /* Handling non-contiguous data with mplanes */
512 if (dma->fmtinfo->buffers == 2) {
513 dma_addr_t chroma_addr =
514 vb2_dma_contig_plane_dma_addr(vb, 1);
515 luma_size = pix_mp->plane_fmt[0].bytesperline *
517 if (chroma_addr > addr)
518 dma->sgl[0].dst_icg = chroma_addr -
/* Single-planar path: same computation from v4l2_pix_format. */
523 struct v4l2_pix_format *pix;
525 pix = &dma->format.fmt.pix;
526 xilinx_xdma_v4l2_config(dma->dma, pix->pixelformat);
527 xvip_width_padding_factor(pix->pixelformat,
528 &padding_factor_nume,
529 &padding_factor_deno);
530 xvip_bpl_scaling_factor(pix->pixelformat, &bpl_nume,
532 dma->xt.frame_size = dma->fmtinfo->num_planes;
533 dma->sgl[0].size = (pix->width * dma->fmtinfo->bpl_factor *
534 padding_factor_nume * bpl_nume) /
535 (padding_factor_deno * bpl_deno);
536 dma->sgl[0].icg = pix->bytesperline - dma->sgl[0].size;
537 dma->xt.numf = pix->height;
538 dma->sgl[0].dst_icg = 0;
541 desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
/* Prep failure: mark the buffer as errored back to vb2. */
543 dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
544 vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
547 desc->callback = xvip_dma_complete;
548 desc->callback_param = buf;
/* Propagate the V4L2 field to the hardware field ID. */
551 if (buf->buf.field == V4L2_FIELD_TOP)
553 else if (buf->buf.field == V4L2_FIELD_BOTTOM)
555 else if (buf->buf.field == V4L2_FIELD_NONE)
558 xilinx_xdma_set_fid(dma->dma, desc, fid);
560 spin_lock_irq(&dma->queued_lock);
561 list_add_tail(&buf->queue, &dma->queued_bufs);
562 spin_unlock_irq(&dma->queued_lock);
564 dmaengine_submit(desc);
566 if (vb2_is_streaming(&dma->queue))
567 dma_async_issue_pending(dma->dma);
/*
 * vb2 .start_streaming op: start the media pipeline, verify the negotiated
 * format, prepare the shared pipeline object, kick the DMA engine and then
 * enable streaming on the pipeline. The visible error path stops the media
 * pipeline, terminates the DMA and returns all queued buffers as QUEUED.
 */
570 static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
572 struct xvip_dma *dma = vb2_get_drv_priv(vq);
573 struct xvip_dma_buffer *buf, *nbuf;
574 struct xvip_pipeline *pipe;
581 * Start streaming on the pipeline. No link touching an entity in the
582 * pipeline can be activated or deactivated once streaming is started.
584 * Use the pipeline object embedded in the first DMA object that starts
/* Reuse an already-running pipeline if the entity has one, else ours. */
587 mutex_lock(&dma->xdev->lock);
588 pipe = dma->video.entity.pipe
589 ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;
591 ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
592 mutex_unlock(&dma->xdev->lock);
596 /* Verify that the configured format matches the output of the
599 ret = xvip_dma_verify_format(dma);
603 ret = xvip_pipeline_prepare(pipe, dma);
607 /* Start the DMA engine. This must be done before starting the blocks
608 * in the pipeline to avoid DMA synchronization issues.
610 dma_async_issue_pending(dma->dma);
612 /* Start the pipeline. */
613 ret = xvip_pipeline_set_stream(pipe, true);
/* Error unwind (labels not visible in this truncated extract). */
620 media_pipeline_stop(&dma->video.entity);
623 dmaengine_terminate_all(dma->dma);
624 /* Give back all queued buffers to videobuf2. */
625 spin_lock_irq(&dma->queued_lock);
626 list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
627 vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
628 list_del(&buf->queue);
630 spin_unlock_irq(&dma->queued_lock);
/*
 * vb2 .stop_streaming op: mirror of start_streaming — stop the pipeline
 * stream, terminate the DMA engine, release the pipeline and return every
 * still-queued buffer to vb2 in the ERROR state.
 */
635 static void xvip_dma_stop_streaming(struct vb2_queue *vq)
637 struct xvip_dma *dma = vb2_get_drv_priv(vq);
638 struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
639 struct xvip_dma_buffer *buf, *nbuf;
641 /* Stop the pipeline. */
642 xvip_pipeline_set_stream(pipe, false);
644 /* Stop and reset the DMA engine. */
645 dmaengine_terminate_all(dma->dma);
647 /* Cleanup the pipeline and mark it as being stopped. */
648 xvip_pipeline_cleanup(pipe);
649 media_pipeline_stop(&dma->video.entity);
651 /* Give back all queued buffers to videobuf2. */
652 spin_lock_irq(&dma->queued_lock);
653 list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
654 vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
655 list_del(&buf->queue);
657 spin_unlock_irq(&dma->queued_lock);
/* videobuf2 queue operations for the Xilinx video DMA queues. */
660 static const struct vb2_ops xvip_dma_queue_qops = {
661 .queue_setup = xvip_dma_queue_setup,
662 .buf_prepare = xvip_dma_buffer_prepare,
663 .buf_queue = xvip_dma_buffer_queue,
664 .wait_prepare = vb2_ops_wait_prepare,
665 .wait_finish = vb2_ops_wait_finish,
666 .start_streaming = xvip_dma_start_streaming,
667 .stop_streaming = xvip_dma_stop_streaming,
670 /* -----------------------------------------------------------------------------
/*
 * VIDIOC_QUERYCAP handler: report streaming capability plus the per-queue
 * capture/output (m)planar capability, and fill driver/card/bus_info strings
 * from the composite device and port number.
 */
675 xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
677 struct v4l2_fh *vfh = file->private_data;
678 struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
680 cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
681 | dma->xdev->v4l2_caps;
683 cap->device_caps = V4L2_CAP_STREAMING;
684 switch (dma->queue.type) {
685 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
686 cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE_MPLANE;
688 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
689 cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
691 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
692 cap->device_caps |= V4L2_CAP_VIDEO_OUTPUT_MPLANE;
694 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
695 cap->device_caps |= V4L2_CAP_VIDEO_OUTPUT;
699 strlcpy(cap->driver, "xilinx-vipp", sizeof(cap->driver));
700 strlcpy(cap->card, dma->video.name, sizeof(cap->card));
701 snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s:%u",
702 dma->xdev->dev->of_node->name, dma->port);
/*
 * Enumerate the fourcc codes the frmbuf DMA channel supports for the current
 * remote-subdev media bus code. The legal-fourcc table is (re)built lazily
 * whenever the remote pad's code changes, then indexed by f->index.
 */
707 static int xvip_xdma_enum_fmt(struct xvip_dma *dma, struct v4l2_fmtdesc *f,
708 struct v4l2_subdev_format *v4l_fmt)
710 const struct xvip_video_format *fmt;
712 u32 i, fmt_cnt, *fmts;
714 ret = xilinx_xdma_get_v4l2_vid_fmts(dma->dma, &fmt_cnt, &fmts);
718 /* Has media pad value changed? */
719 if (v4l_fmt->format.code != dma->remote_subdev_med_bus ||
720 !dma->remote_subdev_med_bus) {
721 /* Re-generate legal list of fourcc codes */
722 dma->poss_v4l2_fmt_cnt = 0;
723 dma->remote_subdev_med_bus = v4l_fmt->format.code;
/* Allocate the cache once; sized for every channel-supported fourcc. */
725 if (!dma->poss_v4l2_fmts) {
726 dma->poss_v4l2_fmts =
727 devm_kzalloc(&dma->video.dev,
728 sizeof(u32) * fmt_cnt,
730 if (!dma->poss_v4l2_fmts)
/* Keep only fourccs whose media bus code matches the remote pad. */
734 for (i = 0; i < fmt_cnt; i++) {
735 fmt = xvip_get_format_by_fourcc(fmts[i]);
739 if (fmt->code != dma->remote_subdev_med_bus)
742 dma->poss_v4l2_fmts[dma->poss_v4l2_fmt_cnt++] = fmts[i];
746 /* Return err if index is greater than count of legal values */
747 if (f->index >= dma->poss_v4l2_fmt_cnt)
750 /* Else return pix format in table */
751 fmt = xvip_get_format_by_fourcc(dma->poss_v4l2_fmts[f->index]);
755 f->pixelformat = fmt->fourcc;
756 strlcpy(f->description, fmt->description,
757 sizeof(f->description));
762 /* FIXME: without this callback function, some applications are not configured
763 * with correct formats, and it results in frames in wrong format. Whether this
764 * callback needs to be required is not clearly defined, so it should be
765 * clarified through the mailing list.
/*
 * VIDIOC_ENUM_FMT handler: get the active format on the remote subdev pad,
 * then either enumerate frmbuf-DMA-supported formats via xvip_xdma_enum_fmt()
 * or fall back to the single pix format matching the subdev's bus code.
 */
768 xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
770 struct v4l2_fh *vfh = file->private_data;
771 struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
772 struct v4l2_subdev *subdev;
773 struct v4l2_subdev_format v4l_fmt;
774 const struct xvip_video_format *fmt;
777 /* Establish media pad format */
778 subdev = xvip_dma_remote_subdev(&dma->pad, &v4l_fmt.pad);
782 v4l_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
783 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &v4l_fmt);
785 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
788 * In case of frmbuf DMA, this will invoke frambuf driver specific APIs
789 * to enumerate formats otherwise return the pix format corresponding
790 * to subdev's media bus format. This kind of separation would be
791 * helpful for clean up and upstreaming.
793 err = xvip_xdma_enum_fmt(dma, f, &v4l_fmt);
798 * This logic will just return one pix format based on subdev's
/* Fallback: map the subdev media bus code to its single pix format. */
804 fmt = xvip_get_format_by_code(v4l_fmt.format.code);
808 f->pixelformat = fmt->fourcc;
809 strlcpy(f->description, fmt->description,
810 sizeof(f->description));
/* VIDIOC_G_FMT handler: copy the stored (m)planar format to the caller. */
816 xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
818 struct v4l2_fh *vfh = file->private_data;
819 struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
821 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
822 format->fmt.pix_mp = dma->format.fmt.pix_mp;
824 format->fmt.pix = dma->format.fmt.pix;
/*
 * Adjust the requested format to what the hardware can do: pick the field
 * from the remote subdev (ALTERNATE vs NONE), fall back to the default fourcc
 * for unsupported formats, align/clamp width, height and bytesperline using
 * the DMA alignment and the per-format padding/bpl scaling factors, and
 * compute sizeimage per plane. Optionally returns the chosen format info via
 * @fmtinfo (assignment not visible in this truncated extract).
 */
830 __xvip_dma_try_format(struct xvip_dma *dma,
831 struct v4l2_format *format,
832 const struct xvip_video_format **fmtinfo)
834 const struct xvip_video_format *info;
835 unsigned int min_width;
836 unsigned int max_width;
837 unsigned int min_bpl;
838 unsigned int max_bpl;
842 unsigned int i, hsub, vsub, plane_width, plane_height;
844 unsigned int padding_factor_nume, padding_factor_deno;
845 unsigned int bpl_nume, bpl_deno;
846 struct v4l2_subdev_format fmt;
847 struct v4l2_subdev *subdev;
850 subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
854 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
855 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
/* Inherit the interlacing mode from the connected subdev. */
859 if (fmt.format.field == V4L2_FIELD_ALTERNATE) {
860 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
861 dma->format.fmt.pix_mp.field = V4L2_FIELD_ALTERNATE;
863 dma->format.fmt.pix.field = V4L2_FIELD_ALTERNATE;
865 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
866 dma->format.fmt.pix_mp.field = V4L2_FIELD_NONE;
868 dma->format.fmt.pix.field = V4L2_FIELD_NONE;
871 /* Retrieve format information and select the default format if the
872 * requested format isn't supported.
874 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
875 fourcc = format->fmt.pix_mp.pixelformat;
877 fourcc = format->fmt.pix.pixelformat;
879 info = xvip_get_format_by_fourcc(fourcc);
882 info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
884 xvip_width_padding_factor(info->fourcc, &padding_factor_nume,
885 &padding_factor_deno);
886 xvip_bpl_scaling_factor(info->fourcc, &bpl_nume, &bpl_deno);
888 /* The transfer alignment requirements are expressed in bytes. Compute
889 * the minimum and maximum values, clamp the requested width and convert
892 align = lcm(dma->align, info->bpp >> 3);
893 min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
894 max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);
896 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
897 struct v4l2_pix_format_mplane *pix_mp;
898 struct v4l2_plane_pix_format *plane_fmt;
900 pix_mp = &format->fmt.pix_mp;
901 plane_fmt = pix_mp->plane_fmt;
902 pix_mp->field = dma->format.fmt.pix_mp.field;
903 width = rounddown(pix_mp->width * info->bpl_factor, align);
904 pix_mp->width = clamp(width, min_width, max_width) /
906 pix_mp->height = clamp(pix_mp->height, XVIP_DMA_MIN_HEIGHT,
907 XVIP_DMA_MAX_HEIGHT);
910 * Clamp the requested bytes per line value. If the maximum
911 * bytes per line value is zero, the module doesn't support
912 * user configurable line sizes. Override the requested value
913 * with the minimum in that case.
916 max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
918 /* Handling contiguous data with mplanes */
919 if (info->buffers == 1) {
920 min_bpl = (pix_mp->width * info->bpl_factor *
921 padding_factor_nume * bpl_nume) /
922 (padding_factor_deno * bpl_deno);
923 min_bpl = roundup(min_bpl, dma->align);
924 bpl = roundup(plane_fmt[0].bytesperline, dma->align);
925 plane_fmt[0].bytesperline = clamp(bpl, min_bpl,
928 if (info->num_planes == 1) {
929 /* Single plane formats */
930 plane_fmt[0].sizeimage =
931 plane_fmt[0].bytesperline *
934 /* Multi plane formats */
935 plane_fmt[0].sizeimage =
936 DIV_ROUND_UP(plane_fmt[0].bytesperline *
941 /* Handling non-contiguous data with mplanes */
/* Per-plane clamp using the chroma subsampling factors. */
944 for (i = 0; i < info->num_planes; i++) {
945 plane_width = pix_mp->width / (i ? hsub : 1);
946 plane_height = pix_mp->height / (i ? vsub : 1);
947 min_bpl = (plane_width * info->bpl_factor *
948 padding_factor_nume * bpl_nume) /
949 (padding_factor_deno * bpl_deno);
950 min_bpl = roundup(min_bpl, dma->align);
951 bpl = rounddown(plane_fmt[i].bytesperline,
953 plane_fmt[i].bytesperline =
954 clamp(bpl, min_bpl, max_bpl);
955 plane_fmt[i].sizeimage =
956 plane_fmt[i].bytesperline *
/* Single-planar path: same clamping on v4l2_pix_format. */
961 struct v4l2_pix_format *pix;
963 pix = &format->fmt.pix;
964 pix->field = dma->format.fmt.pix.field;
965 width = rounddown(pix->width * info->bpl_factor, align);
966 pix->width = clamp(width, min_width, max_width) /
968 pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
969 XVIP_DMA_MAX_HEIGHT);
971 min_bpl = (pix->width * info->bpl_factor *
972 padding_factor_nume * bpl_nume) /
973 (padding_factor_deno * bpl_deno);
974 min_bpl = roundup(min_bpl, dma->align);
975 max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
976 bpl = rounddown(pix->bytesperline, dma->align);
977 pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
978 pix->sizeimage = pix->width * pix->height * info->bpp / 8;
/* VIDIOC_TRY_FMT handler: adjust @format in place without applying it. */
986 xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
988 struct v4l2_fh *vfh = file->private_data;
989 struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
991 __xvip_dma_try_format(dma, format, NULL);
/*
 * VIDIOC_S_FMT handler: try-adjust the requested format, refuse while the
 * vb2 queue is busy (return not visible in this truncated extract), then
 * store the adjusted format and its format-info descriptor on the DMA.
 */
996 xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
998 struct v4l2_fh *vfh = file->private_data;
999 struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
1000 const struct xvip_video_format *info;
1002 __xvip_dma_try_format(dma, format, &info);
1004 if (vb2_is_busy(&dma->queue))
1007 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
1008 dma->format.fmt.pix_mp = format->fmt.pix_mp;
1010 dma->format.fmt.pix = format->fmt.pix;
1012 dma->fmtinfo = info;
/* V4L2 ioctl table: format ops above plus stock vb2 buffer ioctl helpers. */
1017 static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
1018 .vidioc_querycap = xvip_dma_querycap,
1019 .vidioc_enum_fmt_vid_cap = xvip_dma_enum_format,
1020 .vidioc_enum_fmt_vid_cap_mplane = xvip_dma_enum_format,
1021 .vidioc_enum_fmt_vid_out = xvip_dma_enum_format,
1022 .vidioc_enum_fmt_vid_out_mplane = xvip_dma_enum_format,
1023 .vidioc_g_fmt_vid_cap = xvip_dma_get_format,
1024 .vidioc_g_fmt_vid_cap_mplane = xvip_dma_get_format,
1025 .vidioc_g_fmt_vid_out = xvip_dma_get_format,
1026 .vidioc_g_fmt_vid_out_mplane = xvip_dma_get_format,
1027 .vidioc_s_fmt_vid_cap = xvip_dma_set_format,
1028 .vidioc_s_fmt_vid_cap_mplane = xvip_dma_set_format,
1029 .vidioc_s_fmt_vid_out = xvip_dma_set_format,
1030 .vidioc_s_fmt_vid_out_mplane = xvip_dma_set_format,
1031 .vidioc_try_fmt_vid_cap = xvip_dma_try_format,
1032 .vidioc_try_fmt_vid_cap_mplane = xvip_dma_try_format,
1033 .vidioc_try_fmt_vid_out = xvip_dma_try_format,
1034 .vidioc_try_fmt_vid_out_mplane = xvip_dma_try_format,
1035 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1036 .vidioc_querybuf = vb2_ioctl_querybuf,
1037 .vidioc_qbuf = vb2_ioctl_qbuf,
1038 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1039 .vidioc_create_bufs = vb2_ioctl_create_bufs,
1040 .vidioc_expbuf = vb2_ioctl_expbuf,
1041 .vidioc_streamon = vb2_ioctl_streamon,
1042 .vidioc_streamoff = vb2_ioctl_streamoff,
1045 /* -----------------------------------------------------------------------------
1046 * V4L2 file operations
/* V4L2 file operations: stock v4l2/vb2 helpers, ioctls via video_ioctl2. */
1049 static const struct v4l2_file_operations xvip_dma_fops = {
1050 .owner = THIS_MODULE,
1051 .unlocked_ioctl = video_ioctl2,
1052 .open = v4l2_fh_open,
1053 .release = vb2_fop_release,
1054 .poll = vb2_fop_poll,
1055 .mmap = vb2_fop_mmap,
1058 /* -----------------------------------------------------------------------------
1059 * Xilinx Video DMA Core
/*
 * Initialize one DMA video node for @port of the composite device: set up
 * locks and the queued-buffer list, pick the default format (single- or
 * multi-planar), initialize the media pad and video_device, set up the vb2
 * queue, request the dmaengine channel named "port<N>", and register the
 * video device. The visible error path calls xvip_dma_cleanup().
 */
1062 int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
1063 enum v4l2_buf_type type, unsigned int port)
1067 u32 i, hsub, vsub, width, height;
1071 mutex_init(&dma->lock);
1072 mutex_init(&dma->pipe.lock);
1073 INIT_LIST_HEAD(&dma->queued_bufs);
1074 spin_lock_init(&dma->queued_lock);
1076 dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
1077 dma->format.type = type;
1079 if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
1080 struct v4l2_pix_format_mplane *pix_mp;
1082 pix_mp = &dma->format.fmt.pix_mp;
1083 pix_mp->pixelformat = dma->fmtinfo->fourcc;
1084 pix_mp->colorspace = V4L2_COLORSPACE_SRGB;
1085 pix_mp->field = V4L2_FIELD_NONE;
1086 pix_mp->width = XVIP_DMA_DEF_WIDTH;
1088 /* Handling contiguous data with mplanes */
1089 if (dma->fmtinfo->buffers == 1) {
1090 pix_mp->plane_fmt[0].bytesperline =
1091 pix_mp->width * dma->fmtinfo->bpl_factor;
1092 pix_mp->plane_fmt[0].sizeimage =
1093 pix_mp->width * pix_mp->height *
1094 dma->fmtinfo->bpp / 8;
1096 /* Handling non-contiguous data with mplanes */
1097 hsub = dma->fmtinfo->hsub;
1098 vsub = dma->fmtinfo->vsub;
1099 for (i = 0; i < dma->fmtinfo->buffers; i++) {
/* Chroma planes (i > 0) are subsampled by hsub/vsub. */
1100 width = pix_mp->width / (i ? hsub : 1);
1101 height = pix_mp->height / (i ? vsub : 1);
1102 pix_mp->plane_fmt[i].bytesperline =
1103 width * dma->fmtinfo->bpl_factor;
1104 pix_mp->plane_fmt[i].sizeimage = width * height;
/* Single-planar default format. */
1108 struct v4l2_pix_format *pix;
1110 pix = &dma->format.fmt.pix;
1111 pix->pixelformat = dma->fmtinfo->fourcc;
1112 pix->colorspace = V4L2_COLORSPACE_SRGB;
1113 pix->field = V4L2_FIELD_NONE;
1114 pix->width = XVIP_DMA_DEF_WIDTH;
1115 pix->height = XVIP_DMA_DEF_HEIGHT;
1116 pix->bytesperline = pix->width * dma->fmtinfo->bpl_factor;
1118 pix->width * pix->height * dma->fmtinfo->bpp / 8;
1121 /* Initialize the media entity... */
1122 if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1123 type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1124 dma->pad.flags = MEDIA_PAD_FL_SINK;
1126 dma->pad.flags = MEDIA_PAD_FL_SOURCE;
1128 ret = media_entity_pads_init(&dma->video.entity, 1, &dma->pad);
1132 /* ... and the video node... */
1133 dma->video.fops = &xvip_dma_fops;
1134 dma->video.v4l2_dev = &xdev->v4l2_dev;
1135 dma->video.queue = &dma->queue;
1136 snprintf(dma->video.name, sizeof(dma->video.name), "%s %s %u",
1137 xdev->dev->of_node->name,
1138 (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1139 type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1140 ? "output" : "input",
1143 dma->video.vfl_type = VFL_TYPE_GRABBER;
1144 if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1145 type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1146 dma->video.vfl_dir = VFL_DIR_RX;
1148 dma->video.vfl_dir = VFL_DIR_TX;
1150 dma->video.release = video_device_release_empty;
1151 dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
1152 dma->video.lock = &dma->lock;
1154 video_set_drvdata(&dma->video, dma);
1156 /* ... and the buffers queue... */
1157 /* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
1158 * V4L2 APIs would be inefficient. Testing on the command line with a
1159 * 'cat /dev/video?' thus won't be possible, but given that the driver
1160 * anyway requires a test tool to setup the pipeline before any video
1161 * stream can be started, requiring a specific V4L2 test tool as well
1162 * instead of 'cat' isn't really a drawback.
1164 dma->queue.type = type;
1165 dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
1166 dma->queue.lock = &dma->lock;
1167 dma->queue.drv_priv = dma;
1168 dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
1169 dma->queue.ops = &xvip_dma_queue_qops;
1170 dma->queue.mem_ops = &vb2_dma_contig_memops;
1171 dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
1172 | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
1173 dma->queue.dev = dma->xdev->dev;
1174 ret = vb2_queue_init(&dma->queue);
1176 dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
1180 /* ... and the DMA channel. */
1181 snprintf(name, sizeof(name), "port%u", port);
1182 dma->dma = dma_request_chan(dma->xdev->dev, name);
1183 if (IS_ERR(dma->dma)) {
1184 ret = PTR_ERR(dma->dma);
/* -EPROBE_DEFER is expected while the DMA provider probes; stay quiet. */
1185 if (ret != -EPROBE_DEFER)
1186 dev_err(dma->xdev->dev,
1187 "No Video DMA channel found");
/* Transfer alignment in bytes, derived from the engine's copy_align. */
1191 dma->align = 1 << dma->dma->device->copy_align;
1193 ret = video_register_device(&dma->video, VFL_TYPE_GRABBER, -1);
1195 dev_err(dma->xdev->dev, "failed to register video device\n");
/* Common error path (label not visible in this truncated extract). */
1202 xvip_dma_cleanup(dma);
/*
 * Undo xvip_dma_init(): unregister the video node if registered, release the
 * DMA channel if it was successfully requested, clean up the media entity and
 * destroy the mutexes. Safe to call from the init error path.
 */
1206 void xvip_dma_cleanup(struct xvip_dma *dma)
1208 if (video_is_registered(&dma->video))
1209 video_unregister_device(&dma->video);
1211 if (!IS_ERR(dma->dma))
1212 dma_release_channel(dma->dma);
1214 media_entity_cleanup(&dma->video.entity);
1216 mutex_destroy(&dma->lock);
1217 mutex_destroy(&dma->pipe.lock);