]> rtime.felk.cvut.cz Git - zynq/linux.git/blob - drivers/media/platform/xilinx/xilinx-dma.c
4cf00779d486866a6e7dba60d139c14e550cfae2
[zynq/linux.git] / drivers / media / platform / xilinx / xilinx-dma.c
1 /*
2  * Xilinx Video DMA
3  *
4  * Copyright (C) 2013-2015 Ideas on Board
5  * Copyright (C) 2013-2015 Xilinx, Inc.
6  *
7  * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
8  *           Laurent Pinchart <laurent.pinchart@ideasonboard.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  */
14
15 #include <linux/dma/xilinx_dma.h>
16 #include <linux/dma/xilinx_frmbuf.h>
17 #include <linux/lcm.h>
18 #include <linux/list.h>
19 #include <linux/module.h>
20 #include <linux/of.h>
21 #include <linux/slab.h>
22
23 #include <media/v4l2-dev.h>
24 #include <media/v4l2-fh.h>
25 #include <media/v4l2-ioctl.h>
26 #include <media/videobuf2-v4l2.h>
27 #include <media/videobuf2-dma-contig.h>
28
29 #include "xilinx-dma.h"
30 #include "xilinx-vip.h"
31 #include "xilinx-vipp.h"
32
33 #define XVIP_DMA_DEF_FORMAT             V4L2_PIX_FMT_YUYV
34 #define XVIP_DMA_DEF_WIDTH              1920
35 #define XVIP_DMA_DEF_HEIGHT             1080
36
37 /* Minimum and maximum widths are expressed in bytes */
38 #define XVIP_DMA_MIN_WIDTH              1U
39 #define XVIP_DMA_MAX_WIDTH              65535U
40 #define XVIP_DMA_MIN_HEIGHT             1U
41 #define XVIP_DMA_MAX_HEIGHT             8191U
42
43 /* -----------------------------------------------------------------------------
44  * Helper functions
45  */
46
47 static struct v4l2_subdev *
48 xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
49 {
50         struct media_pad *remote;
51
52         remote = media_entity_remote_pad(local);
53         if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
54                 return NULL;
55
56         if (pad)
57                 *pad = remote->index;
58
59         return media_entity_to_v4l2_subdev(remote->entity);
60 }
61
62 static int xvip_dma_verify_format(struct xvip_dma *dma)
63 {
64         struct v4l2_subdev_format fmt;
65         struct v4l2_subdev *subdev;
66         int ret;
67         int width, height;
68
69         subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
70         if (!subdev)
71                 return -EPIPE;
72
73         fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
74         ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
75         if (ret < 0)
76                 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
77
78         if (dma->fmtinfo->code != fmt.format.code)
79                 return -EINVAL;
80
81         if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
82                 width = dma->format.fmt.pix_mp.width;
83                 height = dma->format.fmt.pix_mp.height;
84         } else {
85                 width = dma->format.fmt.pix.width;
86                 height = dma->format.fmt.pix.height;
87         }
88
89         if (width != fmt.format.width || height != fmt.format.height)
90                 return -EINVAL;
91
92         return 0;
93 }
94
95 /* -----------------------------------------------------------------------------
96  * Pipeline Stream Management
97  */
98
99 /**
100  * xvip_pipeline_start_stop - Start ot stop streaming on a pipeline
101  * @xdev: Composite video device
102  * @dma: xvip dma
103  * @start: Start (when true) or stop (when false) the pipeline
104  *
105  * Walk the entities chain starting @dma and start or stop all of them
106  *
107  * Return: 0 if successful, or the return value of the failed video::s_stream
108  * operation otherwise.
109  */
110 static int xvip_pipeline_start_stop(struct xvip_composite_device *xdev,
111                                     struct xvip_dma *dma, bool start)
112 {
113         struct media_graph graph;
114         struct media_entity *entity = &dma->video.entity;
115         struct media_device *mdev = entity->graph_obj.mdev;
116         struct v4l2_subdev *subdev;
117         bool is_streaming;
118         int ret;
119
120         mutex_lock(&mdev->graph_mutex);
121
122         /* Walk the graph to locate the subdev nodes */
123         ret = media_graph_walk_init(&graph, mdev);
124         if (ret) {
125                 mutex_unlock(&mdev->graph_mutex);
126                 return ret;
127         }
128
129         media_graph_walk_start(&graph, entity);
130
131         while ((entity = media_graph_walk_next(&graph))) {
132                 /* We want to stream on/off only subdevs */
133                 if (!is_media_entity_v4l2_subdev(entity))
134                         continue;
135
136                 subdev = media_entity_to_v4l2_subdev(entity);
137
138                 /* This is to maintain list of stream on/off devices */
139                 is_streaming = xvip_subdev_set_streaming(xdev, subdev, start);
140
141                 /*
142                  * start or stop the subdev only once in case if they are
143                  * shared between sub-graphs
144                  */
145                 if (start != is_streaming) {
146                         ret = v4l2_subdev_call(subdev, video, s_stream,
147                                                start);
148                         if (start && ret < 0 && ret != -ENOIOCTLCMD) {
149                                 dev_err(xdev->dev, "s_stream is failed on subdev\n");
150                                 xvip_subdev_set_streaming(xdev, subdev, !start);
151                                 return ret;
152                         }
153                 }
154         }
155
156         mutex_unlock(&mdev->graph_mutex);
157         media_graph_walk_cleanup(&graph);
158
159         return 0;
160 }
161
162 /**
163  * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
164  * @pipe: The pipeline
165  * @on: Turn the stream on when true or off when false
166  *
167  * The pipeline is shared between all DMA engines connect at its input and
168  * output. While the stream state of DMA engines can be controlled
169  * independently, pipelines have a shared stream state that enable or disable
170  * all entities in the pipeline. For this reason the pipeline uses a streaming
171  * counter that tracks the number of DMA engines that have requested the stream
172  * to be enabled. This will walk the graph starting from each DMA and enable or
173  * disable the entities in the path.
174  *
175  * When called with the @on argument set to true, this function will increment
176  * the pipeline streaming count. If the streaming count reaches the number of
177  * DMA engines in the pipeline it will enable all entities that belong to the
178  * pipeline.
179  *
180  * Similarly, when called with the @on argument set to false, this function will
181  * decrement the pipeline streaming count and disable all entities in the
182  * pipeline when the streaming count reaches zero.
183  *
184  * Return: 0 if successful, or the return value of the failed video::s_stream
185  * operation otherwise. Stopping the pipeline never fails. The pipeline state is
186  * not updated when the operation fails.
187  */
188 static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
189 {
190         struct xvip_composite_device *xdev;
191         struct xvip_dma *dma;
192         int ret = 0;
193
194         mutex_lock(&pipe->lock);
195         xdev = pipe->xdev;
196
197         if (on) {
198                 if (pipe->stream_count == pipe->num_dmas - 1) {
199                         /*
200                          * This will iterate the DMAs and the stream-on of
201                          * subdevs may not be sequential due to multiple
202                          * sub-graph path
203                          */
204                         list_for_each_entry(dma, &xdev->dmas, list) {
205                                 ret = xvip_pipeline_start_stop(xdev, dma, true);
206                                 if (ret < 0)
207                                         goto done;
208                         }
209                 }
210                 pipe->stream_count++;
211         } else {
212                 if (--pipe->stream_count == 0)
213                         list_for_each_entry(dma, &xdev->dmas, list)
214                                 xvip_pipeline_start_stop(xdev, dma, false);
215         }
216
217 done:
218         mutex_unlock(&pipe->lock);
219         return ret;
220 }
221
222 static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
223                                   struct xvip_dma *start)
224 {
225         struct media_graph graph;
226         struct media_entity *entity = &start->video.entity;
227         struct media_device *mdev = entity->graph_obj.mdev;
228         unsigned int num_inputs = 0;
229         unsigned int num_outputs = 0;
230         int ret;
231
232         mutex_lock(&mdev->graph_mutex);
233
234         /* Walk the graph to locate the video nodes. */
235         ret = media_graph_walk_init(&graph, mdev);
236         if (ret) {
237                 mutex_unlock(&mdev->graph_mutex);
238                 return ret;
239         }
240
241         media_graph_walk_start(&graph, entity);
242
243         while ((entity = media_graph_walk_next(&graph))) {
244                 struct xvip_dma *dma;
245
246                 if (entity->function != MEDIA_ENT_F_IO_V4L)
247                         continue;
248
249                 dma = to_xvip_dma(media_entity_to_video_device(entity));
250
251                 if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
252                         num_outputs++;
253                 } else {
254                         num_inputs++;
255                 }
256         }
257
258         mutex_unlock(&mdev->graph_mutex);
259
260         media_graph_walk_cleanup(&graph);
261
262         /* We need at least one DMA to proceed */
263         if (num_outputs == 0 && num_inputs == 0)
264                 return -EPIPE;
265
266         pipe->num_dmas = num_inputs + num_outputs;
267         pipe->xdev = start->xdev;
268
269         return 0;
270 }
271
/*
 * Reset the pipeline state once the last user is gone. Caller must hold
 * pipe->lock (see xvip_pipeline_cleanup() and xvip_pipeline_prepare()).
 */
static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
	/* Forces re-validation on the next xvip_pipeline_prepare() call. */
	pipe->num_dmas = 0;
}
276
277 /**
278  * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
279  * @pipe: the pipeline
280  *
281  * Decrease the pipeline use count and clean it up if we were the last user.
282  */
283 static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
284 {
285         mutex_lock(&pipe->lock);
286
287         /* If we're the last user clean up the pipeline. */
288         if (--pipe->use_count == 0)
289                 __xvip_pipeline_cleanup(pipe);
290
291         mutex_unlock(&pipe->lock);
292 }
293
294 /**
295  * xvip_pipeline_prepare - Prepare the pipeline for streaming
296  * @pipe: the pipeline
297  * @dma: DMA engine at one end of the pipeline
298  *
299  * Validate the pipeline if no user exists yet, otherwise just increase the use
300  * count.
301  *
302  * Return: 0 if successful or -EPIPE if the pipeline is not valid.
303  */
304 static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
305                                  struct xvip_dma *dma)
306 {
307         int ret;
308
309         mutex_lock(&pipe->lock);
310
311         /* If we're the first user validate and initialize the pipeline. */
312         if (pipe->use_count == 0) {
313                 ret = xvip_pipeline_validate(pipe, dma);
314                 if (ret < 0) {
315                         __xvip_pipeline_cleanup(pipe);
316                         goto done;
317                 }
318         }
319
320         pipe->use_count++;
321         ret = 0;
322
323 done:
324         mutex_unlock(&pipe->lock);
325         return ret;
326 }
327
328 /* -----------------------------------------------------------------------------
329  * videobuf2 queue operations
330  */
331
/**
 * struct xvip_dma_buffer - Video DMA buffer
 * @buf: vb2 buffer base object
 * @queue: buffer list entry in the DMA engine queued buffers list
 * @dma: DMA channel that uses the buffer
 * @desc: Descriptor associated with this structure
 */
struct xvip_dma_buffer {
	struct vb2_v4l2_buffer buf;
	struct list_head queue;
	struct xvip_dma *dma;
	struct dma_async_tx_descriptor *desc;
};

/* Convert a vb2_v4l2_buffer pointer into its enclosing xvip_dma_buffer. */
#define to_xvip_dma_buffer(vb)	container_of(vb, struct xvip_dma_buffer, buf)
347
348 static void xvip_dma_complete(void *param)
349 {
350         struct xvip_dma_buffer *buf = param;
351         struct xvip_dma *dma = buf->dma;
352         int i, sizeimage;
353         u32 fid;
354         int status;
355
356         spin_lock(&dma->queued_lock);
357         list_del(&buf->queue);
358         spin_unlock(&dma->queued_lock);
359
360         buf->buf.field = V4L2_FIELD_NONE;
361         buf->buf.sequence = dma->sequence++;
362         buf->buf.vb2_buf.timestamp = ktime_get_ns();
363
364         status = xilinx_xdma_get_fid(dma->dma, buf->desc, &fid);
365         if (!status) {
366                 if (((V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) &&
367                      dma->format.fmt.pix_mp.field == V4L2_FIELD_ALTERNATE) ||
368                      dma->format.fmt.pix.field == V4L2_FIELD_ALTERNATE) {
369                         /*
370                          * fid = 1 is odd field i.e. V4L2_FIELD_TOP.
371                          * fid = 0 is even field i.e. V4L2_FIELD_BOTTOM.
372                          */
373                         buf->buf.field = fid ?
374                                          V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
375
376                         if (fid == dma->prev_fid)
377                                 buf->buf.sequence = dma->sequence++;
378
379                         buf->buf.sequence >>= 1;
380                         dma->prev_fid = fid;
381                 }
382         }
383
384         if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
385                 for (i = 0; i < dma->fmtinfo->buffers; i++) {
386                         sizeimage =
387                                 dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
388                         vb2_set_plane_payload(&buf->buf.vb2_buf, i, sizeimage);
389                 }
390         } else {
391                 sizeimage = dma->format.fmt.pix.sizeimage;
392                 vb2_set_plane_payload(&buf->buf.vb2_buf, 0, sizeimage);
393         }
394
395         vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
396 }
397
398 static int
399 xvip_dma_queue_setup(struct vb2_queue *vq,
400                      unsigned int *nbuffers, unsigned int *nplanes,
401                      unsigned int sizes[], struct device *alloc_devs[])
402 {
403         struct xvip_dma *dma = vb2_get_drv_priv(vq);
404         u8 i;
405         int sizeimage;
406
407         /* Multi planar case: Make sure the image size is large enough */
408         if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
409                 if (*nplanes) {
410                         if (*nplanes != dma->format.fmt.pix_mp.num_planes)
411                                 return -EINVAL;
412
413                         for (i = 0; i < *nplanes; i++) {
414                                 sizeimage =
415                                   dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
416                                 if (sizes[i] < sizeimage)
417                                         return -EINVAL;
418                         }
419                 } else {
420                         *nplanes = dma->fmtinfo->buffers;
421                         for (i = 0; i < dma->fmtinfo->buffers; i++) {
422                                 sizeimage =
423                                   dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
424                                 sizes[i] = sizeimage;
425                         }
426                 }
427                 return 0;
428         }
429
430         /* Single planar case: Make sure the image size is large enough */
431         sizeimage = dma->format.fmt.pix.sizeimage;
432         if (*nplanes == 1)
433                 return sizes[0] < sizeimage ? -EINVAL : 0;
434
435         *nplanes = 1;
436         sizes[0] = sizeimage;
437
438         return 0;
439 }
440
441 static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
442 {
443         struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
444         struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
445         struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
446
447         buf->dma = dma;
448
449         return 0;
450 }
451
452 static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
453 {
454         struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
455         struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
456         struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
457         struct dma_async_tx_descriptor *desc;
458         dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
459         u32 flags;
460         u32 luma_size;
461         u32 padding_factor_nume, padding_factor_deno, bpl_nume, bpl_deno;
462         u32 fid = ~0;
463
464         if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
465             dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
466                 flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
467                 dma->xt.dir = DMA_DEV_TO_MEM;
468                 dma->xt.src_sgl = false;
469                 dma->xt.dst_sgl = true;
470                 dma->xt.dst_start = addr;
471         } else if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
472                    dma->queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
473                 flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
474                 dma->xt.dir = DMA_MEM_TO_DEV;
475                 dma->xt.src_sgl = true;
476                 dma->xt.dst_sgl = false;
477                 dma->xt.src_start = addr;
478         }
479
480         /*
481          * DMA IP supports only 2 planes, so one datachunk is sufficient
482          * to get start address of 2nd plane
483          */
484         if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
485                 struct v4l2_pix_format_mplane *pix_mp;
486
487                 pix_mp = &dma->format.fmt.pix_mp;
488                 xilinx_xdma_v4l2_config(dma->dma, pix_mp->pixelformat);
489                 xvip_width_padding_factor(pix_mp->pixelformat,
490                                           &padding_factor_nume,
491                                           &padding_factor_deno);
492                 xvip_bpl_scaling_factor(pix_mp->pixelformat, &bpl_nume,
493                                         &bpl_deno);
494                 dma->xt.frame_size = dma->fmtinfo->num_planes;
495                 dma->sgl[0].size = (pix_mp->width * dma->fmtinfo->bpl_factor *
496                                     padding_factor_nume * bpl_nume) /
497                                     (padding_factor_deno * bpl_deno);
498                 dma->sgl[0].icg = pix_mp->plane_fmt[0].bytesperline -
499                                                         dma->sgl[0].size;
500                 dma->xt.numf = pix_mp->height;
501
502                 /*
503                  * dst_icg is the number of bytes to jump after last luma addr
504                  * and before first chroma addr
505                  */
506
507                 /* Handling contiguous data with mplanes */
508                 if (dma->fmtinfo->buffers == 1) {
509                         dma->sgl[0].dst_icg = 0;
510                 } else {
511                         /* Handling non-contiguous data with mplanes */
512                         if (dma->fmtinfo->buffers == 2) {
513                                 dma_addr_t chroma_addr =
514                                         vb2_dma_contig_plane_dma_addr(vb, 1);
515                                 luma_size = pix_mp->plane_fmt[0].bytesperline *
516                                             dma->xt.numf;
517                                 if (chroma_addr > addr)
518                                         dma->sgl[0].dst_icg = chroma_addr -
519                                                               addr - luma_size;
520                                 }
521                 }
522         } else {
523                 struct v4l2_pix_format *pix;
524
525                 pix = &dma->format.fmt.pix;
526                 xilinx_xdma_v4l2_config(dma->dma, pix->pixelformat);
527                 xvip_width_padding_factor(pix->pixelformat,
528                                           &padding_factor_nume,
529                                           &padding_factor_deno);
530                 xvip_bpl_scaling_factor(pix->pixelformat, &bpl_nume,
531                                         &bpl_deno);
532                 dma->xt.frame_size = dma->fmtinfo->num_planes;
533                 dma->sgl[0].size = (pix->width * dma->fmtinfo->bpl_factor *
534                                     padding_factor_nume * bpl_nume) /
535                                     (padding_factor_deno * bpl_deno);
536                 dma->sgl[0].icg = pix->bytesperline - dma->sgl[0].size;
537                 dma->xt.numf = pix->height;
538                 dma->sgl[0].dst_icg = 0;
539         }
540
541         desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
542         if (!desc) {
543                 dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
544                 vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
545                 return;
546         }
547         desc->callback = xvip_dma_complete;
548         desc->callback_param = buf;
549         buf->desc = desc;
550
551         if (buf->buf.field == V4L2_FIELD_TOP)
552                 fid = 1;
553         else if (buf->buf.field == V4L2_FIELD_BOTTOM)
554                 fid = 0;
555         else if (buf->buf.field == V4L2_FIELD_NONE)
556                 fid = 0;
557
558         xilinx_xdma_set_fid(dma->dma, desc, fid);
559
560         spin_lock_irq(&dma->queued_lock);
561         list_add_tail(&buf->queue, &dma->queued_bufs);
562         spin_unlock_irq(&dma->queued_lock);
563
564         dmaengine_submit(desc);
565
566         if (vb2_is_streaming(&dma->queue))
567                 dma_async_issue_pending(dma->dma);
568 }
569
570 static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
571 {
572         struct xvip_dma *dma = vb2_get_drv_priv(vq);
573         struct xvip_dma_buffer *buf, *nbuf;
574         struct xvip_pipeline *pipe;
575         int ret;
576
577         dma->sequence = 0;
578         dma->prev_fid = ~0;
579
580         /*
581          * Start streaming on the pipeline. No link touching an entity in the
582          * pipeline can be activated or deactivated once streaming is started.
583          *
584          * Use the pipeline object embedded in the first DMA object that starts
585          * streaming.
586          */
587         mutex_lock(&dma->xdev->lock);
588         pipe = dma->video.entity.pipe
589              ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;
590
591         ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
592         mutex_unlock(&dma->xdev->lock);
593         if (ret < 0)
594                 goto error;
595
596         /* Verify that the configured format matches the output of the
597          * connected subdev.
598          */
599         ret = xvip_dma_verify_format(dma);
600         if (ret < 0)
601                 goto error_stop;
602
603         ret = xvip_pipeline_prepare(pipe, dma);
604         if (ret < 0)
605                 goto error_stop;
606
607         /* Start the DMA engine. This must be done before starting the blocks
608          * in the pipeline to avoid DMA synchronization issues.
609          */
610         dma_async_issue_pending(dma->dma);
611
612         /* Start the pipeline. */
613         ret = xvip_pipeline_set_stream(pipe, true);
614         if (ret < 0)
615                 goto error_stop;
616
617         return 0;
618
619 error_stop:
620         media_pipeline_stop(&dma->video.entity);
621
622 error:
623         dmaengine_terminate_all(dma->dma);
624         /* Give back all queued buffers to videobuf2. */
625         spin_lock_irq(&dma->queued_lock);
626         list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
627                 vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
628                 list_del(&buf->queue);
629         }
630         spin_unlock_irq(&dma->queued_lock);
631
632         return ret;
633 }
634
/**
 * xvip_dma_stop_streaming - vb2 stop_streaming operation
 * @vq: the vb2 queue
 *
 * Tear down in the reverse order of xvip_dma_start_streaming(): stream off
 * the pipeline subdevs, terminate the DMA engine, drop the pipeline use
 * count and release the media pipeline, then return every still-queued
 * buffer to vb2 in the ERROR state.
 */
static void xvip_dma_stop_streaming(struct vb2_queue *vq)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);
	struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
	struct xvip_dma_buffer *buf, *nbuf;

	/* Stop the pipeline. */
	xvip_pipeline_set_stream(pipe, false);

	/* Stop and reset the DMA engine. */
	dmaengine_terminate_all(dma->dma);

	/* Cleanup the pipeline and mark it as being stopped. */
	xvip_pipeline_cleanup(pipe);
	media_pipeline_stop(&dma->video.entity);

	/* Give back all queued buffers to videobuf2. */
	spin_lock_irq(&dma->queued_lock);
	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
		list_del(&buf->queue);
	}
	spin_unlock_irq(&dma->queued_lock);
}
659
/* videobuf2 queue operations for the Xilinx video DMA engines. */
static const struct vb2_ops xvip_dma_queue_qops = {
	.queue_setup = xvip_dma_queue_setup,
	.buf_prepare = xvip_dma_buffer_prepare,
	.buf_queue = xvip_dma_buffer_queue,
	/* Stock vb2 helpers that release/retake the queue lock around waits. */
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = xvip_dma_start_streaming,
	.stop_streaming = xvip_dma_stop_streaming,
};
669
670 /* -----------------------------------------------------------------------------
671  * V4L2 ioctls
672  */
673
674 static int
675 xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
676 {
677         struct v4l2_fh *vfh = file->private_data;
678         struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
679
680         cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
681                           | dma->xdev->v4l2_caps;
682
683         cap->device_caps = V4L2_CAP_STREAMING;
684         switch (dma->queue.type) {
685         case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
686                 cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE_MPLANE;
687                 break;
688         case V4L2_BUF_TYPE_VIDEO_CAPTURE:
689                 cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
690                 break;
691         case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
692                 cap->device_caps |= V4L2_CAP_VIDEO_OUTPUT_MPLANE;
693                 break;
694         case V4L2_BUF_TYPE_VIDEO_OUTPUT:
695                 cap->device_caps |= V4L2_CAP_VIDEO_OUTPUT;
696                 break;
697         }
698
699         strlcpy(cap->driver, "xilinx-vipp", sizeof(cap->driver));
700         strlcpy(cap->card, dma->video.name, sizeof(cap->card));
701         snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s:%u",
702                  dma->xdev->dev->of_node->name, dma->port);
703
704         return 0;
705 }
706
707 static int xvip_xdma_enum_fmt(struct xvip_dma *dma, struct v4l2_fmtdesc *f,
708                               struct v4l2_subdev_format *v4l_fmt)
709 {
710         const struct xvip_video_format *fmt;
711         int ret;
712         u32 i, fmt_cnt, *fmts;
713
714         ret = xilinx_xdma_get_v4l2_vid_fmts(dma->dma, &fmt_cnt, &fmts);
715         if (ret)
716                 return ret;
717
718         /* Has media pad value changed? */
719         if (v4l_fmt->format.code != dma->remote_subdev_med_bus ||
720             !dma->remote_subdev_med_bus) {
721                 /* Re-generate legal list of fourcc codes */
722                 dma->poss_v4l2_fmt_cnt = 0;
723                 dma->remote_subdev_med_bus = v4l_fmt->format.code;
724
725                 if (!dma->poss_v4l2_fmts) {
726                         dma->poss_v4l2_fmts =
727                                 devm_kzalloc(&dma->video.dev,
728                                              sizeof(u32) * fmt_cnt,
729                                              GFP_KERNEL);
730                         if (!dma->poss_v4l2_fmts)
731                                 return -ENOMEM;
732                 }
733
734                 for (i = 0; i < fmt_cnt; i++) {
735                         fmt = xvip_get_format_by_fourcc(fmts[i]);
736                         if (IS_ERR(fmt))
737                                 return PTR_ERR(fmt);
738
739                         if (fmt->code != dma->remote_subdev_med_bus)
740                                 continue;
741
742                         dma->poss_v4l2_fmts[dma->poss_v4l2_fmt_cnt++] = fmts[i];
743                 }
744         }
745
746         /* Return err if index is greater than count of legal values */
747         if (f->index >= dma->poss_v4l2_fmt_cnt)
748                 return -EINVAL;
749
750         /* Else return pix format in table */
751         fmt = xvip_get_format_by_fourcc(dma->poss_v4l2_fmts[f->index]);
752         if (IS_ERR(fmt))
753                 return PTR_ERR(fmt);
754
755         f->pixelformat = fmt->fourcc;
756         strlcpy(f->description, fmt->description,
757                 sizeof(f->description));
758
759         return 0;
760 }
761
/* FIXME: without this callback function, some applications are not configured
 * with correct formats, and it results in frames in wrong format. Whether this
 * callback needs to be required is not clearly defined, so it should be
 * clarified through the mailing list.
 */
/**
 * xvip_dma_enum_format - VIDIOC_ENUM_FMT handler
 * @file: V4L2 file handle
 * @fh: file handle private data (unused, taken from @file)
 * @f: format descriptor to fill in
 *
 * First try the frmbuf-DMA-specific enumeration; if that fails for any
 * reason (including index out of range), fall back to reporting the single
 * pix format derived from the remote subdev's active media bus code.
 */
static int
xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
	struct v4l2_subdev *subdev;
	struct v4l2_subdev_format v4l_fmt;
	const struct xvip_video_format *fmt;
	int err, ret;

	/* Establish media pad format */
	subdev = xvip_dma_remote_subdev(&dma->pad, &v4l_fmt.pad);
	if (!subdev)
		return -EPIPE;

	v4l_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &v4l_fmt);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	/*
	 * In case of frmbuf DMA, this will invoke frambuf driver specific APIs
	 * to enumerate formats otherwise return the pix format corresponding
	 * to subdev's media bus format. This kind of separation would be
	 * helpful for clean up and upstreaming.
	 */
	err = xvip_xdma_enum_fmt(dma, f, &v4l_fmt);
	if (!err)
		return err;

	/*
	 * This logic will just return one pix format based on subdev's
	 * media bus format
	 */
	if (f->index > 0)
		return -EINVAL;

	fmt = xvip_get_format_by_code(v4l_fmt.format.code);
	if (IS_ERR(fmt))
		return PTR_ERR(fmt);

	f->pixelformat = fmt->fourcc;
	strlcpy(f->description, fmt->description,
		sizeof(f->description));

	return 0;
}
814
815 static int
816 xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
817 {
818         struct v4l2_fh *vfh = file->private_data;
819         struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
820
821         if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
822                 format->fmt.pix_mp = dma->format.fmt.pix_mp;
823         else
824                 format->fmt.pix = dma->format.fmt.pix;
825
826         return 0;
827 }
828
829 static void
830 __xvip_dma_try_format(struct xvip_dma *dma,
831                       struct v4l2_format *format,
832                       const struct xvip_video_format **fmtinfo)
833 {
834         const struct xvip_video_format *info;
835         unsigned int min_width;
836         unsigned int max_width;
837         unsigned int min_bpl;
838         unsigned int max_bpl;
839         unsigned int width;
840         unsigned int align;
841         unsigned int bpl;
842         unsigned int i, hsub, vsub, plane_width, plane_height;
843         unsigned int fourcc;
844         unsigned int padding_factor_nume, padding_factor_deno;
845         unsigned int bpl_nume, bpl_deno;
846         struct v4l2_subdev_format fmt;
847         struct v4l2_subdev *subdev;
848         int ret;
849
850         subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
851         if (!subdev)
852                 return;
853
854         fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
855         ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
856         if (ret < 0)
857                 return;
858
859         if (fmt.format.field == V4L2_FIELD_ALTERNATE) {
860                 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
861                         dma->format.fmt.pix_mp.field = V4L2_FIELD_ALTERNATE;
862                 else
863                         dma->format.fmt.pix.field = V4L2_FIELD_ALTERNATE;
864         } else {
865                 if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
866                         dma->format.fmt.pix_mp.field = V4L2_FIELD_NONE;
867                 else
868                         dma->format.fmt.pix.field = V4L2_FIELD_NONE;
869         }
870
871         /* Retrieve format information and select the default format if the
872          * requested format isn't supported.
873          */
874         if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
875                 fourcc = format->fmt.pix_mp.pixelformat;
876         else
877                 fourcc = format->fmt.pix.pixelformat;
878
879         info = xvip_get_format_by_fourcc(fourcc);
880
881         if (IS_ERR(info))
882                 info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
883
884         xvip_width_padding_factor(info->fourcc, &padding_factor_nume,
885                                   &padding_factor_deno);
886         xvip_bpl_scaling_factor(info->fourcc, &bpl_nume, &bpl_deno);
887
888         /* The transfer alignment requirements are expressed in bytes. Compute
889          * the minimum and maximum values, clamp the requested width and convert
890          * it back to pixels.
891          */
892         align = lcm(dma->align, info->bpp >> 3);
893         min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
894         max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);
895
896         if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
897                 struct v4l2_pix_format_mplane *pix_mp;
898                 struct v4l2_plane_pix_format *plane_fmt;
899
900                 pix_mp = &format->fmt.pix_mp;
901                 plane_fmt = pix_mp->plane_fmt;
902                 pix_mp->field = dma->format.fmt.pix_mp.field;
903                 width = rounddown(pix_mp->width * info->bpl_factor, align);
904                 pix_mp->width = clamp(width, min_width, max_width) /
905                                 info->bpl_factor;
906                 pix_mp->height = clamp(pix_mp->height, XVIP_DMA_MIN_HEIGHT,
907                                        XVIP_DMA_MAX_HEIGHT);
908
909                 /*
910                  * Clamp the requested bytes per line value. If the maximum
911                  * bytes per line value is zero, the module doesn't support
912                  * user configurable line sizes. Override the requested value
913                  * with the minimum in that case.
914                  */
915
916                 max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
917
918                 /* Handling contiguous data with mplanes */
919                 if (info->buffers == 1) {
920                         min_bpl = (pix_mp->width * info->bpl_factor *
921                                    padding_factor_nume * bpl_nume) /
922                                    (padding_factor_deno * bpl_deno);
923                         min_bpl = roundup(min_bpl, dma->align);
924                         bpl = roundup(plane_fmt[0].bytesperline, dma->align);
925                         plane_fmt[0].bytesperline = clamp(bpl, min_bpl,
926                                                           max_bpl);
927
928                         if (info->num_planes == 1) {
929                                 /* Single plane formats */
930                                 plane_fmt[0].sizeimage =
931                                                 plane_fmt[0].bytesperline *
932                                                 pix_mp->height;
933                         } else {
934                                 /* Multi plane formats */
935                                 plane_fmt[0].sizeimage =
936                                         DIV_ROUND_UP(plane_fmt[0].bytesperline *
937                                                      pix_mp->height *
938                                                      info->bpp, 8);
939                         }
940                 } else {
941                         /* Handling non-contiguous data with mplanes */
942                         hsub = info->hsub;
943                         vsub = info->vsub;
944                         for (i = 0; i < info->num_planes; i++) {
945                                 plane_width = pix_mp->width / (i ? hsub : 1);
946                                 plane_height = pix_mp->height / (i ? vsub : 1);
947                                 min_bpl = (plane_width * info->bpl_factor *
948                                            padding_factor_nume * bpl_nume) /
949                                            (padding_factor_deno * bpl_deno);
950                                 min_bpl = roundup(min_bpl, dma->align);
951                                 bpl = rounddown(plane_fmt[i].bytesperline,
952                                                 dma->align);
953                                 plane_fmt[i].bytesperline =
954                                                 clamp(bpl, min_bpl, max_bpl);
955                                 plane_fmt[i].sizeimage =
956                                                 plane_fmt[i].bytesperline *
957                                                 plane_height;
958                         }
959                 }
960         } else {
961                 struct v4l2_pix_format *pix;
962
963                 pix = &format->fmt.pix;
964                 pix->field = dma->format.fmt.pix.field;
965                 width = rounddown(pix->width * info->bpl_factor, align);
966                 pix->width = clamp(width, min_width, max_width) /
967                              info->bpl_factor;
968                 pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
969                                     XVIP_DMA_MAX_HEIGHT);
970
971                 min_bpl = (pix->width * info->bpl_factor *
972                           padding_factor_nume * bpl_nume) /
973                           (padding_factor_deno * bpl_deno);
974                 min_bpl = roundup(min_bpl, dma->align);
975                 max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
976                 bpl = rounddown(pix->bytesperline, dma->align);
977                 pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
978                 pix->sizeimage = pix->width * pix->height * info->bpp / 8;
979         }
980
981         if (fmtinfo)
982                 *fmtinfo = info;
983 }
984
985 static int
986 xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
987 {
988         struct v4l2_fh *vfh = file->private_data;
989         struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
990
991         __xvip_dma_try_format(dma, format, NULL);
992         return 0;
993 }
994
995 static int
996 xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
997 {
998         struct v4l2_fh *vfh = file->private_data;
999         struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
1000         const struct xvip_video_format *info;
1001
1002         __xvip_dma_try_format(dma, format, &info);
1003
1004         if (vb2_is_busy(&dma->queue))
1005                 return -EBUSY;
1006
1007         if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
1008                 dma->format.fmt.pix_mp = format->fmt.pix_mp;
1009         else
1010                 dma->format.fmt.pix = format->fmt.pix;
1011
1012         dma->fmtinfo = info;
1013
1014         return 0;
1015 }
1016
/*
 * V4L2 ioctl operations. Format negotiation (enum/get/set/try, in both
 * single- and multi-planar variants) is handled by this driver; buffer
 * management ioctls are delegated to the videobuf2 helpers.
 */
static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
	.vidioc_querycap		= xvip_dma_querycap,
	.vidioc_enum_fmt_vid_cap	= xvip_dma_enum_format,
	.vidioc_enum_fmt_vid_cap_mplane = xvip_dma_enum_format,
	.vidioc_enum_fmt_vid_out	= xvip_dma_enum_format,
	.vidioc_enum_fmt_vid_out_mplane = xvip_dma_enum_format,
	.vidioc_g_fmt_vid_cap		= xvip_dma_get_format,
	.vidioc_g_fmt_vid_cap_mplane	= xvip_dma_get_format,
	.vidioc_g_fmt_vid_out		= xvip_dma_get_format,
	.vidioc_g_fmt_vid_out_mplane	= xvip_dma_get_format,
	.vidioc_s_fmt_vid_cap		= xvip_dma_set_format,
	.vidioc_s_fmt_vid_cap_mplane	= xvip_dma_set_format,
	.vidioc_s_fmt_vid_out		= xvip_dma_set_format,
	.vidioc_s_fmt_vid_out_mplane	= xvip_dma_set_format,
	.vidioc_try_fmt_vid_cap		= xvip_dma_try_format,
	.vidioc_try_fmt_vid_cap_mplane	= xvip_dma_try_format,
	.vidioc_try_fmt_vid_out		= xvip_dma_try_format,
	.vidioc_try_fmt_vid_out_mplane	= xvip_dma_try_format,
	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_streamon		= vb2_ioctl_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,
};
1044
1045 /* -----------------------------------------------------------------------------
1046  * V4L2 file operations
1047  */
1048
/*
 * File operations for the video node. All entry points are standard V4L2
 * and videobuf2 helpers; no read()/write() support is provided (see the
 * comment in xvip_dma_init() about the deliberately omitted VB2_READ/WRITE
 * io_modes).
 */
static const struct v4l2_file_operations xvip_dma_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
};
1057
1058 /* -----------------------------------------------------------------------------
1059  * Xilinx Video DMA Core
1060  */
1061
1062 int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
1063                   enum v4l2_buf_type type, unsigned int port)
1064 {
1065         char name[16];
1066         int ret;
1067         u32 i, hsub, vsub, width, height;
1068
1069         dma->xdev = xdev;
1070         dma->port = port;
1071         mutex_init(&dma->lock);
1072         mutex_init(&dma->pipe.lock);
1073         INIT_LIST_HEAD(&dma->queued_bufs);
1074         spin_lock_init(&dma->queued_lock);
1075
1076         dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
1077         dma->format.type = type;
1078
1079         if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
1080                 struct v4l2_pix_format_mplane *pix_mp;
1081
1082                 pix_mp = &dma->format.fmt.pix_mp;
1083                 pix_mp->pixelformat = dma->fmtinfo->fourcc;
1084                 pix_mp->colorspace = V4L2_COLORSPACE_SRGB;
1085                 pix_mp->field = V4L2_FIELD_NONE;
1086                 pix_mp->width = XVIP_DMA_DEF_WIDTH;
1087
1088                 /* Handling contiguous data with mplanes */
1089                 if (dma->fmtinfo->buffers == 1) {
1090                         pix_mp->plane_fmt[0].bytesperline =
1091                                 pix_mp->width * dma->fmtinfo->bpl_factor;
1092                         pix_mp->plane_fmt[0].sizeimage =
1093                                         pix_mp->width * pix_mp->height *
1094                                         dma->fmtinfo->bpp / 8;
1095                 } else {
1096                     /* Handling non-contiguous data with mplanes */
1097                         hsub = dma->fmtinfo->hsub;
1098                         vsub = dma->fmtinfo->vsub;
1099                         for (i = 0; i < dma->fmtinfo->buffers; i++) {
1100                                 width = pix_mp->width / (i ? hsub : 1);
1101                                 height = pix_mp->height / (i ? vsub : 1);
1102                                 pix_mp->plane_fmt[i].bytesperline =
1103                                         width * dma->fmtinfo->bpl_factor;
1104                                 pix_mp->plane_fmt[i].sizeimage = width * height;
1105                         }
1106                 }
1107         } else {
1108                 struct v4l2_pix_format *pix;
1109
1110                 pix = &dma->format.fmt.pix;
1111                 pix->pixelformat = dma->fmtinfo->fourcc;
1112                 pix->colorspace = V4L2_COLORSPACE_SRGB;
1113                 pix->field = V4L2_FIELD_NONE;
1114                 pix->width = XVIP_DMA_DEF_WIDTH;
1115                 pix->height = XVIP_DMA_DEF_HEIGHT;
1116                 pix->bytesperline = pix->width * dma->fmtinfo->bpl_factor;
1117                 pix->sizeimage =
1118                         pix->width * pix->height * dma->fmtinfo->bpp / 8;
1119         }
1120
1121         /* Initialize the media entity... */
1122         if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1123             type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1124                 dma->pad.flags = MEDIA_PAD_FL_SINK;
1125         else
1126                 dma->pad.flags = MEDIA_PAD_FL_SOURCE;
1127
1128         ret = media_entity_pads_init(&dma->video.entity, 1, &dma->pad);
1129         if (ret < 0)
1130                 goto error;
1131
1132         /* ... and the video node... */
1133         dma->video.fops = &xvip_dma_fops;
1134         dma->video.v4l2_dev = &xdev->v4l2_dev;
1135         dma->video.queue = &dma->queue;
1136         snprintf(dma->video.name, sizeof(dma->video.name), "%s %s %u",
1137                  xdev->dev->of_node->name,
1138                  (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1139                   type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1140                                         ? "output" : "input",
1141                  port);
1142
1143         dma->video.vfl_type = VFL_TYPE_GRABBER;
1144         if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1145             type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1146                 dma->video.vfl_dir = VFL_DIR_RX;
1147         else
1148                 dma->video.vfl_dir = VFL_DIR_TX;
1149
1150         dma->video.release = video_device_release_empty;
1151         dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
1152         dma->video.lock = &dma->lock;
1153
1154         video_set_drvdata(&dma->video, dma);
1155
1156         /* ... and the buffers queue... */
1157         /* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
1158          * V4L2 APIs would be inefficient. Testing on the command line with a
1159          * 'cat /dev/video?' thus won't be possible, but given that the driver
1160          * anyway requires a test tool to setup the pipeline before any video
1161          * stream can be started, requiring a specific V4L2 test tool as well
1162          * instead of 'cat' isn't really a drawback.
1163          */
1164         dma->queue.type = type;
1165         dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
1166         dma->queue.lock = &dma->lock;
1167         dma->queue.drv_priv = dma;
1168         dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
1169         dma->queue.ops = &xvip_dma_queue_qops;
1170         dma->queue.mem_ops = &vb2_dma_contig_memops;
1171         dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
1172                                    | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
1173         dma->queue.dev = dma->xdev->dev;
1174         ret = vb2_queue_init(&dma->queue);
1175         if (ret < 0) {
1176                 dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
1177                 goto error;
1178         }
1179
1180         /* ... and the DMA channel. */
1181         snprintf(name, sizeof(name), "port%u", port);
1182         dma->dma = dma_request_chan(dma->xdev->dev, name);
1183         if (IS_ERR(dma->dma)) {
1184                 ret = PTR_ERR(dma->dma);
1185                 if (ret != -EPROBE_DEFER)
1186                         dev_err(dma->xdev->dev,
1187                                 "No Video DMA channel found");
1188                 goto error;
1189         }
1190
1191         dma->align = 1 << dma->dma->device->copy_align;
1192
1193         ret = video_register_device(&dma->video, VFL_TYPE_GRABBER, -1);
1194         if (ret < 0) {
1195                 dev_err(dma->xdev->dev, "failed to register video device\n");
1196                 goto error;
1197         }
1198
1199         return 0;
1200
1201 error:
1202         xvip_dma_cleanup(dma);
1203         return ret;
1204 }
1205
1206 void xvip_dma_cleanup(struct xvip_dma *dma)
1207 {
1208         if (video_is_registered(&dma->video))
1209                 video_unregister_device(&dma->video);
1210
1211         if (!IS_ERR(dma->dma))
1212                 dma_release_channel(dma->dma);
1213
1214         media_entity_cleanup(&dma->video.entity);
1215
1216         mutex_destroy(&dma->lock);
1217         mutex_destroy(&dma->pipe.lock);
1218 }