]> rtime.felk.cvut.cz Git - zynq/linux.git/blob - drivers/media/platform/xilinx/xilinx-m2m.c
10ca904f9c19d6bcfaa9a3c13d405c62be3491ed
[zynq/linux.git] / drivers / media / platform / xilinx / xilinx-m2m.c
// SPDX-License-Identifier: GPL-2.0
2 /*
3  * Xilinx V4L2 mem2mem driver
4  *
5  * Copyright (C) 2017-2018 Xilinx, Inc.
6  *
7  * Author: Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
8  */
9
10 #include <drm/drm_fourcc.h>
11 #include <linux/delay.h>
12 #include <linux/dma/xilinx_frmbuf.h>
13 #include <linux/lcm.h>
14 #include <linux/list.h>
15 #include <linux/module.h>
16 #include <linux/of.h>
17 #include <linux/of_graph.h>
18 #include <linux/platform_device.h>
19 #include <linux/slab.h>
20
21 #include <media/v4l2-async.h>
22 #include <media/v4l2-common.h>
23 #include <media/v4l2-device.h>
24 #include <media/v4l2-fwnode.h>
25 #include <media/v4l2-ioctl.h>
26 #include <media/v4l2-mem2mem.h>
27 #include <media/videobuf2-dma-contig.h>
28
29 #include "xilinx-vip.h"
30
/* Driver name, also used for the V4L2 capability strings */
#define XVIP_M2M_NAME           "xilinx-mem2mem"
#define XVIP_M2M_DEFAULT_FMT    V4L2_PIX_FMT_RGB24

/* Minimum and maximum widths are expressed in bytes */
#define XVIP_M2M_MIN_WIDTH      1U
#define XVIP_M2M_MAX_WIDTH      65535U
#define XVIP_M2M_MIN_HEIGHT     1U
#define XVIP_M2M_MAX_HEIGHT     8191U

/* Default resolution (1080p) used before the user sets a format */
#define XVIP_M2M_DEF_WIDTH      1920
#define XVIP_M2M_DEF_HEIGHT     1080

/*
 * NOTE(review): these indices are reversed with respect to the
 * XVIP_PAD_SINK/XVIP_PAD_SOURCE macros that the rest of this file actually
 * uses (from xilinx-vip.h) -- confirm which convention matches the order
 * the media pads are registered in.
 */
#define XVIP_M2M_PAD_SINK       1
#define XVIP_M2M_PAD_SOURCE     0
45
/**
 * struct xvip_graph_entity - Entity in the video graph
 * @list: list entry in the composite device's graph entities list
 * @node: the entity's DT node, used to match subdevs in the graph
 * @entity: media entity, from the corresponding V4L2 subdev
 * @asd: subdev asynchronous registration information
 * @subdev: V4L2 subdev
 * @streaming: true while the subdev has been streamed on (tracked so shared
 *             subdevs are only started/stopped once)
 */
struct xvip_graph_entity {
        struct list_head list;
        struct device_node *node;
        struct media_entity *entity;

        struct v4l2_async_subdev asd;
        struct v4l2_subdev *subdev;
        bool streaming;
};
64
/**
 * struct xvip_pipeline - Xilinx Video IP pipeline structure
 * @pipe: media pipeline
 * @lock: protects the pipeline @stream_count and @use_count
 * @use_count: number of DMA engines using the pipeline
 * @stream_count: number of DMA engines currently streaming
 * @num_dmas: number of DMA engines in the pipeline
 * @xdev: Composite device the pipe belongs to
 */
struct xvip_pipeline {
        struct media_pipeline pipe;

        /* protects the pipeline @stream_count and @use_count */
        struct mutex lock;
        unsigned int use_count;
        unsigned int stream_count;

        unsigned int num_dmas;
        struct xvip_m2m_dev *xdev;
};
85
/**
 * struct xventity_list - Scratch list node used while walking the media graph
 * @list: list entry in the local list built by xvip_pipeline_start_stop()
 * @entity: media entity recorded during the graph walk
 */
struct xventity_list {
        struct list_head list;
        struct media_entity *entity;
};
90
/**
 * struct xvip_m2m_dev - Xilinx Video mem2mem device structure
 * @v4l2_dev: V4L2 device
 * @dev: (OF) device
 * @media_dev: media device
 * @notifier: V4L2 asynchronous subdevs notifier
 * @entities: entities in the graph as a list of xvip_graph_entity
 * @num_subdevs: number of subdevs in the pipeline (zero when the M2M device
 *               is used standalone, without a connected pipeline)
 * @lock: This is to protect mem2mem context structure data
 * @queued_lock: This is to protect video buffer information
 * @dma: Video DMA channels
 * @m2m_dev: V4L2 mem2mem device structure
 * @v4l2_caps: V4L2 capabilities of the whole device
 */
struct xvip_m2m_dev {
        struct v4l2_device v4l2_dev;
        struct device *dev;

        struct media_device media_dev;
        struct v4l2_async_notifier notifier;
        struct list_head entities;
        unsigned int num_subdevs;

        /* Protects to m2m context data */
        struct mutex lock;

        /* Protects vb2_v4l2_buffer data */
        spinlock_t queued_lock;
        struct xvip_m2m_dma *dma;
        struct v4l2_m2m_dev *m2m_dev;
        u32 v4l2_caps;
};
123
/*
 * Return the xvip_pipeline that entity @e belongs to.  Only meaningful
 * while the entity is part of a started media pipeline (e->pipe non-NULL);
 * callers must guarantee this.
 */
static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e)
{
        return container_of(e->pipe, struct xvip_pipeline, pipe);
}
128
/**
 * struct xvip_m2m_dma - Video DMA channel
 * @video: V4L2 video device associated with the DMA channel
 * @xdev: composite mem2mem device the DMA channels belongs to
 * @chan_tx: DMA engine channel for MEM2DEV transfer
 * @chan_rx: DMA engine channel for DEV2MEM transfer
 * @outfmt: active V4L2 OUTPUT port pixel format
 * @capfmt: active V4L2 CAPTURE port pixel format
 * @r: crop rectangle parameters
 * @outinfo: format information corresponding to the active @outfmt
 * @capinfo: format information corresponding to the active @capfmt
 * @align: transfer alignment required by the DMA channel (in bytes)
 * @crop: boolean flag to indicate if crop is requested
 * @pads: media pads for the video M2M device entity
 * @pipe: pipeline belonging to the DMA channel
 */
struct xvip_m2m_dma {
        struct video_device video;
        struct xvip_m2m_dev *xdev;
        struct dma_chan *chan_tx;
        struct dma_chan *chan_rx;
        struct v4l2_format outfmt;
        struct v4l2_format capfmt;
        struct v4l2_rect r;
        const struct xvip_video_format *outinfo;
        const struct xvip_video_format *capinfo;
        u32 align;
        bool crop;

        struct media_pad pads[2];
        struct xvip_pipeline pipe;
};
161
/**
 * struct xvip_m2m_ctx - VIPP mem2mem context
 * @fh: V4L2 file handler
 * @xdev: composite mem2mem device the DMA channels belongs to
 * @xt: dma interleaved template for dma configuration
 * @sgl: data chunk structure for dma_interleaved_template; provides backing
 *       storage for the single chunk addressed through @xt's trailing
 *       flexible sgl[] array, so it must immediately follow @xt
 */
struct xvip_m2m_ctx {
        struct v4l2_fh fh;
        struct xvip_m2m_dev *xdev;
        struct dma_interleaved_template xt;
        struct data_chunk sgl[1];
};
175
/* Retrieve the per-open mem2mem context from a struct file */
static inline struct xvip_m2m_ctx *file2ctx(struct file *file)
{
        return container_of(file->private_data, struct xvip_m2m_ctx, fh);
}
180
181 static struct v4l2_subdev *
182 xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
183 {
184         struct media_pad *remote;
185
186         remote = media_entity_remote_pad(local);
187         if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
188                 return NULL;
189
190         if (pad)
191                 *pad = remote->index;
192
193         return media_entity_to_v4l2_subdev(remote->entity);
194 }
195
196 static int xvip_dma_verify_format(struct xvip_m2m_dma *dma)
197 {
198         struct v4l2_subdev_format fmt;
199         struct v4l2_subdev *subdev;
200         int ret;
201         int width, height;
202
203         subdev = xvip_dma_remote_subdev(&dma->pads[XVIP_PAD_SOURCE], &fmt.pad);
204         if (!subdev)
205                 return -EPIPE;
206
207         fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
208         ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
209         if (ret < 0)
210                 return ret == -ENOIOCTLCMD ? -EINVAL : ret;
211
212         if (dma->outinfo->code != fmt.format.code)
213                 return -EINVAL;
214
215         if (V4L2_TYPE_IS_MULTIPLANAR(dma->outfmt.type)) {
216                 width = dma->outfmt.fmt.pix_mp.width;
217                 height = dma->outfmt.fmt.pix_mp.height;
218         } else {
219                 width = dma->outfmt.fmt.pix.width;
220                 height = dma->outfmt.fmt.pix.height;
221         }
222
223         if (width != fmt.format.width || height != fmt.format.height)
224                 return -EINVAL;
225
226         return 0;
227 }
228
/* Map a struct video_device back to its enclosing xvip_m2m_dma */
#define to_xvip_dma(vdev)       container_of(vdev, struct xvip_m2m_dma, video)
/* -----------------------------------------------------------------------------
 * Pipeline Stream Management
 */
233
234 /**
235  * xvip_subdev_set_streaming - Find and update streaming status of subdev
236  * @xdev: Composite video device
237  * @subdev: V4L2 sub-device
238  * @enable: enable/disable streaming status
239  *
240  * Walk the xvip graph entities list and find if subdev is present. Returns
241  * streaming status of subdev and update the status as requested
242  *
243  * Return: streaming status (true or false) if successful or warn_on if subdev
244  * is not present and return false
245  */
246 static bool xvip_subdev_set_streaming(struct xvip_m2m_dev *xdev,
247                                       struct v4l2_subdev *subdev, bool enable)
248 {
249         struct xvip_graph_entity *entity;
250
251         list_for_each_entry(entity, &xdev->entities, list)
252                 if (entity->node == subdev->dev->of_node) {
253                         bool status = entity->streaming;
254
255                         entity->streaming = enable;
256                         return status;
257                 }
258
259         WARN(1, "Should never get here\n");
260         return false;
261 }
262
/*
 * Power and stream a single subdev entity on or off.
 *
 * The previous streaming status recorded in the graph entities list is used
 * so that a subdev shared between sub-graphs is only started once and only
 * stopped once.  On start: s_power(1) then s_stream(1), undoing both if
 * s_stream fails.  On stop: s_stream(0) then s_power(0).
 *
 * NOTE(review): on the stop path the return value of the s_stream(0) call is
 * overwritten by the subsequent s_power(0) call, so an s_stream failure is
 * logged but not reported to the caller -- confirm this is intended.
 *
 * Return: 0 on success, -ENOIOCTLCMD results are treated as success, any
 * other negative error code otherwise.
 */
static int xvip_entity_start_stop(struct xvip_m2m_dev *xdev,
                                  struct media_entity *entity, bool start)
{
        struct v4l2_subdev *subdev;
        bool is_streaming;
        int ret = 0;

        dev_dbg(xdev->dev, "%s entity %s\n",
                start ? "Starting" : "Stopping", entity->name);
        subdev = media_entity_to_v4l2_subdev(entity);

        /* This is to maintain list of stream on/off devices */
        is_streaming = xvip_subdev_set_streaming(xdev, subdev, start);

        /*
         * start or stop the subdev only once in case if they are
         * shared between sub-graphs
         */
        if (start && !is_streaming) {
                /* power-on subdevice */
                ret = v4l2_subdev_call(subdev, core, s_power, 1);
                if (ret < 0 && ret != -ENOIOCTLCMD) {
                        dev_err(xdev->dev,
                                "s_power on failed on subdev\n");
                        /* roll back the streaming status recorded above */
                        xvip_subdev_set_streaming(xdev, subdev, 0);
                        return ret;
                }

                /* stream-on subdevice */
                ret = v4l2_subdev_call(subdev, video, s_stream, 1);
                if (ret < 0 && ret != -ENOIOCTLCMD) {
                        dev_err(xdev->dev,
                                "s_stream on failed on subdev\n");
                        /* undo the power-on and the recorded status */
                        v4l2_subdev_call(subdev, core, s_power, 0);
                        xvip_subdev_set_streaming(xdev, subdev, 0);
                }
        } else if (!start && is_streaming) {
                /* stream-off subdevice */
                ret = v4l2_subdev_call(subdev, video, s_stream, 0);
                if (ret < 0 && ret != -ENOIOCTLCMD) {
                        dev_err(xdev->dev,
                                "s_stream off failed on subdev\n");
                        /* keep the subdev marked as streaming */
                        xvip_subdev_set_streaming(xdev, subdev, 1);
                }

                /* power-off subdevice */
                ret = v4l2_subdev_call(subdev, core, s_power, 0);
                if (ret < 0 && ret != -ENOIOCTLCMD)
                        dev_err(xdev->dev,
                                "s_power off failed on subdev\n");
        }

        return ret;
}
317
318 /**
319  * xvip_pipeline_start_stop - Start ot stop streaming on a pipeline
320  * @xdev: Composite video device
321  * @dma: xvip dma
322  * @start: Start (when true) or stop (when false) the pipeline
323  *
324  * Walk the entities chain starting @dma and start or stop all of them
325  *
326  * Return: 0 if successful, or the return value of the failed video::s_stream
327  * operation otherwise.
328  */
329 static int xvip_pipeline_start_stop(struct xvip_m2m_dev *xdev,
330                                     struct xvip_m2m_dma *dma, bool start)
331 {
332         struct media_graph graph;
333         struct media_entity *entity = &dma->video.entity;
334         struct media_device *mdev = entity->graph_obj.mdev;
335         struct xventity_list *temp, *_temp;
336         LIST_HEAD(ent_list);
337         int ret = 0;
338
339         mutex_lock(&mdev->graph_mutex);
340
341         /* Walk the graph to locate the subdev nodes */
342         ret = media_graph_walk_init(&graph, mdev);
343         if (ret)
344                 goto error;
345
346         media_graph_walk_start(&graph, entity);
347
348         /* get the list of entities */
349         while ((entity = media_graph_walk_next(&graph))) {
350                 struct xventity_list *ele;
351
352                 /* We want to stream on/off only subdevs */
353                 if (!is_media_entity_v4l2_subdev(entity))
354                         continue;
355
356                 /* Maintain the pipeline sequence in a list */
357                 ele = kzalloc(sizeof(*ele), GFP_KERNEL);
358                 if (!ele) {
359                         ret = -ENOMEM;
360                         goto error;
361                 }
362
363                 ele->entity = entity;
364                 list_add(&ele->list, &ent_list);
365         }
366
367         if (start) {
368                 list_for_each_entry_safe(temp, _temp, &ent_list, list) {
369                         /* Enable all subdevs from sink to source */
370                         ret = xvip_entity_start_stop(xdev, temp->entity, start);
371                         if (ret < 0) {
372                                 dev_err(xdev->dev, "ret = %d for entity %s\n",
373                                         ret, temp->entity->name);
374                                 break;
375                         }
376                 }
377         } else {
378                 list_for_each_entry_safe_reverse(temp, _temp, &ent_list, list)
379                         /* Enable all subdevs from source to sink */
380                         xvip_entity_start_stop(xdev, temp->entity, start);
381         }
382
383         list_for_each_entry_safe(temp, _temp, &ent_list, list) {
384                 list_del(&temp->list);
385                 kfree(temp);
386         }
387
388 error:
389         mutex_unlock(&mdev->graph_mutex);
390         media_graph_walk_cleanup(&graph);
391         return ret;
392 }
393
394 /**
395  * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
396  * @pipe: The pipeline
397  * @on: Turn the stream on when true or off when false
398  *
399  * The pipeline is shared between all DMA engines connect at its input and
400  * output. While the stream state of DMA engines can be controlled
401  * independently, pipelines have a shared stream state that enable or disable
402  * all entities in the pipeline. For this reason the pipeline uses a streaming
403  * counter that tracks the number of DMA engines that have requested the stream
404  * to be enabled. This will walk the graph starting from each DMA and enable or
405  * disable the entities in the path.
406  *
407  * When called with the @on argument set to true, this function will increment
408  * the pipeline streaming count. If the streaming count reaches the number of
409  * DMA engines in the pipeline it will enable all entities that belong to the
410  * pipeline.
411  *
412  * Similarly, when called with the @on argument set to false, this function will
413  * decrement the pipeline streaming count and disable all entities in the
414  * pipeline when the streaming count reaches zero.
415  *
416  * Return: 0 if successful, or the return value of the failed video::s_stream
417  * operation otherwise. Stopping the pipeline never fails. The pipeline state is
418  * not updated when the operation fails.
419  */
420 static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
421 {
422         struct xvip_m2m_dev *xdev;
423         struct xvip_m2m_dma *dma;
424         int ret = 0;
425
426         mutex_lock(&pipe->lock);
427         xdev = pipe->xdev;
428         dma = xdev->dma;
429
430         if (on) {
431                 ret = xvip_pipeline_start_stop(xdev, dma, true);
432                 if (ret < 0)
433                         goto done;
434                 pipe->stream_count++;
435         } else {
436                 if (--pipe->stream_count == 0)
437                         xvip_pipeline_start_stop(xdev, dma, false);
438         }
439
440 done:
441         mutex_unlock(&pipe->lock);
442         return ret;
443 }
444
445 static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
446                                   struct xvip_m2m_dma *start)
447 {
448         struct media_graph graph;
449         struct media_entity *entity = &start->video.entity;
450         struct media_device *mdev = entity->graph_obj.mdev;
451         unsigned int num_inputs = 0;
452         unsigned int num_outputs = 0;
453         int ret;
454
455         mutex_lock(&mdev->graph_mutex);
456
457         /* Walk the graph to locate the video nodes. */
458         ret = media_graph_walk_init(&graph, mdev);
459         if (ret) {
460                 mutex_unlock(&mdev->graph_mutex);
461                 return ret;
462         }
463
464         media_graph_walk_start(&graph, entity);
465
466         while ((entity = media_graph_walk_next(&graph))) {
467                 struct xvip_m2m_dma *dma;
468
469                 if (entity->function != MEDIA_ENT_F_IO_V4L)
470                         continue;
471
472                 dma = to_xvip_dma(media_entity_to_video_device(entity));
473
474                 num_outputs++;
475                 num_inputs++;
476         }
477
478         mutex_unlock(&mdev->graph_mutex);
479
480         media_graph_walk_cleanup(&graph);
481
482         /* We need at least one DMA to proceed */
483         if (num_outputs == 0 && num_inputs == 0)
484                 return -EPIPE;
485
486         pipe->num_dmas = num_inputs + num_outputs;
487         pipe->xdev = start->xdev;
488
489         return 0;
490 }
491
/* Reset the pipeline state computed by xvip_pipeline_validate() */
static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
        pipe->num_dmas = 0;
}
496
497 /**
498  * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
499  * @pipe: the pipeline
500  *
501  * Decrease the pipeline use count and clean it up if we were the last user.
502  */
503 static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
504 {
505         mutex_lock(&pipe->lock);
506
507         /* If we're the last user clean up the pipeline. */
508         if (--pipe->use_count == 0)
509                 __xvip_pipeline_cleanup(pipe);
510
511         mutex_unlock(&pipe->lock);
512 }
513
514 /**
515  * xvip_pipeline_prepare - Prepare the pipeline for streaming
516  * @pipe: the pipeline
517  * @dma: DMA engine at one end of the pipeline
518  *
519  * Validate the pipeline if no user exists yet, otherwise just increase the use
520  * count.
521  *
522  * Return: 0 if successful or -EPIPE if the pipeline is not valid.
523  */
524 static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
525                                  struct xvip_m2m_dma *dma)
526 {
527         int ret;
528
529         mutex_lock(&pipe->lock);
530
531         /* If we're the first user validate and initialize the pipeline. */
532         if (pipe->use_count == 0) {
533                 ret = xvip_pipeline_validate(pipe, dma);
534                 if (ret < 0) {
535                         __xvip_pipeline_cleanup(pipe);
536                         goto done;
537                 }
538         }
539
540         pipe->use_count++;
541         ret = 0;
542
543 done:
544         mutex_unlock(&pipe->lock);
545         return ret;
546 }
547
/*
 * DMA completion callback for the MEM2DEV (TX) direction.
 * Intentionally empty: buffer completion for both directions is handled in
 * xvip_m2m_dma_callback(), which marks the source and destination buffers
 * done together.
 */
static void xvip_m2m_dma_callback_mem2dev(void *data)
{
}
551
/*
 * DMA completion callback: remove the active source and destination buffers,
 * copy the timestamp, timestamp-source flags and timecode from the source to
 * the destination buffer, mark both buffers done and finish the m2m job.
 *
 * NOTE(review): src_vb and dst_vb are dereferenced without NULL checks; this
 * relies on the m2m framework keeping both buffers queued while a job runs --
 * confirm the callback cannot fire after a terminate drained the queues.
 */
static void xvip_m2m_dma_callback(void *data)
{
        struct xvip_m2m_ctx *ctx = data;
        struct xvip_m2m_dev *xdev = ctx->xdev;
        struct vb2_v4l2_buffer *src_vb, *dst_vb;

        spin_lock(&xdev->queued_lock);
        src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
        dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

        /* Propagate timestamp information from OUTPUT to CAPTURE buffer */
        dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
        dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
        dst_vb->flags |=
                src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
        dst_vb->timecode = src_vb->timecode;

        v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
        v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
        v4l2_m2m_job_finish(xdev->m2m_dev, ctx->fh.m2m_ctx);
        spin_unlock(&xdev->queued_lock);
}
573
574 /*
575  * Queue operations
576  */
577
578 static int xvip_m2m_queue_setup(struct vb2_queue *vq,
579                                 u32 *nbuffers, u32 *nplanes,
580                                 u32 sizes[], struct device *alloc_devs[])
581 {
582         struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(vq);
583         struct xvip_m2m_dma *dma = ctx->xdev->dma;
584         struct v4l2_format *f;
585         const struct xvip_video_format *info;
586         u32 i;
587
588         if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
589                 f = &dma->outfmt;
590                 info = dma->outinfo;
591         } else {
592                 f = &dma->capfmt;
593                 info = dma->capinfo;
594         }
595
596         if (*nplanes) {
597                 if (*nplanes != f->fmt.pix_mp.num_planes)
598                         return -EINVAL;
599
600                 for (i = 0; i < *nplanes; i++) {
601                         if (sizes[i] < f->fmt.pix_mp.plane_fmt[i].sizeimage)
602                                 return -EINVAL;
603                 }
604         } else {
605                 *nplanes = info->buffers;
606                 for (i = 0; i < info->buffers; i++)
607                         sizes[i] = f->fmt.pix_mp.plane_fmt[i].sizeimage;
608         }
609
610         return 0;
611 }
612
613 static int xvip_m2m_buf_prepare(struct vb2_buffer *vb)
614 {
615         struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
616         struct xvip_m2m_dma *dma = ctx->xdev->dma;
617         struct v4l2_format *f;
618         const struct xvip_video_format *info;
619         u32 i;
620
621         if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
622                 f = &dma->outfmt;
623                 info = dma->outinfo;
624         } else {
625                 f = &dma->capfmt;
626                 info = dma->capinfo;
627         }
628
629         for (i = 0; i < info->buffers; i++) {
630                 if (vb2_plane_size(vb, i) <
631                         f->fmt.pix_mp.plane_fmt[i].sizeimage) {
632                         dev_err(ctx->xdev->dev,
633                                 "insufficient plane size (%u < %u)\n",
634                                 (u32)vb2_plane_size(vb, i),
635                                 f->fmt.pix_mp.plane_fmt[i].sizeimage);
636                         return -EINVAL;
637                 }
638
639                 vb2_set_plane_payload(vb, i,
640                                       f->fmt.pix_mp.plane_fmt[i].sizeimage);
641         }
642
643         return 0;
644 }
645
646 static void xvip_m2m_buf_queue(struct vb2_buffer *vb)
647 {
648         struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
649         struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
650
651         v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
652 }
653
/*
 * vb2 stop_streaming: terminate the DMA channel belonging to this queue,
 * stop and clean up the connected pipeline (if any), then return all
 * remaining queued buffers to userspace with an error state.
 */
static void xvip_m2m_stop_streaming(struct vb2_queue *q)
{
        struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(q);
        struct xvip_m2m_dma *dma = ctx->xdev->dma;
        struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
        struct vb2_v4l2_buffer *vbuf;

        dma->crop = false;
        /* OUTPUT queue feeds the TX channel, CAPTURE drains the RX channel */
        if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
                dmaengine_terminate_sync(dma->chan_tx);
        else
                dmaengine_terminate_sync(dma->chan_rx);

        if (ctx->xdev->num_subdevs) {
                /* Stop the pipeline. */
                xvip_pipeline_set_stream(pipe, false);

                /* Cleanup the pipeline and mark it as being stopped. */
                xvip_pipeline_cleanup(pipe);
                media_pipeline_stop(&dma->video.entity);
        }

        /* Drain this queue's side of the m2m buffer list */
        for (;;) {
                if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
                        vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
                else
                        vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

                if (!vbuf)
                        return;

                spin_lock(&ctx->xdev->queued_lock);
                v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
                spin_unlock(&ctx->xdev->queued_lock);
        }
}
690
/*
 * vb2 start_streaming: when subdevs are connected, start the media pipeline,
 * verify the configured format against the connected subdev, prepare the
 * pipeline and stream it on.  Without subdevs the M2M device runs standalone
 * and nothing needs to be done here.
 *
 * NOTE(review): on failure this calls media_pipeline_stop() and then
 * xvip_m2m_stop_streaming(), which stops the pipeline a second time --
 * including another media_pipeline_stop() and a stream_count decrement that
 * was never matched by an increment when xvip_pipeline_set_stream() itself
 * failed.  Verify the unwind sequence is balanced.
 */
static int xvip_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
{
        struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(q);
        struct xvip_m2m_dma *dma = ctx->xdev->dma;
        struct xvip_m2m_dev *xdev = ctx->xdev;
        struct xvip_pipeline *pipe;
        int ret;

        if (!xdev->num_subdevs)
                return 0;

        /* Reuse an already-running pipeline, otherwise use our own */
        pipe = dma->video.entity.pipe
             ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;

        ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
        if (ret < 0)
                goto error;

        /* Verify that the configured format matches the output of the
         * connected subdev.
         */
        ret = xvip_dma_verify_format(dma);
        if (ret < 0)
                goto error_stop;

        ret = xvip_pipeline_prepare(pipe, dma);
        if (ret < 0)
                goto error_stop;

        /* Start the pipeline. */
        ret = xvip_pipeline_set_stream(pipe, true);
        if (ret < 0)
                goto error_stop;

        return 0;
error_stop:
        media_pipeline_stop(&dma->video.entity);

error:
        xvip_m2m_stop_streaming(q);

        return ret;
}
734
/* videobuf2 queue operations, shared by the OUTPUT and CAPTURE queues */
static const struct vb2_ops m2m_vb2_ops = {
        .queue_setup = xvip_m2m_queue_setup,
        .buf_prepare = xvip_m2m_buf_prepare,
        .buf_queue = xvip_m2m_buf_queue,
        .start_streaming = xvip_m2m_start_streaming,
        .stop_streaming = xvip_m2m_stop_streaming,
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
};
744
745 static int xvip_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
746                                struct vb2_queue *dst_vq)
747 {
748         struct xvip_m2m_ctx *ctx = priv;
749         int ret;
750
751         src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
752         src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
753         src_vq->drv_priv = ctx;
754         src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
755         src_vq->ops = &m2m_vb2_ops;
756         src_vq->mem_ops = &vb2_dma_contig_memops;
757         src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
758         src_vq->dev = ctx->xdev->v4l2_dev.dev;
759
760         ret = vb2_queue_init(src_vq);
761         if (ret)
762                 return ret;
763
764         dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
765         dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
766         dst_vq->drv_priv = ctx;
767         dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
768         dst_vq->ops = &m2m_vb2_ops;
769         dst_vq->mem_ops = &vb2_dma_contig_memops;
770         dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
771         dst_vq->dev = ctx->xdev->v4l2_dev.dev;
772
773         return vb2_queue_init(dst_vq);
774 }
775
776 /* -----------------------------------------------------------------------------
777  * V4L2 ioctls
778  */
779
780 static int
781 xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
782 {
783         cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
784         cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
785
786         strlcpy(cap->driver, XVIP_M2M_NAME, sizeof(cap->driver));
787         strlcpy(cap->card, XVIP_M2M_NAME, sizeof(cap->card));
788         strlcpy(cap->bus_info, XVIP_M2M_NAME, sizeof(cap->card));
789
790         return 0;
791 }
792
/*
 * VIDIOC_ENUM_FMT handler for both queues.
 *
 * Without connected subdevs, every pixel format reported by the queue's DMA
 * channel is enumerated.  With a pipeline present only a single format is
 * reported (index 0): the one whose media bus code matches the active format
 * of the subdev connected to the corresponding pad.
 */
static int
xvip_m2m_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
        struct xvip_m2m_ctx *ctx = file2ctx(file);
        struct xvip_m2m_dma *dma = ctx->xdev->dma;
        const struct xvip_video_format *fmtinfo;
        const struct xvip_video_format *fmt;
        struct v4l2_subdev *subdev;
        struct v4l2_subdev_format v4l_fmt;
        struct xvip_m2m_dev *xdev = ctx->xdev;
        u32 i, fmt_cnt, *fmts;
        int ret;

        /* CAPTURE enumerates the RX channel formats, OUTPUT the TX ones */
        if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
                ret = xilinx_xdma_get_v4l2_vid_fmts(dma->chan_rx,
                                                    &fmt_cnt, &fmts);
        else
                ret = xilinx_xdma_get_v4l2_vid_fmts(dma->chan_tx,
                                                    &fmt_cnt, &fmts);
        if (ret)
                return ret;

        if (f->index >= fmt_cnt)
                return -EINVAL;

        if (!xdev->num_subdevs) {
                /* Standalone: report the DMA channel's formats directly */
                fmt = xvip_get_format_by_fourcc(fmts[f->index]);
                if (IS_ERR(fmt))
                        return PTR_ERR(fmt);

                f->pixelformat = fmt->fourcc;
                strlcpy(f->description, fmt->description,
                        sizeof(f->description));
                return 0;
        }

        /* With a pipeline only the subdev-matching format is enumerable */
        if (f->index > 0)
                return -EINVAL;

        /* Establish media pad format */
        if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
                subdev = xvip_dma_remote_subdev(&dma->pads[XVIP_PAD_SOURCE],
                                                &v4l_fmt.pad);
        else
                subdev = xvip_dma_remote_subdev(&dma->pads[XVIP_PAD_SINK],
                                                &v4l_fmt.pad);
        if (!subdev)
                return -EPIPE;

        v4l_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
        ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &v4l_fmt);
        if (ret < 0)
                return ret == -ENOIOCTLCMD ? -EINVAL : ret;

        /* Find the DMA format matching the subdev's media bus code */
        for (i = 0; i < fmt_cnt; i++) {
                fmt = xvip_get_format_by_fourcc(fmts[i]);
                if (IS_ERR(fmt))
                        return PTR_ERR(fmt);

                if (fmt->code == v4l_fmt.format.code)
                        break;
        }

        if (i >= fmt_cnt)
                return -EINVAL;

        fmtinfo = xvip_get_format_by_fourcc(fmts[i]);
        f->pixelformat = fmtinfo->fourcc;
        strlcpy(f->description, fmtinfo->description, sizeof(f->description));

        return 0;
}
865
866 static int xvip_m2m_get_fmt(struct file *file, void *fh, struct v4l2_format *f)
867 {
868         struct xvip_m2m_ctx *ctx = file2ctx(file);
869         struct xvip_m2m_dma *dma = ctx->xdev->dma;
870         struct vb2_queue *vq;
871
872         vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
873         if (!vq)
874                 return -EINVAL;
875
876         if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
877                 f->fmt.pix_mp = dma->outfmt.fmt.pix_mp;
878         else
879                 f->fmt.pix_mp = dma->capfmt.fmt.pix_mp;
880
881         return 0;
882 }
883
/*
 * __xvip_m2m_try_fmt - Validate and adjust a requested queue format
 * @ctx: driver-private m2m context
 * @f: requested V4L2 format, adjusted in place to the nearest supported one
 *
 * When subdevices are part of the pipeline, the request must exactly match
 * the active media-bus format of the remote subdev pad.  Width, height and
 * per-plane bytesperline/sizeimage are clamped to the device limits, taking
 * the format's padding and bytes-per-line scaling factors into account.
 *
 * Return: 0 on success, -EINVAL on an unsupported type or a subdev format
 * mismatch, -EPIPE when no remote subdev is connected.
 */
static int __xvip_m2m_try_fmt(struct xvip_m2m_ctx *ctx, struct v4l2_format *f)
{
	struct xvip_m2m_dma *dma = ctx->xdev->dma;
	const struct xvip_video_format *info;
	struct v4l2_pix_format_mplane *pix_mp;
	struct v4l2_plane_pix_format *plane_fmt;
	u32 align, min_width, max_width;
	u32 bpl, min_bpl, max_bpl;
	u32 padding_factor_nume, padding_factor_deno;
	u32 bpl_nume, bpl_deno;
	u32 i, plane_width, plane_height;
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	struct xvip_m2m_dev *xdev = ctx->xdev;
	int ret;

	/* Only the multiplanar output/capture queues are supported. */
	if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
	    f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		return -EINVAL;

	/* Fetch the active format from the remote subdev pad, if any. */
	if (xdev->num_subdevs) {
		if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
			subdev = xvip_dma_remote_subdev
				(&dma->pads[XVIP_PAD_SOURCE], &fmt.pad);
		else
			subdev = xvip_dma_remote_subdev
				(&dma->pads[XVIP_PAD_SINK], &fmt.pad);

		if (!subdev)
			return -EPIPE;

		fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
		if (ret < 0)
			return -EINVAL;
	}

	pix_mp = &f->fmt.pix_mp;
	plane_fmt = pix_mp->plane_fmt;
	info = xvip_get_format_by_fourcc(f->fmt.pix_mp.pixelformat);
	if (info) {
		/* Remember the negotiated format info per direction. */
		if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
			dma->outinfo = info;
		else
			dma->capinfo = info;
	} else {
		/*
		 * NOTE(review): on an unknown fourcc the code falls back to
		 * the default format locally but does NOT update
		 * dma->outinfo/capinfo — verify this asymmetry is intended.
		 */
		info = xvip_get_format_by_fourcc(XVIP_M2M_DEFAULT_FMT);
	}

	/* With a connected pipeline the request must match the subdev. */
	if (xdev->num_subdevs) {
		if (info->code != fmt.format.code ||
		    fmt.format.width != pix_mp->width ||
		    fmt.format.height != pix_mp->height) {
			dev_err(xdev->dev, "Failed to set format\n");
			dev_info(xdev->dev,
				 "Reqed Code = %d, Width = %d, Height = %d\n",
				 info->code, pix_mp->width, pix_mp->height);
			dev_info(xdev->dev,
				 "Subdev Code = %d, Width = %d, Height = %d",
				 fmt.format.code, fmt.format.width,
				 fmt.format.height);
			return -EINVAL;
		}
	}

	xvip_width_padding_factor(info->fourcc, &padding_factor_nume,
				  &padding_factor_deno);
	xvip_bpl_scaling_factor(info->fourcc, &bpl_nume, &bpl_deno);

	/*
	 * V4L2 specification suggests the driver corrects the format struct
	 * if any of the dimensions is unsupported
	 */
	align = lcm(dma->align, info->bpp >> 3);
	min_width = roundup(XVIP_M2M_MIN_WIDTH, align);
	max_width = rounddown(XVIP_M2M_MAX_WIDTH, align);
	pix_mp->width = clamp(pix_mp->width, min_width, max_width);
	pix_mp->height = clamp(pix_mp->height, XVIP_M2M_MIN_HEIGHT,
			       XVIP_M2M_MAX_HEIGHT);

	/*
	 * Clamp the requested bytes per line value. If the maximum
	 * bytes per line value is zero, the module doesn't support
	 * user configurable line sizes. Override the requested value
	 * with the minimum in that case.
	 */
	max_bpl = rounddown(XVIP_M2M_MAX_WIDTH, align);

	if (info->buffers == 1) {
		/* Handling contiguous data with mplanes */
		min_bpl = (pix_mp->width * info->bpl_factor *
			   padding_factor_nume * bpl_nume) /
			   (padding_factor_deno * bpl_deno);
		min_bpl = roundup(min_bpl, align);
		bpl = roundup(plane_fmt[0].bytesperline, align);
		plane_fmt[0].bytesperline = clamp(bpl, min_bpl, max_bpl);

		if (info->num_planes == 1) {
			/* Single plane formats */
			plane_fmt[0].sizeimage = plane_fmt[0].bytesperline *
						 pix_mp->height;
		} else {
			/* Multi plane formats in contiguous buffer*/
			plane_fmt[0].sizeimage =
				DIV_ROUND_UP(plane_fmt[0].bytesperline *
					     pix_mp->height *
					     info->bpp, 8);
		}
	} else {
		/* Handling non-contiguous data with mplanes */
		for (i = 0; i < info->num_planes; i++) {
			/* Chroma planes may be subsampled relative to luma. */
			plane_width = pix_mp->width / (i ? info->hsub : 1);
			plane_height = pix_mp->height / (i ? info->vsub : 1);
			min_bpl = (plane_width * info->bpl_factor *
				   padding_factor_nume * bpl_nume) /
				   (padding_factor_deno * bpl_deno);
			min_bpl = roundup(min_bpl, align);
			/*
			 * NOTE(review): the contiguous path rounds the user
			 * bytesperline UP, this path rounds DOWN — confirm
			 * the asymmetry is intentional.
			 */
			bpl = rounddown(plane_fmt[i].bytesperline, align);
			plane_fmt[i].bytesperline = clamp(bpl, min_bpl,
							  max_bpl);
			plane_fmt[i].sizeimage = plane_fmt[i].bytesperline *
						 plane_height;
		}
	}

	return 0;
}
1011
/*
 * VIDIOC_TRY_FMT handler: delegate directly to the common try-format
 * helper.  The previous "if (ret < 0) return ret; return 0;" dance was
 * redundant since the helper already returns 0 on success.
 */
static int xvip_m2m_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	struct xvip_m2m_ctx *ctx = file2ctx(file);

	return __xvip_m2m_try_fmt(ctx, f);
}
1023
1024 static int xvip_m2m_set_fmt(struct file *file, void *fh, struct v4l2_format *f)
1025 {
1026         struct xvip_m2m_ctx *ctx = file2ctx(file);
1027         struct xvip_m2m_dma *dma = ctx->xdev->dma;
1028         struct vb2_queue *vq;
1029         int ret;
1030
1031         vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
1032         if (!vq)
1033                 return -EINVAL;
1034
1035         if (vb2_is_busy(vq)) {
1036                 v4l2_err(&ctx->xdev->v4l2_dev, "%s queue busy\n", __func__);
1037                 return -EBUSY;
1038         }
1039
1040         ret = __xvip_m2m_try_fmt(ctx, f);
1041         if (ret < 0)
1042                 return ret;
1043
1044         if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
1045                 dma->outfmt.fmt.pix_mp = f->fmt.pix_mp;
1046         else
1047                 dma->capfmt.fmt.pix_mp = f->fmt.pix_mp;
1048
1049         return 0;
1050 }
1051
1052 static int
1053 xvip_m2m_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
1054 {
1055         struct xvip_m2m_ctx *ctx = file2ctx(file);
1056         struct xvip_m2m_dma *dma = ctx->xdev->dma;
1057         int ret = 0;
1058
1059         if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
1060             s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1061                 return -EINVAL;
1062
1063         switch (s->target) {
1064         case V4L2_SEL_TGT_COMPOSE:
1065                 ret = -ENOTTY;
1066                 break;
1067         case V4L2_SEL_TGT_CROP:
1068                 s->r.left = 0;
1069                 s->r.top = 0;
1070                 s->r.width = dma->r.width;
1071                 s->r.height = dma->r.height;
1072                 break;
1073         default:
1074                 ret = -EINVAL;
1075         }
1076
1077         return ret;
1078 }
1079
1080 static int
1081 xvip_m2m_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
1082 {
1083         struct xvip_m2m_ctx *ctx = file2ctx(file);
1084         struct xvip_m2m_dma *dma = ctx->xdev->dma;
1085         u32 min_width, max_width;
1086         int ret = 0;
1087
1088         if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
1089             s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1090                 return -EINVAL;
1091
1092         switch (s->target) {
1093         case V4L2_SEL_TGT_COMPOSE:
1094                 ret = -ENOTTY;
1095                 break;
1096         case V4L2_SEL_TGT_CROP:
1097                 if (s->r.width > dma->outfmt.fmt.pix_mp.width ||
1098                     s->r.height > dma->outfmt.fmt.pix_mp.height ||
1099                     s->r.top != 0 || s->r.left != 0)
1100                         return -EINVAL;
1101
1102                 dma->crop = true;
1103                 min_width = roundup(XVIP_M2M_MIN_WIDTH, dma->align);
1104                 max_width = rounddown(XVIP_M2M_MAX_WIDTH, dma->align);
1105                 dma->r.width = clamp(s->r.width, min_width, max_width);
1106                 dma->r.height = s->r.height;
1107                 break;
1108         default:
1109                 ret = -EINVAL;
1110         }
1111
1112         return ret;
1113 }
1114
/* V4L2 ioctl dispatch table: format/selection ops plus standard m2m plumbing. */
static const struct v4l2_ioctl_ops xvip_m2m_ioctl_ops = {
	.vidioc_querycap                = xvip_dma_querycap,

	/* Capture (device-to-memory) queue format handling */
	.vidioc_enum_fmt_vid_cap_mplane = xvip_m2m_enum_fmt,
	.vidioc_g_fmt_vid_cap_mplane    = xvip_m2m_get_fmt,
	.vidioc_try_fmt_vid_cap_mplane  = xvip_m2m_try_fmt,
	.vidioc_s_fmt_vid_cap_mplane    = xvip_m2m_set_fmt,

	/* Output (memory-to-device) queue format handling */
	.vidioc_enum_fmt_vid_out_mplane = xvip_m2m_enum_fmt,
	.vidioc_g_fmt_vid_out_mplane    = xvip_m2m_get_fmt,
	.vidioc_try_fmt_vid_out_mplane  = xvip_m2m_try_fmt,
	.vidioc_s_fmt_vid_out_mplane    = xvip_m2m_set_fmt,
	.vidioc_s_selection             = xvip_m2m_s_selection,
	.vidioc_g_selection             = xvip_m2m_g_selection,

	/* Buffer management delegated to the v4l2-mem2mem helpers */
	.vidioc_reqbufs                 = v4l2_m2m_ioctl_reqbufs,
	.vidioc_querybuf                = v4l2_m2m_ioctl_querybuf,
	.vidioc_qbuf                    = v4l2_m2m_ioctl_qbuf,
	.vidioc_dqbuf                   = v4l2_m2m_ioctl_dqbuf,
	.vidioc_prepare_buf             = v4l2_m2m_ioctl_prepare_buf,
	.vidioc_create_bufs             = v4l2_m2m_ioctl_create_bufs,
	.vidioc_expbuf                  = v4l2_m2m_ioctl_expbuf,

	.vidioc_streamon                = v4l2_m2m_ioctl_streamon,
	.vidioc_streamoff               = v4l2_m2m_ioctl_streamoff,
};
1141
1142 /*
1143  * File operations
1144  */
/*
 * Open handler: allocate a per-file m2m context, initialize its V4L2 file
 * handle and create the mem2mem context with our queue init callback.
 */
static int xvip_m2m_open(struct file *file)
{
	struct xvip_m2m_dev *xdev = video_drvdata(file);
	struct xvip_m2m_ctx *ctx = NULL;
	int ret;

	/*
	 * NOTE(review): devm_kzalloc() ties this per-open allocation to the
	 * platform device lifetime, not the file handle, so repeated
	 * open/close cycles accumulate memory until driver removal.
	 * Consider plain kzalloc() with a matching kfree() in release.
	 */
	ctx = devm_kzalloc(xdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	v4l2_fh_init(&ctx->fh, video_devdata(file));
	file->private_data = &ctx->fh;
	ctx->xdev = xdev;

	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(xdev->m2m_dev, ctx,
					    &xvip_m2m_queue_init);
	if (IS_ERR(ctx->fh.m2m_ctx)) {
		/* Undo the fh init on failure; ctx is devm-managed. */
		ret = PTR_ERR(ctx->fh.m2m_ctx);
		v4l2_fh_exit(&ctx->fh);
		return ret;
	}

	v4l2_fh_add(&ctx->fh);
	dev_info(xdev->dev, "Created instance %p, m2m_ctx: %p\n", ctx,
		 ctx->fh.m2m_ctx);
	return 0;
}
1172
1173 static int xvip_m2m_release(struct file *file)
1174 {
1175         struct xvip_m2m_ctx *ctx = file->private_data;
1176
1177         v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
1178         return 0;
1179 }
1180
1181 static u32 xvip_m2m_poll(struct file *file,
1182                          struct poll_table_struct *wait)
1183 {
1184         struct xvip_m2m_ctx *ctx = file->private_data;
1185         int ret;
1186
1187         mutex_lock(&ctx->xdev->lock);
1188         ret = v4l2_m2m_poll(file, ctx->fh.m2m_ctx, wait);
1189         mutex_unlock(&ctx->xdev->lock);
1190
1191         return ret;
1192 }
1193
/* mmap handler: map vb2 buffers through the m2m framework helper. */
static int xvip_m2m_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct xvip_m2m_ctx *ctx = file->private_data;

	return v4l2_m2m_mmap(file, ctx->fh.m2m_ctx, vma);
}
1200
1201 /*
1202  * mem2mem callbacks
1203  */
1204
1205 static int xvip_m2m_job_ready(void *priv)
1206 {
1207         struct xvip_m2m_ctx *ctx = priv;
1208
1209         if ((v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0) &&
1210             (v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) > 0))
1211                 return 1;
1212
1213         return 0;
1214 }
1215
/* m2m .job_abort callback: mark the current job finished immediately. */
static void xvip_m2m_job_abort(void *priv)
{
	struct xvip_m2m_ctx *ctx = priv;

	/* Will cancel the transaction in the next interrupt handler */
	v4l2_m2m_job_finish(ctx->xdev->m2m_dev, ctx->fh.m2m_ctx);
}
1223
1224 static void xvip_m2m_prep_submit_dev2mem_desc(struct xvip_m2m_ctx *ctx,
1225                                               struct vb2_v4l2_buffer *dst_buf)
1226 {
1227         struct xvip_m2m_dma *dma = ctx->xdev->dma;
1228         struct xvip_m2m_dev *xdev = ctx->xdev;
1229         struct dma_async_tx_descriptor *desc;
1230         dma_addr_t p_out;
1231         const struct xvip_video_format *info;
1232         struct v4l2_pix_format_mplane *pix_mp;
1233         u32 padding_factor_nume, padding_factor_deno;
1234         u32 bpl_nume, bpl_deno;
1235         u32 luma_size;
1236         u32 flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
1237         enum operation_mode mode = DEFAULT;
1238
1239         p_out = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
1240
1241         if (!p_out) {
1242                 dev_err(xdev->dev,
1243                         "Acquiring kernel pointer to buffer failed\n");
1244                 return;
1245         }
1246
1247         ctx->xt.dir = DMA_DEV_TO_MEM;
1248         ctx->xt.src_sgl = false;
1249         ctx->xt.dst_sgl = true;
1250         ctx->xt.dst_start = p_out;
1251
1252         pix_mp = &dma->capfmt.fmt.pix_mp;
1253         info = dma->capinfo;
1254         xilinx_xdma_set_mode(dma->chan_rx, mode);
1255         xilinx_xdma_v4l2_config(dma->chan_rx, pix_mp->pixelformat);
1256         xvip_width_padding_factor(pix_mp->pixelformat, &padding_factor_nume,
1257                                   &padding_factor_deno);
1258         xvip_bpl_scaling_factor(pix_mp->pixelformat, &bpl_nume, &bpl_deno);
1259
1260         ctx->xt.frame_size = info->num_planes;
1261         ctx->sgl[0].size = (pix_mp->width * info->bpl_factor *
1262                             padding_factor_nume * bpl_nume) /
1263                             (padding_factor_deno * bpl_deno);
1264         ctx->sgl[0].icg = pix_mp->plane_fmt[0].bytesperline - ctx->sgl[0].size;
1265         ctx->xt.numf = pix_mp->height;
1266
1267         /*
1268          * dst_icg is the number of bytes to jump after last luma addr
1269          * and before first chroma addr
1270          */
1271         ctx->sgl[0].src_icg = 0;
1272
1273         if (info->buffers == 1) {
1274                 /* Handling contiguous data with mplanes */
1275                 ctx->sgl[0].dst_icg = 0;
1276         } else {
1277                 /* Handling non-contiguous data with mplanes */
1278                 if (info->buffers == 2) {
1279                         dma_addr_t chroma_cap =
1280                         vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 1);
1281                         luma_size = pix_mp->plane_fmt[0].bytesperline *
1282                                     ctx->xt.numf;
1283                         if (chroma_cap > p_out)
1284                                 ctx->sgl[0].dst_icg = chroma_cap - p_out -
1285                                                       luma_size;
1286                         }
1287         }
1288
1289         desc = dmaengine_prep_interleaved_dma(dma->chan_rx, &ctx->xt, flags);
1290         if (!desc) {
1291                 dev_err(xdev->dev, "Failed to prepare DMA rx transfer\n");
1292                 return;
1293         }
1294
1295         desc->callback = xvip_m2m_dma_callback;
1296         desc->callback_param = ctx;
1297         dmaengine_submit(desc);
1298         dma_async_issue_pending(dma->chan_rx);
1299 }
1300
1301 static void xvip_m2m_prep_submit_mem2dev_desc(struct xvip_m2m_ctx *ctx,
1302                                               struct vb2_v4l2_buffer *src_buf)
1303 {
1304         struct xvip_m2m_dma *dma = ctx->xdev->dma;
1305         struct xvip_m2m_dev *xdev = ctx->xdev;
1306         struct dma_async_tx_descriptor *desc;
1307         dma_addr_t p_in;
1308         const struct xvip_video_format *info;
1309         struct v4l2_pix_format_mplane *pix_mp;
1310         u32 padding_factor_nume, padding_factor_deno;
1311         u32 bpl_nume, bpl_deno;
1312         u32 luma_size;
1313         u32 flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
1314         enum operation_mode mode = DEFAULT;
1315         u32 bpl, src_width, src_height;
1316
1317         p_in = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
1318
1319         if (!p_in) {
1320                 dev_err(xdev->dev,
1321                         "Acquiring kernel pointer to buffer failed\n");
1322                 return;
1323         }
1324
1325         ctx->xt.dir = DMA_MEM_TO_DEV;
1326         ctx->xt.src_sgl = true;
1327         ctx->xt.dst_sgl = false;
1328         ctx->xt.src_start = p_in;
1329
1330         pix_mp = &dma->outfmt.fmt.pix_mp;
1331         bpl = pix_mp->plane_fmt[0].bytesperline;
1332         if (dma->crop) {
1333                 src_width = dma->r.width;
1334                 src_height = dma->r.height;
1335         } else {
1336                 src_width = pix_mp->width;
1337                 src_height = pix_mp->height;
1338         }
1339
1340         info = dma->outinfo;
1341         xilinx_xdma_set_mode(dma->chan_tx, mode);
1342         xilinx_xdma_v4l2_config(dma->chan_tx, pix_mp->pixelformat);
1343         xvip_width_padding_factor(pix_mp->pixelformat, &padding_factor_nume,
1344                                   &padding_factor_deno);
1345         xvip_bpl_scaling_factor(pix_mp->pixelformat, &bpl_nume, &bpl_deno);
1346
1347         ctx->xt.frame_size = info->num_planes;
1348         ctx->sgl[0].size = (src_width * info->bpl_factor *
1349                             padding_factor_nume * bpl_nume) /
1350                             (padding_factor_deno * bpl_deno);
1351         ctx->sgl[0].icg = bpl - ctx->sgl[0].size;
1352         ctx->xt.numf = src_height;
1353
1354         /*
1355          * src_icg is the number of bytes to jump after last luma addr
1356          * and before first chroma addr
1357          */
1358         ctx->sgl[0].dst_icg = 0;
1359
1360         if (info->buffers == 1) {
1361                 /* Handling contiguous data with mplanes */
1362                 ctx->sgl[0].src_icg = 0;
1363                 if (dma->crop)
1364                         ctx->sgl[0].src_icg = bpl *
1365                                               (pix_mp->height - src_height);
1366         } else {
1367                 /* Handling non-contiguous data with mplanes */
1368                 if (info->buffers == 2) {
1369                         dma_addr_t chroma_out =
1370                         vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 1);
1371                         luma_size = bpl * ctx->xt.numf;
1372                         if (chroma_out > p_in)
1373                                 ctx->sgl[0].src_icg = chroma_out - p_in -
1374                                                       luma_size;
1375                         }
1376         }
1377
1378         desc = dmaengine_prep_interleaved_dma(dma->chan_tx, &ctx->xt, flags);
1379         if (!desc) {
1380                 dev_err(xdev->dev, "Failed to prepare DMA tx transfer\n");
1381                 return;
1382         }
1383
1384         desc->callback = xvip_m2m_dma_callback_mem2dev;
1385         desc->callback_param = ctx;
1386         dmaengine_submit(desc);
1387         dma_async_issue_pending(dma->chan_tx);
1388 }
1389
1390 /**
1391  * xvip_m2m_device_run - prepares and starts the device
1392  *
1393  * @priv: Instance private data
1394  *
1395  * This simulates all the immediate preparations required before starting
1396  * a device. This will be called by the framework when it decides to schedule
1397  * a particular instance.
1398  */
1399 static void xvip_m2m_device_run(void *priv)
1400 {
1401         struct xvip_m2m_ctx *ctx = priv;
1402         struct vb2_v4l2_buffer *src_buf, *dst_buf;
1403
1404         src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
1405         dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
1406
1407         /* Prepare and submit mem2dev transaction */
1408         xvip_m2m_prep_submit_mem2dev_desc(ctx, src_buf);
1409
1410         /* Prepare and submit dev2mem transaction */
1411         xvip_m2m_prep_submit_dev2mem_desc(ctx, dst_buf);
1412 }
1413
/* File operations for the mem2mem video device node. */
static const struct v4l2_file_operations xvip_m2m_fops = {
	.owner          = THIS_MODULE,
	.open           = xvip_m2m_open,
	.release        = xvip_m2m_release,
	.poll           = xvip_m2m_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap           = xvip_m2m_mmap,
};
1422
/*
 * Template video_device; copied into each DMA object in
 * xvip_m2m_dma_init() before registration.
 */
static struct video_device xvip_m2m_videodev = {
	.name           = XVIP_M2M_NAME,
	.fops           = &xvip_m2m_fops,
	.ioctl_ops      = &xvip_m2m_ioctl_ops,
	.release        = video_device_release_empty,
	.vfl_dir        = VFL_DIR_M2M,
	.device_caps    = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING,
	.vfl_type       = VFL_TYPE_GRABBER,
};
1432
/* mem2mem framework callbacks: job scheduling, execution and abort. */
static const struct v4l2_m2m_ops xvip_m2m_ops = {
	.device_run     = xvip_m2m_device_run,
	.job_ready      = xvip_m2m_job_ready,
	.job_abort      = xvip_m2m_job_abort,
};
1438
1439 static int xvip_m2m_dma_init(struct xvip_m2m_dma *dma)
1440 {
1441         struct xvip_m2m_dev *xdev;
1442         struct v4l2_pix_format_mplane *pix_mp;
1443         int ret;
1444
1445         xdev = dma->xdev;
1446         mutex_init(&xdev->lock);
1447         mutex_init(&dma->pipe.lock);
1448         spin_lock_init(&xdev->queued_lock);
1449
1450         /* Format info on capture port - NV12 is the default format */
1451         dma->capinfo = xvip_get_format_by_fourcc(XVIP_M2M_DEFAULT_FMT);
1452         pix_mp = &dma->capfmt.fmt.pix_mp;
1453         pix_mp->pixelformat = dma->capinfo->fourcc;
1454
1455         pix_mp->field = V4L2_FIELD_NONE;
1456         pix_mp->width = XVIP_M2M_DEF_WIDTH;
1457         pix_mp->height = XVIP_M2M_DEF_HEIGHT;
1458         pix_mp->plane_fmt[0].bytesperline = pix_mp->width *
1459                                             dma->capinfo->bpl_factor;
1460         pix_mp->plane_fmt[0].sizeimage =
1461                         DIV_ROUND_UP(pix_mp->plane_fmt[0].bytesperline *
1462                                      pix_mp->height * dma->capinfo->bpp, 8);
1463
1464         /* Format info on output port - NV12 is the default format */
1465         dma->outinfo = xvip_get_format_by_fourcc(XVIP_M2M_DEFAULT_FMT);
1466         pix_mp = &dma->capfmt.fmt.pix_mp;
1467         pix_mp->pixelformat = dma->outinfo->fourcc;
1468         pix_mp->field = V4L2_FIELD_NONE;
1469         pix_mp->width = XVIP_M2M_DEF_WIDTH;
1470         pix_mp->height = XVIP_M2M_DEF_HEIGHT;
1471         pix_mp->plane_fmt[0].bytesperline = pix_mp->width *
1472                                             dma->outinfo->bpl_factor;
1473         pix_mp->plane_fmt[0].sizeimage =
1474                         DIV_ROUND_UP(pix_mp->plane_fmt[0].bytesperline *
1475                                      pix_mp->height * dma->outinfo->bpp, 8);
1476
1477         /* DMA channels for mem2mem */
1478         dma->chan_tx = dma_request_chan(xdev->dev, "tx");
1479         if (IS_ERR(dma->chan_tx)) {
1480                 ret = PTR_ERR(dma->chan_tx);
1481                 if (ret != -EPROBE_DEFER)
1482                         dev_err(xdev->dev, "mem2mem DMA tx channel not found");
1483
1484                 return ret;
1485         }
1486
1487         dma->chan_rx = dma_request_chan(xdev->dev, "rx");
1488         if (IS_ERR(dma->chan_rx)) {
1489                 ret = PTR_ERR(dma->chan_rx);
1490                 if (ret != -EPROBE_DEFER)
1491                         dev_err(xdev->dev, "mem2mem DMA rx channel not found");
1492
1493                 goto tx;
1494         }
1495
1496         dma->align = BIT(dma->chan_tx->device->copy_align);
1497
1498         /* Video node */
1499         dma->video = xvip_m2m_videodev;
1500         dma->video.v4l2_dev = &xdev->v4l2_dev;
1501         dma->video.lock = &xdev->lock;
1502
1503         dma->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
1504         dma->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
1505
1506         ret = media_entity_pads_init(&dma->video.entity, 2, dma->pads);
1507         if (ret < 0)
1508                 goto error;
1509
1510         ret = video_register_device(&dma->video, VFL_TYPE_GRABBER, -1);
1511         if (ret < 0) {
1512                 dev_err(xdev->dev, "Failed to register mem2mem video device\n");
1513                 goto tx_rx;
1514         }
1515
1516         video_set_drvdata(&dma->video, dma->xdev);
1517         return 0;
1518
1519 tx_rx:
1520         dma_release_channel(dma->chan_rx);
1521 tx:
1522         dma_release_channel(dma->chan_tx);
1523 error:
1524         return ret;
1525 }
1526
/*
 * Tear down the resources acquired in xvip_m2m_dma_init(): video node,
 * mutexes and both DMA channels.  Assumes init completed successfully,
 * since both channels are released unconditionally.
 */
static void xvip_m2m_dma_deinit(struct xvip_m2m_dma *dma)
{
	if (video_is_registered(&dma->video))
		video_unregister_device(&dma->video);

	mutex_destroy(&dma->pipe.lock);
	mutex_destroy(&dma->xdev->lock);
	dma_release_channel(dma->chan_tx);
	dma_release_channel(dma->chan_rx);
}
1537
1538 static int xvip_m2m_dma_alloc_init(struct xvip_m2m_dev *xdev)
1539 {
1540         struct xvip_m2m_dma *dma = NULL;
1541         int ret;
1542
1543         dma = devm_kzalloc(xdev->dev, sizeof(*dma), GFP_KERNEL);
1544         if (!dma)
1545                 return -ENOMEM;
1546
1547         dma->xdev = xdev;
1548         xdev->dma = dma;
1549
1550         ret = xvip_m2m_dma_init(xdev->dma);
1551         if (ret) {
1552                 dev_err(xdev->dev, "DMA initialization failed\n");
1553                 return ret;
1554         }
1555
1556         xdev->v4l2_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
1557         return 0;
1558 }
1559
1560 /* -----------------------------------------------------------------------------
1561  * Platform Device Driver
1562  */
/*
 * Undo xvip_composite_v4l2_init(): unregister the V4L2 device, then
 * unregister and clean up the media device.
 */
static void xvip_composite_v4l2_cleanup(struct xvip_m2m_dev *xdev)
{
	v4l2_device_unregister(&xdev->v4l2_dev);
	media_device_unregister(&xdev->media_dev);
	media_device_cleanup(&xdev->media_dev);
}
1569
1570 static int xvip_composite_v4l2_init(struct xvip_m2m_dev *xdev)
1571 {
1572         int ret;
1573
1574         xdev->media_dev.dev = xdev->dev;
1575         strlcpy(xdev->media_dev.model, "Xilinx Videoi M2M Composite Device",
1576                 sizeof(xdev->media_dev.model));
1577         xdev->media_dev.hw_revision = 0;
1578
1579         media_device_init(&xdev->media_dev);
1580
1581         xdev->v4l2_dev.mdev = &xdev->media_dev;
1582         ret = v4l2_device_register(xdev->dev, &xdev->v4l2_dev);
1583         if (ret < 0) {
1584                 dev_err(xdev->dev, "V4L2 device registration failed (%d)\n",
1585                         ret);
1586                 media_device_cleanup(&xdev->media_dev);
1587                 return ret;
1588         }
1589
1590         return 0;
1591 }
1592
1593 static struct xvip_graph_entity *
1594 xvip_graph_find_entity(struct xvip_m2m_dev *xdev,
1595                        const struct device_node *node)
1596 {
1597         struct xvip_graph_entity *entity;
1598
1599         list_for_each_entry(entity, &xdev->entities, list) {
1600                 if (entity->node == node)
1601                         return entity;
1602         }
1603
1604         return NULL;
1605 }
1606
1607 static int xvip_graph_build_one(struct xvip_m2m_dev *xdev,
1608                                 struct xvip_graph_entity *entity)
1609 {
1610         u32 link_flags = MEDIA_LNK_FL_ENABLED;
1611         struct media_entity *local = entity->entity;
1612         struct media_entity *remote;
1613         struct media_pad *local_pad;
1614         struct media_pad *remote_pad;
1615         struct xvip_graph_entity *ent;
1616         struct v4l2_fwnode_link link;
1617         struct device_node *ep = NULL;
1618         struct device_node *next;
1619         int ret = 0;
1620
1621         dev_dbg(xdev->dev, "creating links for entity %s\n", local->name);
1622
1623         while (1) {
1624                 /* Get the next endpoint and parse its link. */
1625                 next = of_graph_get_next_endpoint(entity->node, ep);
1626                 if (!next)
1627                         break;
1628
1629                 ep = next;
1630
1631                 dev_dbg(xdev->dev, "processing endpoint %pOF\n", ep);
1632
1633                 ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link);
1634                 if (ret < 0) {
1635                         dev_err(xdev->dev, "failed to parse link for %pOF\n",
1636                                 ep);
1637                         continue;
1638                 }
1639
1640                 /* Skip sink ports, they will be processed from the other end of
1641                  * the link.
1642                  */
1643                 if (link.local_port >= local->num_pads) {
1644                         dev_err(xdev->dev, "invalid port number %u for %pOF\n",
1645                                 link.local_port,
1646                                 to_of_node(link.local_node));
1647                         v4l2_fwnode_put_link(&link);
1648                         ret = -EINVAL;
1649                         break;
1650                 }
1651
1652                 local_pad = &local->pads[link.local_port];
1653
1654                 if (local_pad->flags & MEDIA_PAD_FL_SINK) {
1655                         dev_dbg(xdev->dev, "skipping sink port %pOF:%u\n",
1656                                 to_of_node(link.local_node),
1657                                 link.local_port);
1658                         v4l2_fwnode_put_link(&link);
1659                         continue;
1660                 }
1661
1662                 /* Skip DMA engines, they will be processed separately. */
1663                 if (link.remote_node == of_fwnode_handle(xdev->dev->of_node)) {
1664                         dev_dbg(xdev->dev, "skipping DMA port %pOF:%u\n",
1665                                 to_of_node(link.local_node),
1666                                 link.local_port);
1667                         v4l2_fwnode_put_link(&link);
1668                         continue;
1669                 }
1670
1671                 /* Find the remote entity. */
1672                 ent = xvip_graph_find_entity(xdev,
1673                                              to_of_node(link.remote_node));
1674                 if (!ent) {
1675                         dev_err(xdev->dev, "no entity found for %pOF\n",
1676                                 to_of_node(link.remote_node));
1677                         v4l2_fwnode_put_link(&link);
1678                         ret = -ENODEV;
1679                         break;
1680                 }
1681
1682                 remote = ent->entity;
1683
1684                 if (link.remote_port >= remote->num_pads) {
1685                         dev_err(xdev->dev, "invalid port number %u on %pOF\n",
1686                                 link.remote_port, to_of_node(link.remote_node));
1687                         v4l2_fwnode_put_link(&link);
1688                         ret = -EINVAL;
1689                         break;
1690                 }
1691
1692                 remote_pad = &remote->pads[link.remote_port];
1693
1694                 v4l2_fwnode_put_link(&link);
1695
1696                 /* Create the media link. */
1697                 dev_dbg(xdev->dev, "creating %s:%u -> %s:%u link\n",
1698                         local->name, local_pad->index,
1699                         remote->name, remote_pad->index);
1700
1701                 ret = media_create_pad_link(local, local_pad->index,
1702                                             remote, remote_pad->index,
1703                                             link_flags);
1704                 if (ret < 0) {
1705                         dev_err(xdev->dev,
1706                                 "failed to create %s:%u -> %s:%u link\n",
1707                                 local->name, local_pad->index,
1708                                 remote->name, remote_pad->index);
1709                         break;
1710                 }
1711         }
1712
1713         return ret;
1714 }
1715
1716 static int xvip_graph_parse_one(struct xvip_m2m_dev *xdev,
1717                                 struct device_node *node)
1718 {
1719         struct xvip_graph_entity *entity;
1720         struct device_node *remote;
1721         struct device_node *ep = NULL;
1722         int ret = 0;
1723
1724         dev_dbg(xdev->dev, "parsing node %pOF\n", node);
1725
1726         while (1) {
1727                 ep = of_graph_get_next_endpoint(node, ep);
1728                 if (!ep)
1729                         break;
1730
1731                 dev_dbg(xdev->dev, "handling endpoint %pOF %s\n",
1732                         ep, ep->name);
1733
1734                 remote = of_graph_get_remote_port_parent(ep);
1735                 if (!remote) {
1736                         ret = -EINVAL;
1737                         break;
1738                 }
1739                 dev_dbg(xdev->dev, "Remote endpoint %pOF %s\n",
1740                         remote, remote->name);
1741
1742                 /* Skip entities that we have already processed. */
1743                 if (remote == xdev->dev->of_node ||
1744                     xvip_graph_find_entity(xdev, remote)) {
1745                         of_node_put(remote);
1746                         continue;
1747                 }
1748
1749                 entity = devm_kzalloc(xdev->dev, sizeof(*entity), GFP_KERNEL);
1750                 if (!entity) {
1751                         of_node_put(remote);
1752                         ret = -ENOMEM;
1753                         break;
1754                 }
1755
1756                 entity->node = remote;
1757                 entity->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
1758                 entity->asd.match.fwnode = of_fwnode_handle(remote);
1759                 list_add_tail(&entity->list, &xdev->entities);
1760                 xdev->num_subdevs++;
1761         }
1762
1763         of_node_put(ep);
1764         return ret;
1765 }
1766
1767 static int xvip_graph_parse(struct xvip_m2m_dev *xdev)
1768 {
1769         struct xvip_graph_entity *entity;
1770         int ret;
1771
1772         /*
1773          * Walk the links to parse the full graph. Start by parsing the
1774          * composite node and then parse entities in turn. The list_for_each
1775          * loop will handle entities added at the end of the list while walking
1776          * the links.
1777          */
1778         ret = xvip_graph_parse_one(xdev, xdev->dev->of_node);
1779         if (ret < 0)
1780                 return 0;
1781
1782         list_for_each_entry(entity, &xdev->entities, list) {
1783                 ret = xvip_graph_parse_one(xdev, entity->node);
1784                 if (ret < 0)
1785                         break;
1786         }
1787
1788         return ret;
1789 }
1790
/**
 * xvip_graph_build_dma - Create media links between the DMA video node and
 * the remote subdev entities
 * @xdev: the Xilinx mem2mem composite device
 *
 * Walk every OF-graph endpoint of the composite node, resolve the remote
 * entity previously bound by the async notifier, and create an enabled media
 * pad link between the DMA video entity and that remote entity. The link
 * direction is chosen from the local port number: XVIP_PAD_SOURCE means the
 * DMA drives the remote sink, anything else means the remote drives the DMA.
 *
 * Return: 0 on success or a negative error code.
 */
static int xvip_graph_build_dma(struct xvip_m2m_dev *xdev)
{
	u32 link_flags = MEDIA_LNK_FL_ENABLED;
	struct device_node *node = xdev->dev->of_node;
	struct media_entity *source;
	struct media_entity *sink;
	struct media_pad *source_pad;
	struct media_pad *sink_pad;
	struct xvip_graph_entity *ent;
	struct v4l2_fwnode_link link;
	struct device_node *ep = NULL;
	struct device_node *next;
	struct xvip_m2m_dma *dma = xdev->dma;
	int ret = 0;

	dev_dbg(xdev->dev, "creating links for DMA engines\n");

	while (1) {
		/* Get the next endpoint and parse its link. */
		next = of_graph_get_next_endpoint(node, ep);
		if (!next)
			break;

		ep = next;

		dev_dbg(xdev->dev, "processing endpoint %pOF\n", ep);

		ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link);
		if (ret < 0) {
			/*
			 * NOTE(review): the endpoint is skipped but ret stays
			 * negative; if this is the last endpoint the function
			 * reports failure for an otherwise skipped link —
			 * confirm whether that is intended.
			 */
			dev_err(xdev->dev, "failed to parse link for %pOF\n",
				ep);
			continue;
		}

		dev_dbg(xdev->dev, "creating link for DMA engine %s\n",
			dma->video.name);

		/* Find the remote entity. */
		ent = xvip_graph_find_entity(xdev,
					     to_of_node(link.remote_node));
		if (!ent) {
			dev_err(xdev->dev, "no entity found for %pOF\n",
				to_of_node(link.remote_node));
			v4l2_fwnode_put_link(&link);
			ret = -ENODEV;
			break;
		}
		/* Validate the remote port index against the bound entity. */
		if (link.remote_port >= ent->entity->num_pads) {
			dev_err(xdev->dev, "invalid port number %u on %pOF\n",
				link.remote_port,
				to_of_node(link.remote_node));
			v4l2_fwnode_put_link(&link);
			ret = -EINVAL;
			break;
		}

		dev_dbg(xdev->dev, "Entity %s %s\n", ent->node->name,
			ent->node->full_name);
		dev_dbg(xdev->dev, "port number %u on %pOF\n",
			link.remote_port, to_of_node(link.remote_node));
		dev_dbg(xdev->dev, "local port number %u on %pOF\n",
			link.local_port, to_of_node(link.local_node));

		/*
		 * Local source port: DMA entity feeds the remote entity.
		 * Otherwise the remote entity feeds the DMA entity.
		 */
		if (link.local_port == XVIP_PAD_SOURCE) {
			source = &dma->video.entity;
			source_pad = &dma->pads[XVIP_PAD_SOURCE];
			sink = ent->entity;
			sink_pad = &sink->pads[XVIP_PAD_SINK];

		} else {
			source = ent->entity;
			source_pad = &source->pads[XVIP_PAD_SOURCE];
			sink = &dma->video.entity;
			sink_pad = &dma->pads[XVIP_PAD_SINK];
		}

		/* Pad pointers are resolved; the fwnode link can be dropped. */
		v4l2_fwnode_put_link(&link);

		/* Create the media link. */
		dev_dbg(xdev->dev, "creating %s:%u -> %s:%u link\n",
			source->name, source_pad->index,
			sink->name, sink_pad->index);

		ret = media_create_pad_link(source, source_pad->index,
					    sink, sink_pad->index,
					    link_flags);
		if (ret < 0) {
			dev_err(xdev->dev,
				"failed to create %s:%u -> %s:%u link\n",
				source->name, source_pad->index,
				sink->name, sink_pad->index);
			break;
		}
	}

	return ret;
}
1888
1889 static int xvip_graph_notify_complete(struct v4l2_async_notifier *notifier)
1890 {
1891         struct xvip_m2m_dev *xdev =
1892                 container_of(notifier, struct xvip_m2m_dev, notifier);
1893         struct xvip_graph_entity *entity;
1894         int ret;
1895
1896         dev_dbg(xdev->dev, "notify complete, all subdevs registered\n");
1897
1898         /* Create links for every entity. */
1899         list_for_each_entry(entity, &xdev->entities, list) {
1900                 ret = xvip_graph_build_one(xdev, entity);
1901                 if (ret < 0)
1902                         return ret;
1903         }
1904
1905         /* Create links for DMA channels. */
1906         ret = xvip_graph_build_dma(xdev);
1907         if (ret < 0)
1908                 return ret;
1909
1910         ret = v4l2_device_register_subdev_nodes(&xdev->v4l2_dev);
1911         if (ret < 0)
1912                 dev_err(xdev->dev, "failed to register subdev nodes\n");
1913
1914         return media_device_register(&xdev->media_dev);
1915 }
1916
1917 static int xvip_graph_notify_bound(struct v4l2_async_notifier *notifier,
1918                                    struct v4l2_subdev *subdev,
1919                                    struct v4l2_async_subdev *asd)
1920 {
1921         struct xvip_m2m_dev *xdev =
1922                 container_of(notifier, struct xvip_m2m_dev, notifier);
1923         struct xvip_graph_entity *entity;
1924
1925         /* Locate the entity corresponding to the bound subdev and store the
1926          * subdev pointer.
1927          */
1928         list_for_each_entry(entity, &xdev->entities, list) {
1929                 if (entity->node != subdev->dev->of_node)
1930                         continue;
1931
1932                 if (entity->subdev) {
1933                         dev_err(xdev->dev, "duplicate subdev for node %pOF\n",
1934                                 entity->node);
1935                         return -EINVAL;
1936                 }
1937
1938                 dev_dbg(xdev->dev, "subdev %s bound\n", subdev->name);
1939                 entity->entity = &subdev->entity;
1940                 entity->subdev = subdev;
1941                 return 0;
1942         }
1943
1944         dev_err(xdev->dev, "no entity for subdev %s\n", subdev->name);
1945         return -EINVAL;
1946 }
1947
/* Async notifier callbacks: bind subdevs as they appear, then build links. */
static const struct v4l2_async_notifier_operations xvip_graph_notify_ops = {
	.bound = xvip_graph_notify_bound,
	.complete = xvip_graph_notify_complete,
};
1952
1953 static void xvip_graph_cleanup(struct xvip_m2m_dev *xdev)
1954 {
1955         struct xvip_graph_entity *entityp;
1956         struct xvip_graph_entity *entity;
1957
1958         v4l2_async_notifier_unregister(&xdev->notifier);
1959
1960         list_for_each_entry_safe(entity, entityp, &xdev->entities, list) {
1961                 of_node_put(entity->node);
1962                 list_del(&entity->list);
1963         }
1964 }
1965
1966 static int xvip_graph_init(struct xvip_m2m_dev *xdev)
1967 {
1968         struct xvip_graph_entity *entity;
1969         struct v4l2_async_subdev **subdevs = NULL;
1970         unsigned int num_subdevs;
1971         unsigned int i;
1972         int ret;
1973
1974         /* Init the DMA channels. */
1975         ret = xvip_m2m_dma_alloc_init(xdev);
1976         if (ret < 0) {
1977                 dev_err(xdev->dev, "DMA initialization failed\n");
1978                 goto done;
1979         }
1980
1981         /* Parse the graph to extract a list of subdevice DT nodes. */
1982         ret = xvip_graph_parse(xdev);
1983         if (ret < 0) {
1984                 dev_err(xdev->dev, "graph parsing failed\n");
1985                 goto done;
1986         }
1987         dev_dbg(xdev->dev, "Number of subdev = %d\n", xdev->num_subdevs);
1988
1989         if (!xdev->num_subdevs) {
1990                 dev_err(xdev->dev, "no subdev found in graph\n");
1991                 goto done;
1992         }
1993
1994         /* Register the subdevices notifier. */
1995         num_subdevs = xdev->num_subdevs;
1996         subdevs = devm_kzalloc(xdev->dev, sizeof(*subdevs) * num_subdevs,
1997                                GFP_KERNEL);
1998         if (!subdevs) {
1999                 ret = -ENOMEM;
2000                 goto done;
2001         }
2002
2003         i = 0;
2004         list_for_each_entry(entity, &xdev->entities, list)
2005                 subdevs[i++] = &entity->asd;
2006
2007         xdev->notifier.subdevs = subdevs;
2008         xdev->notifier.num_subdevs = num_subdevs;
2009         xdev->notifier.ops = &xvip_graph_notify_ops;
2010
2011         ret = v4l2_async_notifier_register(&xdev->v4l2_dev, &xdev->notifier);
2012         if (ret < 0) {
2013                 dev_err(xdev->dev, "notifier registration failed\n");
2014                 goto done;
2015         }
2016
2017         ret = 0;
2018
2019 done:
2020         if (ret < 0)
2021                 xvip_graph_cleanup(xdev);
2022
2023         return ret;
2024 }
2025
/*
 * Tear down the composite device: graph/notifier resources first, then the
 * V4L2/media device state (order matters — the notifier references the
 * v4l2_device).
 */
static int xvip_composite_remove(struct platform_device *pdev)
{
	struct xvip_m2m_dev *xdev = platform_get_drvdata(pdev);

	xvip_graph_cleanup(xdev);
	xvip_composite_v4l2_cleanup(xdev);

	return 0;
}
2035
2036 static int xvip_m2m_probe(struct platform_device *pdev)
2037 {
2038         struct xvip_m2m_dev *xdev = NULL;
2039         int ret;
2040
2041         xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
2042         if (!xdev)
2043                 return -ENOMEM;
2044
2045         xdev->dev = &pdev->dev;
2046         INIT_LIST_HEAD(&xdev->entities);
2047
2048         ret = xvip_composite_v4l2_init(xdev);
2049         if (ret)
2050                 return -EINVAL;
2051
2052         ret = xvip_graph_init(xdev);
2053         if (ret < 0)
2054                 goto error;
2055
2056         ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
2057         if (ret) {
2058                 dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret);
2059                 goto dma_cleanup;
2060         }
2061
2062         platform_set_drvdata(pdev, xdev);
2063
2064         xdev->m2m_dev = v4l2_m2m_init(&xvip_m2m_ops);
2065         if (IS_ERR(xdev->m2m_dev)) {
2066                 dev_err(xdev->dev, "Failed to init mem2mem device\n");
2067                 ret = PTR_ERR(xdev->m2m_dev);
2068                 goto dma_cleanup;
2069         }
2070
2071         dev_info(xdev->dev, "mem2mem device registered\n");
2072         return 0;
2073
2074 dma_cleanup:
2075         xvip_m2m_dma_deinit(xdev->dma);
2076
2077 error:
2078         v4l2_device_unregister(&xdev->v4l2_dev);
2079         return ret;
2080 }
2081
/* Platform driver remove: delegate to the composite teardown helper. */
static int xvip_m2m_remove(struct platform_device *pdev)
{
	xvip_composite_remove(pdev);
	return 0;
}
2087
/* DT compatible strings handled by this driver. */
static const struct of_device_id xvip_m2m_of_id_table[] = {
	{ .compatible = "xlnx,mem2mem" },
	{ }
};
MODULE_DEVICE_TABLE(of, xvip_m2m_of_id_table);
2093
/* Platform driver glue for the Xilinx mem2mem composite device. */
static struct platform_driver xvip_m2m_driver = {
	.driver = {
		.name = XVIP_M2M_NAME,
		.of_match_table = xvip_m2m_of_id_table,
	},
	.probe = xvip_m2m_probe,
	.remove = xvip_m2m_remove,
};

module_platform_driver(xvip_m2m_driver);

MODULE_AUTHOR("Xilinx Inc.");
MODULE_DESCRIPTION("Xilinx V4L2 mem2mem driver");
MODULE_LICENSE("GPL v2");