// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx Scene Change Detection driver
 *
 * Copyright (C) 2018 Xilinx, Inc.
 *
 * Authors: Anand Ashok Dumbre <anand.ashok.dumbre@xilinx.com>
 *          Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/xilinx-v4l2-controls.h>
#include <linux/xilinx-v4l2-events.h>
#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-subdev.h>

#include "xilinx-scenechange.h"
#include "xilinx-vip.h"

#define XSCD_MAX_WIDTH          3840
#define XSCD_MAX_HEIGHT         2160
#define XSCD_MIN_WIDTH          640
#define XSCD_MIN_HEIGHT         480

#define XSCD_WIDTH_OFFSET               0x10
#define XSCD_HEIGHT_OFFSET              0x18
#define XSCD_STRIDE_OFFSET              0x20
#define XSCD_VID_FMT_OFFSET             0x28
#define XSCD_SUBSAMPLE_OFFSET           0x30

/* Hardware video formats for memory based IP */
#define XSCD_COLOR_FMT_Y8               24
#define XSCD_COLOR_FMT_Y10              25

/* Hardware video formats for streaming based IP */
#define XSCD_COLOR_FMT_RGB              0
#define XSCD_COLOR_FMT_YUV_444          1
#define XSCD_COLOR_FMT_YUV_422          2
#define XSCD_COLOR_FMT_YUV_420          4

#define XSCD_V_SUBSAMPLING              16
#define XSCD_BYTE_ALIGN                 16

/* -----------------------------------------------------------------------------
 * V4L2 Subdevice Pad Operations
 */

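/*
 * Note: the two enumeration handlers below are currently stubs; they accept
 * the request without filling in any media bus code or frame size entries.
 */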
static int xscd_enum_mbus_code(struct v4l2_subdev *subdev,
                               struct v4l2_subdev_pad_config *cfg,
                               struct v4l2_subdev_mbus_code_enum *code)
{
        return 0;
}

static int xscd_enum_frame_size(struct v4l2_subdev *subdev,
                                struct v4l2_subdev_pad_config *cfg,
                                struct v4l2_subdev_frame_size_enum *fse)
{
        return 0;
}

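/*
 * Return the TRY format stored in the pad configuration or the ACTIVE format
 * stored in the channel, depending on the requested format type.
 */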
static struct v4l2_mbus_framefmt *
__xscd_get_pad_format(struct xscd_chan *chan,
                      struct v4l2_subdev_pad_config *cfg,
                      unsigned int pad, u32 which)
{
        switch (which) {
        case V4L2_SUBDEV_FORMAT_TRY:
                return v4l2_subdev_get_try_format(&chan->subdev, cfg, pad);
        case V4L2_SUBDEV_FORMAT_ACTIVE:
                return &chan->format;
        default:
                return NULL;
        }
}

static int xscd_get_format(struct v4l2_subdev *subdev,
                           struct v4l2_subdev_pad_config *cfg,
                           struct v4l2_subdev_format *fmt)
{
        struct xscd_chan *chan = to_chan(subdev);

        fmt->format = *__xscd_get_pad_format(chan, cfg, fmt->pad, fmt->which);
        return 0;
}

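/*
 * Only the frame size is configurable; clamp the requested width and height
 * to the limits supported by the IP and return the resulting format.
 */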
static int xscd_set_format(struct v4l2_subdev *subdev,
                           struct v4l2_subdev_pad_config *cfg,
                           struct v4l2_subdev_format *fmt)
{
        struct xscd_chan *chan = to_chan(subdev);
        struct v4l2_mbus_framefmt *format;

        format = __xscd_get_pad_format(chan, cfg, fmt->pad, fmt->which);
        format->width = clamp_t(unsigned int, fmt->format.width,
                                XSCD_MIN_WIDTH, XSCD_MAX_WIDTH);
        format->height = clamp_t(unsigned int, fmt->format.height,
                                 XSCD_MIN_HEIGHT, XSCD_MAX_HEIGHT);
        fmt->format = *format;

        return 0;
}

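/**
 * xscd_chan_get_vid_fmt - Map a media bus code to a hardware video format
 * @media_bus_fmt: Media bus format code of the connected stream
 * @memory_based: True for the memory based IP, false for the streaming IP
 *
 * Return: Value to program into the channel video format register
 */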
static int xscd_chan_get_vid_fmt(u32 media_bus_fmt, bool memory_based)
{
        /*
         * FIXME: The same media bus codes are used for both 8-bit and 10-bit
         * pixel formats, so there is no way to differentiate between them
         * based on the media bus code. This will be fixed when we have a
         * dedicated media bus code for each format.
         */
        if (memory_based)
                return XSCD_COLOR_FMT_Y8;

        switch (media_bus_fmt) {
        case MEDIA_BUS_FMT_VYYUYY8_1X24:
                return XSCD_COLOR_FMT_YUV_420;
        case MEDIA_BUS_FMT_UYVY8_1X16:
                return XSCD_COLOR_FMT_YUV_422;
        case MEDIA_BUS_FMT_VUY8_1X24:
                return XSCD_COLOR_FMT_YUV_444;
        case MEDIA_BUS_FMT_RBG888_1X24:
                return XSCD_COLOR_FMT_RGB;
        default:
                return XSCD_COLOR_FMT_YUV_420;
        }
}

/**
 * xscd_chan_configure_params - Program parameters to HW registers
 * @chan: Driver specific channel struct pointer
 * @shared_data: Driver data shared across all channels of the device
 * @chan_offset: Register offset for a channel
 */
void xscd_chan_configure_params(struct xscd_chan *chan,
                                struct xscd_shared_data *shared_data,
                                u32 chan_offset)
{
        u32 vid_fmt, stride;

        xscd_write(chan->iomem, XSCD_WIDTH_OFFSET + chan_offset,
                   chan->format.width);

        /* Stride is required only for memory based IP, not for streaming IP */
        if (shared_data->memory_based) {
                stride = roundup(chan->format.width, XSCD_BYTE_ALIGN);
                xscd_write(chan->iomem, XSCD_STRIDE_OFFSET + chan_offset,
                           stride);
        }

        xscd_write(chan->iomem, XSCD_HEIGHT_OFFSET + chan_offset,
                   chan->format.height);

        /* Hardware video format */
        vid_fmt = xscd_chan_get_vid_fmt(chan->format.code,
                                        shared_data->memory_based);
        xscd_write(chan->iomem, XSCD_VID_FMT_OFFSET + chan_offset, vid_fmt);

        /*
         * This is the vertical subsampling factor of the input image. Instead
         * of sampling every line to calculate the histogram, the IP uses this
         * register value to sample only specific lines of the frame.
         */
        xscd_write(chan->iomem, XSCD_SUBSAMPLE_OFFSET + chan_offset,
                   XSCD_V_SUBSAMPLING);
}

/* -----------------------------------------------------------------------------
 * V4L2 Subdevice Operations
 */

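/**
 * xscd_s_stream - Start or stop scene change detection on a channel
 * @subdev: V4L2 subdevice
 * @enable: Non-zero to start streaming, zero to stop
 *
 * For the memory based IP the hardware is reset and started only when the
 * first channel starts streaming; further channels only bump the active
 * stream count. For the streaming IP each channel is configured, reset and
 * started (or halted) individually.
 *
 * Return: 0 on success
 */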
static int xscd_s_stream(struct v4l2_subdev *subdev, int enable)
{
        struct xscd_chan *chan = to_chan(subdev);
        struct xscd_shared_data *shared_data;
        unsigned long flags;
        u32 chan_offset;

        /* TODO: Re-organise shared data in a better way */
        shared_data = (struct xscd_shared_data *)chan->dev->parent->driver_data;
        chan->dmachan.en = enable;

        spin_lock_irqsave(&chan->dmachan.lock, flags);

        if (shared_data->memory_based) {
                chan_offset = chan->id * XILINX_XSCD_CHAN_OFFSET;
                xscd_chan_configure_params(chan, shared_data, chan_offset);
                if (enable) {
                        if (!shared_data->active_streams) {
                                chan->dmachan.valid_interrupt = true;
                                shared_data->active_streams++;
                                xscd_dma_start_transfer(&chan->dmachan);
                                xscd_dma_reset(&chan->dmachan);
                                xscd_dma_chan_enable(&chan->dmachan,
                                                     BIT(chan->id));
                                xscd_dma_start(&chan->dmachan);
                        } else {
                                shared_data->active_streams++;
                        }
                } else {
                        shared_data->active_streams--;
                }
        } else {
                /* Streaming based */
                if (enable) {
                        xscd_chan_configure_params(chan, shared_data, chan->id);
                        xscd_dma_reset(&chan->dmachan);
                        xscd_dma_chan_enable(&chan->dmachan, BIT(chan->id));
                        xscd_dma_start(&chan->dmachan);
                } else {
                        xscd_dma_halt(&chan->dmachan);
                }
        }

        spin_unlock_irqrestore(&chan->dmachan.lock, flags);
        return 0;
}

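/*
 * Userspace receives scene change notifications through the V4L2 event
 * interface of this subdevice. A minimal usage sketch (illustrative only,
 * not part of this driver; "subdev_fd" is an already opened subdev node and
 * error handling is omitted):
 *
 *      struct v4l2_event_subscription sub = { .type = V4L2_EVENT_XLNXSCD };
 *      struct v4l2_event ev;
 *
 *      ioctl(subdev_fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
 *      ioctl(subdev_fd, VIDIOC_DQEVENT, &ev);
 *
 * The first 32-bit word of ev.u.data is 1 when a scene change was detected
 * and 0 otherwise.
 */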
static int xscd_subscribe_event(struct v4l2_subdev *sd,
                                struct v4l2_fh *fh,
                                struct v4l2_event_subscription *sub)
{
        int ret;
        struct xscd_chan *chan = to_chan(sd);

        mutex_lock(&chan->lock);

        switch (sub->type) {
        case V4L2_EVENT_XLNXSCD:
                ret = v4l2_event_subscribe(fh, sub, 1, NULL);
                break;
        default:
                ret = -EINVAL;
        }

        mutex_unlock(&chan->lock);

        return ret;
}

static int xscd_unsubscribe_event(struct v4l2_subdev *sd,
                                  struct v4l2_fh *fh,
                                  struct v4l2_event_subscription *sub)
{
        int ret;
        struct xscd_chan *chan = to_chan(sd);

        mutex_lock(&chan->lock);
        ret = v4l2_event_unsubscribe(fh, sub);
        mutex_unlock(&chan->lock);

        return ret;
}

static int xscd_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
{
        return 0;
}

static int xscd_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
{
        return 0;
}

static const struct v4l2_subdev_core_ops xscd_core_ops = {
        .subscribe_event = xscd_subscribe_event,
        .unsubscribe_event = xscd_unsubscribe_event,
};

static const struct v4l2_subdev_video_ops xscd_video_ops = {
        .s_stream = xscd_s_stream,
};

static const struct v4l2_subdev_pad_ops xscd_pad_ops = {
        .enum_mbus_code = xscd_enum_mbus_code,
        .enum_frame_size = xscd_enum_frame_size,
        .get_fmt = xscd_get_format,
        .set_fmt = xscd_set_format,
};

static const struct v4l2_subdev_ops xscd_ops = {
        .core = &xscd_core_ops,
        .video = &xscd_video_ops,
        .pad = &xscd_pad_ops,
};

static const struct v4l2_subdev_internal_ops xscd_internal_ops = {
        .open = xscd_open,
        .close = xscd_close,
};

/* -----------------------------------------------------------------------------
 * Media Operations
 */

static const struct media_entity_operations xscd_media_ops = {
        .link_validate = v4l2_subdev_link_validate,
};

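/**
 * xscd_chan_irq_handler - Interrupt handler for a scene change channel
 * @irq: IRQ number
 * @data: Pointer to the channel structure
 *
 * Read the sum of absolute differences (SAD) computed by the IP, normalize it
 * to the frame size and report the result as a V4L2_EVENT_XLNXSCD event with
 * a 0/1 scene change flag in the first word of the event payload.
 *
 * Return: IRQ_HANDLED if a valid interrupt was serviced, IRQ_NONE otherwise
 */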
static irqreturn_t xscd_chan_irq_handler(int irq, void *data)
{
        struct xscd_chan *chan = (struct xscd_chan *)data;
        u32 sad;
        u32 *eventdata;

        spin_lock(&chan->dmachan.lock);
        if (chan->dmachan.valid_interrupt) {
                spin_unlock(&chan->dmachan.lock);
                sad = xscd_read(chan->iomem, XILINX_XSCD_SAD_OFFSET +
                                (chan->id * XILINX_XSCD_CHAN_OFFSET));
                sad = (sad * 16) / (chan->format.width * chan->format.height);
                memset(&chan->event, 0, sizeof(chan->event));
                eventdata = (u32 *)&chan->event.u.data;

                if (sad >= 1)
                        eventdata[0] = 1;
                else
                        eventdata[0] = 0;

                chan->event.type = V4L2_EVENT_XLNXSCD;
                v4l2_subdev_notify(&chan->subdev, V4L2_DEVICE_NOTIFY_EVENT,
                                   &chan->event);
                return IRQ_HANDLED;
        }

        spin_unlock(&chan->dmachan.lock);
        return IRQ_NONE;
}

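/**
 * xscd_chan_parse_of - Parse channel resources from the parent device
 * @chan: Driver specific channel struct pointer
 *
 * The register space and the interrupt line are shared with the parent scene
 * change detection device, so they are obtained from the parent's driver data
 * and OF node rather than from the channel's own node.
 *
 * Return: 0 on success, negative error code otherwise
 */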
static int xscd_chan_parse_of(struct xscd_chan *chan)
{
        struct device_node *parent_node;
        struct xscd_shared_data *shared_data;
        int err;

        parent_node = chan->dev->parent->of_node;
        shared_data = (struct xscd_shared_data *)chan->dev->parent->driver_data;
        shared_data->dma_chan_list[chan->id] = &chan->dmachan;
        chan->iomem = shared_data->iomem;

        chan->irq = irq_of_parse_and_map(parent_node, 0);
        if (!chan->irq) {
                dev_err(chan->dev, "No valid irq found\n");
                return -EINVAL;
        }

        err = devm_request_irq(chan->dev, chan->irq, xscd_chan_irq_handler,
                               IRQF_SHARED, dev_name(chan->dev), chan);
        if (err) {
                dev_err(chan->dev, "unable to request IRQ %d\n", chan->irq);
                return err;
        }

        chan->dmachan.iomem = shared_data->iomem;
        chan->dmachan.id = chan->id;

        return 0;
}

/**
 * xscd_chan_probe - Driver probe function
 * @pdev: Pointer to the platform device structure
 *
 * Return: 0 on success, negative error code otherwise
 */
static int xscd_chan_probe(struct platform_device *pdev)
{
        struct xscd_chan *chan;
        struct v4l2_subdev *subdev;
        struct xscd_shared_data *shared_data;
        int ret;
        u32 num_pads;

        shared_data = (struct xscd_shared_data *)pdev->dev.parent->driver_data;
        chan = devm_kzalloc(&pdev->dev, sizeof(*chan), GFP_KERNEL);
        if (!chan)
                return -ENOMEM;

        mutex_init(&chan->lock);
        chan->dev = &pdev->dev;
        chan->id = pdev->id;
        ret = xscd_chan_parse_of(chan);
        if (ret < 0)
                return ret;

        /* Initialize V4L2 subdevice and media entity */
        subdev = &chan->subdev;
        v4l2_subdev_init(subdev, &xscd_ops);
        subdev->dev = &pdev->dev;
        subdev->internal_ops = &xscd_internal_ops;
        strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
        v4l2_set_subdevdata(subdev, chan);
        subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;

        /* Initialize default format */
        chan->format.code = MEDIA_BUS_FMT_VYYUYY8_1X24;
        chan->format.field = V4L2_FIELD_NONE;
        chan->format.width = XSCD_MAX_WIDTH;
        chan->format.height = XSCD_MAX_HEIGHT;

        /* Initialize media pads: the memory based IP has a sink pad only */
        num_pads = shared_data->memory_based ? 1 : 2;
        chan->pad = devm_kcalloc(&pdev->dev, num_pads,
                                 sizeof(struct media_pad), GFP_KERNEL);
        if (!chan->pad)
                return -ENOMEM;

        chan->pad[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
        if (!shared_data->memory_based)
                chan->pad[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;

        ret = media_entity_pads_init(&subdev->entity, num_pads, chan->pad);
        if (ret < 0)
                goto error;

        subdev->entity.ops = &xscd_media_ops;
        ret = v4l2_async_register_subdev(subdev);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to register subdev\n");
                goto error;
        }

        dev_info(chan->dev, "Scene change detection channel found!\n");
        return 0;

error:
        media_entity_cleanup(&subdev->entity);
        return ret;
}

static int xscd_chan_remove(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver xscd_chan_driver = {
        .probe          = xscd_chan_probe,
        .remove         = xscd_chan_remove,
        .driver         = {
                .name   = "xlnx-scdchan",
        },
};

static int __init xscd_chan_init(void)
{
        return platform_driver_register(&xscd_chan_driver);
}

static void __exit xscd_chan_exit(void)
{
        platform_driver_unregister(&xscd_chan_driver);
}

module_init(xscd_chan_init);
module_exit(xscd_chan_exit);

MODULE_AUTHOR("Xilinx Inc.");
MODULE_DESCRIPTION("Xilinx Scene Change Detection");
MODULE_LICENSE("GPL v2");