// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx Scene Change Detection DMA driver
 *
 * Copyright (C) 2018 Xilinx, Inc.
 *
 * Authors: Anand Ashok Dumbre <anand.ashok.dumbre@xilinx.com>
 *          Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "xilinx-scenechange.h"

/**
 * xscd_dma_irq_handler - scdma Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx scdma device structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t xscd_dma_irq_handler(int irq, void *data)
{
        struct xscd_dma_device *dev = data;
        struct xscd_dma_chan *chan;

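        /*
         * In memory based mode a single interrupt covers all channels. Walk
         * every channel, collect the ones that have pending descriptors,
         * restart their transfers and kick the hardware once for the whole
         * set before scheduling the per channel completion tasklets.
         */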
        if (dev->memory_based) {
                u32 chan_en = 0, id;

                for (id = 0; id < dev->numchannels; id++) {
                        chan = dev->chan[id];
                        spin_lock(&chan->lock);
                        chan->idle = true;

                        if (chan->en && (!list_empty(&chan->pending_list))) {
                                chan_en |= 1 << chan->id;
                                chan->valid_interrupt = true;
                        } else {
                                chan->valid_interrupt = false;
                        }

                        xscd_dma_start_transfer(chan);
                        spin_unlock(&chan->lock);
                }

                if (chan_en) {
                        xscd_dma_reset(chan);
                        xscd_dma_chan_enable(chan, chan_en);
                        xscd_dma_start(chan);
                }

                for (id = 0; id < dev->numchannels; id++) {
                        chan = dev->chan[id];
                        tasklet_schedule(&chan->tasklet);
                }
        }

        return IRQ_HANDLED;
}

/* -----------------------------------------------------------------------------
 * Descriptors alloc and free
 */

/**
 * xscd_dma_alloc_tx_descriptor - Allocate a transaction descriptor
 * @chan: Driver specific dma channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
struct xscd_dma_tx_descriptor *
xscd_dma_alloc_tx_descriptor(struct xscd_dma_chan *chan)
{
        struct xscd_dma_tx_descriptor *desc;

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return NULL;

        return desc;
}

/**
 * xscd_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
dma_cookie_t xscd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct xscd_dma_tx_descriptor *desc = to_xscd_dma_tx_descriptor(tx);
        struct xscd_dma_chan *chan = to_xscd_dma_chan(tx->chan);
        dma_cookie_t cookie;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        cookie = dma_cookie_assign(tx);
        list_add_tail(&desc->node, &chan->pending_list);
        spin_unlock_irqrestore(&chan->lock, flags);

        return cookie;
}

/**
 * xscd_dma_chan_enable - Enable dma channel
 * @chan: Driver specific dma channel
 * @chan_en: Bitmap of channels to enable for transfer
 */
void xscd_dma_chan_enable(struct xscd_dma_chan *chan, int chan_en)
{
        xscd_write(chan->iomem, XSCD_CHAN_EN_OFFSET, chan_en);
}

/**
 * xscd_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan: Driver specific dma channel
 *
 * This function is invoked with the channel lock held.
 */
static void xscd_dma_complete_descriptor(struct xscd_dma_chan *chan)
{
        struct xscd_dma_tx_descriptor *desc = chan->active_desc;

        dma_cookie_complete(&desc->async_tx);
        list_add_tail(&desc->node, &chan->done_list);
}

/**
 * xscd_dma_start_transfer - Start dma transfer
 * @chan: Driver specific channel struct pointer
 */
void xscd_dma_start_transfer(struct xscd_dma_chan *chan)
{
        struct xscd_dma_tx_descriptor *desc;
        u32 chanoffset = chan->id * XSCD_CHAN_OFFSET;

        if (!chan->en)
                return;

        if (!chan->idle)
                return;

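        /*
         * Descriptors move through a two stage pipeline: the previously
         * staged descriptor becomes active here, while the next pending
         * descriptor has its luma address programmed into the hardware and
         * stays staged until the following transfer starts.
         */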
        if (chan->active_desc) {
                xscd_dma_complete_descriptor(chan);
                chan->active_desc = NULL;
        }

        if (chan->staged_desc) {
                chan->active_desc = chan->staged_desc;
                chan->staged_desc = NULL;
        }

        if (list_empty(&chan->pending_list))
                return;

        desc = list_first_entry(&chan->pending_list,
                                struct xscd_dma_tx_descriptor, node);

        /* Start the transfer */
        xscd_write(chan->iomem, XSCD_ADDR_OFFSET + chanoffset,
                   desc->sw.luma_plane_addr);

        list_del(&desc->node);
        chan->staged_desc = desc;
}

/**
 * xscd_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific dma channel
 * @list: List to parse and delete the descriptor
 */
void xscd_dma_free_desc_list(struct xscd_dma_chan *chan,
                             struct list_head *list)
{
        struct xscd_dma_tx_descriptor *desc, *next;

        list_for_each_entry_safe(desc, next, list, node) {
                list_del(&desc->node);
                kfree(desc);
        }
}

/**
 * xscd_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific dma channel
 */
void xscd_dma_free_descriptors(struct xscd_dma_chan *chan)
{
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        xscd_dma_free_desc_list(chan, &chan->pending_list);
        xscd_dma_free_desc_list(chan, &chan->done_list);
        kfree(chan->active_desc);
        kfree(chan->staged_desc);

        chan->staged_desc = NULL;
        chan->active_desc = NULL;
        INIT_LIST_HEAD(&chan->pending_list);
        INIT_LIST_HEAD(&chan->done_list);

        spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xscd_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific dma channel
 */
void xscd_dma_chan_desc_cleanup(struct xscd_dma_chan *chan)
{
        struct xscd_dma_tx_descriptor *desc, *next;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        list_for_each_entry_safe(desc, next, &chan->done_list, node) {
                dma_async_tx_callback callback;
                void *callback_param;

                list_del(&desc->node);

                /* Run the link descriptor callback function */
                callback = desc->async_tx.callback;
                callback_param = desc->async_tx.callback_param;
                if (callback) {
                        spin_unlock_irqrestore(&chan->lock, flags);
                        callback(callback_param);
                        spin_lock_irqsave(&chan->lock, flags);
                }

                kfree(desc);
        }

        spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xscd_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
void xscd_dma_chan_remove(struct xscd_dma_chan *chan)
{
        list_del(&chan->common.device_node);
}

/**
 * xscd_dma_prep_interleaved - Prepare a descriptor for an interleaved
 * DMA transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: Transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xscd_dma_prep_interleaved(struct dma_chan *dchan,
                          struct dma_interleaved_template *xt,
                          unsigned long flags)
{
        struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
        struct xscd_dma_tx_descriptor *desc;
        struct xscd_dma_desc *sw;

        desc = xscd_dma_alloc_tx_descriptor(chan);
        if (!desc)
                return NULL;

        dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
        desc->async_tx.tx_submit = xscd_dma_tx_submit;
        async_tx_ack(&desc->async_tx);

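        /*
         * Only the frame geometry of the first segment and the luma plane
         * start address are recorded; xscd_dma_start_transfer() later
         * programs the luma address alone into the hardware.
         */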
        sw = &desc->sw;
        sw->vsize = xt->numf;
        sw->hsize = xt->sgl[0].size;
        sw->stride = xt->sgl[0].size + xt->sgl[0].icg;
        sw->luma_plane_addr = xt->src_start;

        return &desc->async_tx;
}

/**
 * xscd_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: DMA channel pointer
 *
 * Return: 0
 */
static int xscd_dma_terminate_all(struct dma_chan *dchan)
{
        struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);

        xscd_dma_halt(chan);
        xscd_dma_free_descriptors(chan);

        /* Worst case frame-to-frame boundary, ensure frame output complete */
        msleep(50);
        xscd_dma_reset(chan);

        return 0;
}

/**
 * xscd_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
static void xscd_dma_issue_pending(struct dma_chan *dchan)
{
        struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
        struct xscd_dma_device *dev = chan->xdev;
        u32 chan_en = 0, id;

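        /*
         * Mirror the interrupt handler: start transfers on every enabled
         * channel that has pending descriptors and, if any do, restart the
         * hardware once for the whole set.
         */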
        for (id = 0; id < dev->numchannels; id++) {
                chan = dev->chan[id];
                spin_lock(&chan->lock);
                chan->idle = true;

                if (chan->en && (!list_empty(&chan->pending_list))) {
                        chan_en |= 1 << chan->id;
                        chan->valid_interrupt = true;
                } else {
                        chan->valid_interrupt = false;
                }

                xscd_dma_start_transfer(chan);
                spin_unlock(&chan->lock);
        }

        if (chan_en) {
                xscd_dma_reset(chan);
                xscd_dma_chan_enable(chan, chan_en);
                xscd_dma_start(chan);
        }
}

static enum dma_status xscd_dma_tx_status(struct dma_chan *dchan,
                                          dma_cookie_t cookie,
                                          struct dma_tx_state *txstate)
{
        return dma_cookie_status(dchan, cookie, txstate);
}

/**
 * xscd_dma_halt - Halt dma channel
 * @chan: Driver specific dma channel
 */
void xscd_dma_halt(struct xscd_dma_chan *chan)
{
        struct xscd_dma_device *xdev = chan->xdev;

        if (xdev->memory_based)
                xscd_clr(chan->iomem, XSCD_CTRL_OFFSET, XSCD_CTRL_AP_START);
        else
                /* Streaming based */
                xscd_clr(chan->iomem, XSCD_CTRL_OFFSET,
                         XSCD_CTRL_AP_START | XSCD_CTRL_AUTO_RESTART);

        chan->idle = true;
}

/**
 * xscd_dma_start - Start dma channel
 * @chan: Driver specific dma channel
 */
void xscd_dma_start(struct xscd_dma_chan *chan)
{
        struct xscd_dma_device *xdev = chan->xdev;

        if (xdev->memory_based)
                xscd_set(chan->iomem, XSCD_CTRL_OFFSET, XSCD_CTRL_AP_START);
        else
                /* Streaming based */
                xscd_set(chan->iomem, XSCD_CTRL_OFFSET,
                         XSCD_CTRL_AP_START | XSCD_CTRL_AUTO_RESTART);

        chan->idle = false;
}

/**
 * xscd_dma_reset - Reset dma channel and enable interrupts
 * @chan: Driver specific dma channel
 */
void xscd_dma_reset(struct xscd_dma_chan *chan)
{
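        /* Enable the ap_done interrupt and the global interrupt enable bit. */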
        xscd_write(chan->iomem, XSCD_IE_OFFSET, XSCD_IE_AP_DONE);
        xscd_write(chan->iomem, XSCD_GIE_OFFSET, XSCD_GIE_EN);
}

/**
 * xscd_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xscd_dma_free_chan_resources(struct dma_chan *dchan)
{
        struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);

        xscd_dma_free_descriptors(chan);
}

/**
 * xscd_dma_do_tasklet - Completion tasklet that cleans up finished descriptors
 * @data: Pointer to the Xilinx scdma channel structure
 */
static void xscd_dma_do_tasklet(unsigned long data)
{
        struct xscd_dma_chan *chan = (struct xscd_dma_chan *)data;

        xscd_dma_chan_desc_cleanup(chan);
}

/**
 * xscd_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xscd_dma_alloc_chan_resources(struct dma_chan *dchan)
{
        dma_cookie_init(dchan);
        return 0;
}

/**
 * of_scdma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_scdma_xilinx_xlate(struct of_phandle_args *dma_spec,
                                              struct of_dma *ofdma)
{
        struct xscd_dma_device *xdev = ofdma->of_dma_data;
        u32 chan_id = dma_spec->args[0];

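        /* Hand out the channel selected by the first specifier cell. */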
        if (chan_id >= xdev->numchannels)
                return NULL;

        if (!xdev->chan[chan_id])
                return NULL;

        return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}

static struct xscd_dma_chan *
xscd_dma_chan_probe(struct xscd_dma_device *xdev, int chan_id)
{
        struct xscd_dma_chan *chan;

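        /*
         * The channel structures themselves come from the parent device;
         * only the DMA engine related fields are initialized here.
         */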
        chan = xdev->chan[chan_id];
        chan->dev = xdev->dev;
        chan->xdev = xdev;
        chan->idle = true;

        spin_lock_init(&chan->lock);
        INIT_LIST_HEAD(&chan->pending_list);
        INIT_LIST_HEAD(&chan->done_list);
        tasklet_init(&chan->tasklet, xscd_dma_do_tasklet,
                     (unsigned long)chan);
        chan->common.device = &xdev->common;
        list_add_tail(&chan->common.device_node, &xdev->common.channels);

        return chan;
}

/**
 * xscd_dma_probe - Driver probe function
 * @pdev: Pointer to the device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xscd_dma_probe(struct platform_device *pdev)
{
        struct xscd_dma_device *xdev;
        struct device_node *node;
        struct xscd_dma_chan *chan;
        struct dma_device *ddev;
        struct xscd_shared_data *shared_data;
        int ret, irq_num, chan_id = 0;

        /* Allocate and initialize the DMA engine structure */
        xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
        if (!xdev)
                return -ENOMEM;

        xdev->dev = &pdev->dev;
        ddev = &xdev->common;
        ddev->dev = &pdev->dev;
        node = xdev->dev->parent->of_node;
        xdev->dev->of_node = node;
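
        /*
         * The register space, channel list and mode of operation are shared
         * with the parent scene change detection device through its
         * driver_data.
         */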
        shared_data = (struct xscd_shared_data *)pdev->dev.parent->driver_data;
        xdev->regs = shared_data->iomem;
        xdev->chan = shared_data->dma_chan_list;
        xdev->memory_based = shared_data->memory_based;
        dma_set_mask(xdev->dev, DMA_BIT_MASK(32));

        /* Initialize the DMA engine */
        xdev->common.dev = &pdev->dev;
        ret = of_property_read_u32(node, "xlnx,numstreams",
                                   &xdev->numchannels);
        if (ret) {
                dev_err(xdev->dev, "failed to read xlnx,numstreams\n");
                return ret;
        }

        irq_num = irq_of_parse_and_map(node, 0);
        if (!irq_num) {
                dev_err(xdev->dev, "No valid irq found\n");
                return -EINVAL;
        }

        /* TODO: Clean up multiple interrupt handlers as there is one device */
        ret = devm_request_irq(xdev->dev, irq_num, xscd_dma_irq_handler,
                               IRQF_SHARED, "xilinx_scenechange DMA", xdev);
        if (ret)
                return ret;

        INIT_LIST_HEAD(&xdev->common.channels);
        dma_cap_set(DMA_SLAVE, ddev->cap_mask);
        dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
        ddev->device_alloc_chan_resources = xscd_dma_alloc_chan_resources;
        ddev->device_free_chan_resources = xscd_dma_free_chan_resources;
        ddev->device_tx_status = xscd_dma_tx_status;
        ddev->device_issue_pending = xscd_dma_issue_pending;
        ddev->device_terminate_all = xscd_dma_terminate_all;
        ddev->device_prep_interleaved_dma = xscd_dma_prep_interleaved;
        platform_set_drvdata(pdev, xdev);

        for (chan_id = 0; chan_id < xdev->numchannels; chan_id++) {
                chan = xscd_dma_chan_probe(xdev, chan_id);
                if (IS_ERR(chan)) {
                        dev_err(xdev->dev, "failed to probe a channel\n");
                        ret = PTR_ERR(chan);
                        goto error;
                }
        }

        ret = dma_async_device_register(ddev);
        if (ret) {
                dev_err(xdev->dev, "failed to register the dma device\n");
                goto error;
        }

        ret = of_dma_controller_register(xdev->dev->of_node,
                                         of_scdma_xilinx_xlate, xdev);
        if (ret) {
                dev_err(xdev->dev, "failed to register DMA to DT DMA helper\n");
                goto error_of_dma;
        }

        dev_info(&pdev->dev, "Xilinx Scene Change DMA is probed!\n");
        return 0;

error_of_dma:
        dma_async_device_unregister(ddev);

error:
        for (chan_id = 0; chan_id < xdev->numchannels; chan_id++) {
                if (xdev->chan[chan_id])
                        xscd_dma_chan_remove(xdev->chan[chan_id]);
        }
        return ret;
}

static int xscd_dma_remove(struct platform_device *pdev)
{
        struct xscd_dma_device *xdev = platform_get_drvdata(pdev);

        of_dma_controller_unregister(xdev->dev->of_node);
        dma_async_device_unregister(&xdev->common);
        return 0;
}

static struct platform_driver xscd_dma_driver = {
        .probe          = xscd_dma_probe,
        .remove         = xscd_dma_remove,
        .driver         = {
                .name   = "xlnx,scdma",
        },
};

module_platform_driver(xscd_dma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx Scene Change Detect DMA driver");
MODULE_LICENSE("GPL v2");