// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx Scene Change Detection DMA driver
 *
 * Copyright (C) 2018 Xilinx, Inc.
 *
 * Authors: Anand Ashok Dumbre <anand.ashok.dumbre@xilinx.com>
 *          Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "xilinx-scenechange.h"

/**
 * xscd_dma_irq_handler - SCD DMA interrupt handler
 * @xscd: Pointer to the SCD device structure
 */
void xscd_dma_irq_handler(struct xscd_device *xscd)
{
        struct xscd_dma_chan *chan;

        if (xscd->memory_based) {
                u32 chan_en = 0, id;

                for (id = 0; id < xscd->num_streams; id++) {
                        chan = xscd->channels[id];
                        spin_lock(&chan->lock);
                        chan->idle = true;

                        if (chan->en && !list_empty(&chan->pending_list)) {
                                chan_en |= 1 << chan->id;
                                chan->valid_interrupt = true;
                        } else {
                                chan->valid_interrupt = false;
                        }

                        xscd_dma_start_transfer(chan);
                        spin_unlock(&chan->lock);
                }

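                /*
                 * The enable, reset and start registers written below live
                 * in the device's shared register space rather than in
                 * per-channel space, so any channel pointer works as a
                 * handle; after the loop above, 'chan' merely points at the
                 * last channel.
                 */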
                if (chan_en) {
                        xscd_dma_reset(chan);
                        xscd_dma_chan_enable(chan, chan_en);
                        xscd_dma_start(chan);
                }

                for (id = 0; id < xscd->num_streams; id++) {
                        chan = xscd->channels[id];
                        tasklet_schedule(&chan->tasklet);
                }
        }
}

/* -----------------------------------------------------------------------------
 * Descriptors alloc and free
 */

/**
 * xscd_dma_alloc_tx_descriptor - Allocate a transaction descriptor
 * @chan: Driver specific dma channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xscd_dma_tx_descriptor *
xscd_dma_alloc_tx_descriptor(struct xscd_dma_chan *chan)
{
        struct xscd_dma_tx_descriptor *desc;

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);

        return desc;
}

/**
 * xscd_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
static dma_cookie_t xscd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct xscd_dma_tx_descriptor *desc = to_xscd_dma_tx_descriptor(tx);
        struct xscd_dma_chan *chan = to_xscd_dma_chan(tx->chan);
        dma_cookie_t cookie;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        cookie = dma_cookie_assign(tx);
        list_add_tail(&desc->node, &chan->pending_list);
        spin_unlock_irqrestore(&chan->lock, flags);

        return cookie;
}
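
/*
 * Consumers never call .tx_submit() directly; the dmaengine core invokes it
 * through dmaengine_submit(). A minimal sketch of the expected client-side
 * flow, with illustrative variable names only:
 *
 *      tx = dmaengine_prep_interleaved_dma(dchan, xt, flags);
 *      cookie = dmaengine_submit(tx);      // ends up in xscd_dma_tx_submit()
 *      dma_async_issue_pending(dchan);     // kicks xscd_dma_issue_pending()
 */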

/**
 * xscd_dma_chan_enable - Enable dma channel
 * @chan: Driver specific dma channel
 * @chan_en: Bitmask of the channels ready for transfer
 */
void xscd_dma_chan_enable(struct xscd_dma_chan *chan, int chan_en)
{
        xscd_write(chan->xscd->iomem, XSCD_CHAN_EN_OFFSET, chan_en);
}

/**
 * xscd_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan: Driver specific dma channel
 *
 * This function is invoked with the channel spinlock held.
 */
static void xscd_dma_complete_descriptor(struct xscd_dma_chan *chan)
{
        struct xscd_dma_tx_descriptor *desc = chan->active_desc;

        dma_cookie_complete(&desc->async_tx);
        list_add_tail(&desc->node, &chan->done_list);
}

/**
 * xscd_dma_start_transfer - Start a DMA transfer
 * @chan: Driver specific channel struct pointer
 *
 * Complete the currently active descriptor, promote the staged descriptor
 * to active, and stage the first pending descriptor by programming its luma
 * plane address into the hardware, keeping up to two transfers in flight.
 */
void xscd_dma_start_transfer(struct xscd_dma_chan *chan)
{
        struct xscd_dma_tx_descriptor *desc;

        if (!chan->en)
                return;

        if (!chan->idle)
                return;

        if (chan->active_desc) {
                xscd_dma_complete_descriptor(chan);
                chan->active_desc = NULL;
        }

        if (chan->staged_desc) {
                chan->active_desc = chan->staged_desc;
                chan->staged_desc = NULL;
        }

        if (list_empty(&chan->pending_list))
                return;

        desc = list_first_entry(&chan->pending_list,
                                struct xscd_dma_tx_descriptor, node);

        /* Start the transfer by programming the luma plane address */
        xscd_write(chan->iomem, XSCD_ADDR_OFFSET, desc->sw.luma_plane_addr);

        list_del(&desc->node);
        chan->staged_desc = desc;
}

/**
 * xscd_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific dma channel
 * @list: List to parse and delete descriptors from
 */
static void xscd_dma_free_desc_list(struct xscd_dma_chan *chan,
                                    struct list_head *list)
{
        struct xscd_dma_tx_descriptor *desc, *next;

        list_for_each_entry_safe(desc, next, list, node) {
                list_del(&desc->node);
                kfree(desc);
        }
}

/**
 * xscd_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific dma channel
 */
static void xscd_dma_free_descriptors(struct xscd_dma_chan *chan)
{
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        xscd_dma_free_desc_list(chan, &chan->pending_list);
        xscd_dma_free_desc_list(chan, &chan->done_list);
        kfree(chan->active_desc);
        kfree(chan->staged_desc);

        chan->staged_desc = NULL;
        chan->active_desc = NULL;
        INIT_LIST_HEAD(&chan->pending_list);
        INIT_LIST_HEAD(&chan->done_list);

        spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xscd_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific dma channel
 */
static void xscd_dma_chan_desc_cleanup(struct xscd_dma_chan *chan)
{
        struct xscd_dma_tx_descriptor *desc, *next;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        list_for_each_entry_safe(desc, next, &chan->done_list, node) {
                dma_async_tx_callback callback;
                void *callback_param;

                list_del(&desc->node);

                /*
                 * Run the descriptor callback with the channel lock
                 * dropped, to avoid deadlocks if the callback calls back
                 * into the driver.
                 */
                callback = desc->async_tx.callback;
                callback_param = desc->async_tx.callback_param;
                if (callback) {
                        spin_unlock_irqrestore(&chan->lock, flags);
                        callback(callback_param);
                        spin_lock_irqsave(&chan->lock, flags);
                }

                kfree(desc);
        }

        spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xscd_dma_chan_remove - Per channel remove function
 * @chan: Driver specific DMA channel
 */
static void xscd_dma_chan_remove(struct xscd_dma_chan *chan)
{
        list_del(&chan->common.device_node);
}

/**
 * xscd_dma_prep_interleaved - Prepare a descriptor for an interleaved
 * DMA transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xscd_dma_prep_interleaved(struct dma_chan *dchan,
                          struct dma_interleaved_template *xt,
                          unsigned long flags)
{
        struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
        struct xscd_dma_tx_descriptor *desc;
        struct xscd_dma_desc *sw;

        desc = xscd_dma_alloc_tx_descriptor(chan);
        if (!desc)
                return NULL;

        dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
        desc->async_tx.tx_submit = xscd_dma_tx_submit;
        async_tx_ack(&desc->async_tx);

        sw = &desc->sw;
        sw->vsize = xt->numf;
        sw->hsize = xt->sgl[0].size;
        sw->stride = xt->sgl[0].size + xt->sgl[0].icg;
        sw->luma_plane_addr = xt->src_start;

        return &desc->async_tx;
}
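
/*
 * A hedged sketch of how a client might describe one luma plane to this
 * callback through the generic dmaengine API. All names and values below
 * are illustrative, not part of this driver:
 *
 *      struct dma_interleaved_template *xt;
 *
 *      xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *      xt->src_start = luma_dma_addr;      // bus address of the luma plane
 *      xt->dir = DMA_MEM_TO_DEV;
 *      xt->numf = height;                  // vsize: number of lines
 *      xt->frame_size = 1;
 *      xt->sgl[0].size = width;            // hsize: bytes per line
 *      xt->sgl[0].icg = stride - width;    // so that stride = size + icg
 *      tx = dmaengine_prep_interleaved_dma(dchan, xt, DMA_PREP_INTERRUPT);
 */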

/**
 * xscd_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific dma channel pointer
 *
 * Return: 0
 */
static int xscd_dma_terminate_all(struct dma_chan *dchan)
{
        struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);

        xscd_dma_halt(chan);
        xscd_dma_free_descriptors(chan);

        /* Worst case frame-to-frame boundary, ensure frame output complete */
        msleep(50);
        xscd_dma_reset(chan);

        return 0;
}

/**
 * xscd_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 *
 * Scan all SCD channels, start the pending transfers on the ready ones and
 * kick the hardware once for the whole batch.
 */
static void xscd_dma_issue_pending(struct dma_chan *dchan)
{
        struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
        struct xscd_device *xscd = chan->xscd;
        u32 chan_en = 0, id;

        for (id = 0; id < xscd->num_streams; id++) {
                chan = xscd->channels[id];
                spin_lock(&chan->lock);
                chan->idle = true;

                if (chan->en && !list_empty(&chan->pending_list)) {
                        chan_en |= 1 << chan->id;
                        chan->valid_interrupt = true;
                } else {
                        chan->valid_interrupt = false;
                }

                xscd_dma_start_transfer(chan);
                spin_unlock(&chan->lock);
        }

        if (chan_en) {
                xscd_dma_reset(chan);
                xscd_dma_chan_enable(chan, chan_en);
                xscd_dma_start(chan);
        }
}

static enum dma_status xscd_dma_tx_status(struct dma_chan *dchan,
                                          dma_cookie_t cookie,
                                          struct dma_tx_state *txstate)
{
        return dma_cookie_status(dchan, cookie, txstate);
}

/**
 * xscd_dma_halt - Halt dma channel
 * @chan: Driver specific dma channel
 */
void xscd_dma_halt(struct xscd_dma_chan *chan)
{
        struct xscd_device *xscd = chan->xscd;

        if (xscd->memory_based)
                xscd_clr(chan->xscd->iomem, XSCD_CTRL_OFFSET,
                         XSCD_CTRL_AP_START);
        else
                /* Streaming based */
                xscd_clr(chan->xscd->iomem, XSCD_CTRL_OFFSET,
                         XSCD_CTRL_AP_START | XSCD_CTRL_AUTO_RESTART);

        chan->idle = true;
}
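
/*
 * In streaming mode the hardware is expected to re-run itself on every
 * frame, so AP_START and AUTO_RESTART are cleared and set together in
 * xscd_dma_halt() above and xscd_dma_start() below. In memory mode each
 * batch of transfers is kicked explicitly, so only AP_START is toggled.
 */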

/**
 * xscd_dma_start - Start dma channel
 * @chan: Driver specific dma channel
 */
void xscd_dma_start(struct xscd_dma_chan *chan)
{
        struct xscd_device *xscd = chan->xscd;

        if (xscd->memory_based)
                xscd_set(chan->xscd->iomem, XSCD_CTRL_OFFSET,
                         XSCD_CTRL_AP_START);
        else
                /* Streaming based */
                xscd_set(chan->xscd->iomem, XSCD_CTRL_OFFSET,
                         XSCD_CTRL_AP_START | XSCD_CTRL_AUTO_RESTART);

        chan->idle = false;
}

/**
 * xscd_dma_reset - Reset the dma channel by reprogramming its interrupt
 * enables (ap_done and the global interrupt enable)
 * @chan: Driver specific dma channel
 */
void xscd_dma_reset(struct xscd_dma_chan *chan)
{
        xscd_write(chan->xscd->iomem, XSCD_IE_OFFSET, XSCD_IE_AP_DONE);
        xscd_write(chan->xscd->iomem, XSCD_GIE_OFFSET, XSCD_GIE_EN);
}

/**
 * xscd_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xscd_dma_free_chan_resources(struct dma_chan *dchan)
{
        struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);

        xscd_dma_free_descriptors(chan);
}

/**
 * xscd_dma_do_tasklet - Per channel tasklet that cleans up completed
 * descriptors
 * @data: Pointer to the Xilinx scdma channel structure
 */
static void xscd_dma_do_tasklet(unsigned long data)
{
        struct xscd_dma_chan *chan = (struct xscd_dma_chan *)data;

        xscd_dma_chan_desc_cleanup(chan);
}

/**
 * xscd_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xscd_dma_alloc_chan_resources(struct dma_chan *dchan)
{
        dma_cookie_init(dchan);
        return 0;
}

/**
 * of_scdma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_scdma_xilinx_xlate(struct of_phandle_args *dma_spec,
                                              struct of_dma *ofdma)
{
        struct xscd_device *xscd = ofdma->of_dma_data;
        u32 chan_id = dma_spec->args[0];

        if (chan_id >= xscd->num_streams)
                return NULL;

        if (!xscd->channels[chan_id])
                return NULL;

        return dma_get_slave_channel(&xscd->channels[chan_id]->common);
}
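
/*
 * The translation above assumes a single-cell DMA specifier carrying the
 * channel index. A consumer device tree node could thus reference a channel
 * roughly as follows (labels and names are illustrative only):
 *
 *      video_cap {
 *              dmas = <&scd 0>;
 *              dma-names = "rx";
 *      };
 */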

static struct xscd_dma_chan *
xscd_dma_chan_probe(struct xscd_device *xscd, int chan_id)
{
        struct xscd_dma_chan *chan = xscd->channels[chan_id];

        chan->xscd = xscd;
        chan->idle = true;

        spin_lock_init(&chan->lock);
        INIT_LIST_HEAD(&chan->pending_list);
        INIT_LIST_HEAD(&chan->done_list);
        tasklet_init(&chan->tasklet, xscd_dma_do_tasklet,
                     (unsigned long)chan);
        chan->common.device = &xscd->dma_device;
        list_add_tail(&chan->common.device_node, &xscd->dma_device.channels);

        return chan;
}

/**
 * xscd_dma_init - Initialize the SCD DMA engine
 * @xscd: Pointer to the SCD device structure
 *
 * Return: '0' on success and failure value on error
 */
int xscd_dma_init(struct xscd_device *xscd)
{
        struct dma_device *ddev = &xscd->dma_device;
        struct xscd_dma_chan *chan;
        unsigned int chan_id;
        int ret;

        /* Initialize the DMA engine */
        ddev->dev = xscd->dev;
        ret = dma_set_mask(xscd->dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;

        INIT_LIST_HEAD(&ddev->channels);
        dma_cap_set(DMA_SLAVE, ddev->cap_mask);
        dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
        ddev->device_alloc_chan_resources = xscd_dma_alloc_chan_resources;
        ddev->device_free_chan_resources = xscd_dma_free_chan_resources;
        ddev->device_tx_status = xscd_dma_tx_status;
        ddev->device_issue_pending = xscd_dma_issue_pending;
        ddev->device_terminate_all = xscd_dma_terminate_all;
        ddev->device_prep_interleaved_dma = xscd_dma_prep_interleaved;

        for (chan_id = 0; chan_id < xscd->num_streams; chan_id++) {
                chan = xscd_dma_chan_probe(xscd, chan_id);
                if (IS_ERR(chan)) {
                        dev_err(xscd->dev, "failed to probe a channel\n");
                        ret = PTR_ERR(chan);
                        goto error;
                }
        }

        ret = dma_async_device_register(ddev);
        if (ret) {
                dev_err(xscd->dev, "failed to register the dma device\n");
                goto error;
        }

        ret = of_dma_controller_register(xscd->dev->of_node,
                                         of_scdma_xilinx_xlate, xscd);
        if (ret) {
                dev_err(xscd->dev, "failed to register DMA to DT DMA helper\n");
                goto error_of_dma;
        }

        dev_info(xscd->dev, "Xilinx Scene Change DMA is probed!\n");
        return 0;

error_of_dma:
        dma_async_device_unregister(ddev);

error:
        for (chan_id = 0; chan_id < xscd->num_streams; chan_id++) {
                if (xscd->channels[chan_id])
                        xscd_dma_chan_remove(xscd->channels[chan_id]);
        }
        return ret;
}
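
/*
 * xscd_dma_init() above is intended to be called once from the SCD core
 * driver's probe path, after xscd->channels[] has been populated;
 * xscd_dma_cleanup() below is its remove-time counterpart.
 */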

/**
 * xscd_dma_cleanup - Clean up the SCD DMA engine
 * @xscd: Pointer to the SCD device structure
 *
 * This function is the counterpart of xscd_dma_init() and cleans up the
 * resources related to the DMA engine.
 */
void xscd_dma_cleanup(struct xscd_device *xscd)
{
        /*
         * Tear down in reverse order of xscd_dma_init(): remove the OF
         * lookup first so no new channels can be handed out, then
         * unregister the DMA device.
         */
        of_dma_controller_free(xscd->dev->of_node);
        dma_async_device_unregister(&xscd->dma_device);
}