zynq/linux.git: drivers/media/platform/xilinx/xilinx-scenechange-dma.c
Commit: v4l: xilinx: scd: Cleanup the DMA engine at remove() time
// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx Scene Change Detection DMA driver
 *
 * Copyright (C) 2018 Xilinx, Inc.
 *
 * Authors: Anand Ashok Dumbre <anand.ashok.dumbre@xilinx.com>
 *          Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "xilinx-scenechange.h"

/**
 * xscd_dma_irq_handler - SCD DMA interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx SCD device structure
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t xscd_dma_irq_handler(int irq, void *data)
{
        struct xscd_device *xscd = data;
        struct xscd_dma_chan *chan;
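
        /*
         * In memory-based mode a single hardware interrupt covers all
         * channels: retire and restart the transfer on every enabled
         * channel, then kick the per-channel tasklets to run the
         * completion callbacks.
         */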
        if (xscd->shared_data.memory_based) {
                u32 chan_en = 0, id;

                for (id = 0; id < xscd->numchannels; id++) {
                        chan = xscd->channels[id];
                        spin_lock(&chan->lock);
                        chan->idle = true;

                        if (chan->en && !list_empty(&chan->pending_list)) {
                                chan_en |= 1 << chan->id;
                                chan->valid_interrupt = true;
                        } else {
                                chan->valid_interrupt = false;
                        }

                        xscd_dma_start_transfer(chan);
                        spin_unlock(&chan->lock);
                }

                if (chan_en) {
                        xscd_dma_reset(chan);
                        xscd_dma_chan_enable(chan, chan_en);
                        xscd_dma_start(chan);
                }

                for (id = 0; id < xscd->numchannels; id++) {
                        chan = xscd->channels[id];
                        tasklet_schedule(&chan->tasklet);
                }
        }

        return IRQ_HANDLED;
}

/* -----------------------------------------------------------------------------
 * Descriptor allocation and freeing
 */

/**
 * xscd_dma_alloc_tx_descriptor - Allocate a transaction descriptor
 * @chan: Driver specific dma channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xscd_dma_tx_descriptor *
xscd_dma_alloc_tx_descriptor(struct xscd_dma_chan *chan)
{
        struct xscd_dma_tx_descriptor *desc;

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return NULL;

        return desc;
}

/**
 * xscd_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
static dma_cookie_t xscd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct xscd_dma_tx_descriptor *desc = to_xscd_dma_tx_descriptor(tx);
        struct xscd_dma_chan *chan = to_xscd_dma_chan(tx->chan);
        dma_cookie_t cookie;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        cookie = dma_cookie_assign(tx);
        list_add_tail(&desc->node, &chan->pending_list);
        spin_unlock_irqrestore(&chan->lock, flags);

        return cookie;
}
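
/*
 * Hedged usage sketch (not part of this driver): a dmaengine client never
 * calls xscd_dma_tx_submit() directly. With a channel obtained through
 * dma_request_chan(), the standard flow is:
 *
 *      tx = dmaengine_prep_interleaved_dma(dchan, xt, flags);
 *      cookie = dmaengine_submit(tx);       // ends up in xscd_dma_tx_submit()
 *      dma_async_issue_pending(dchan);      // ends up in xscd_dma_issue_pending()
 */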

/**
 * xscd_dma_chan_enable - Enable the DMA channels
 * @chan: Driver specific dma channel
 * @chan_en: Bitmask of the channels that are ready for transfer
 */
void xscd_dma_chan_enable(struct xscd_dma_chan *chan, int chan_en)
{
        xscd_write(chan->iomem, XSCD_CHAN_EN_OFFSET, chan_en);
}
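
/*
 * XSCD_CHAN_EN_OFFSET is a single register shared by all channels: callers
 * pass the complete bitmask of the channels to run, not one channel bit.
 */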

/**
 * xscd_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan: Driver specific dma channel
 *
 * This function is invoked with the channel spinlock held.
 */
static void xscd_dma_complete_descriptor(struct xscd_dma_chan *chan)
{
        struct xscd_dma_tx_descriptor *desc = chan->active_desc;

        dma_cookie_complete(&desc->async_tx);
        list_add_tail(&desc->node, &chan->done_list);
}

/**
 * xscd_dma_start_transfer - Start a DMA transfer
 * @chan: Driver specific channel struct pointer
 */
void xscd_dma_start_transfer(struct xscd_dma_chan *chan)
{
        struct xscd_dma_tx_descriptor *desc;
        u32 chanoffset = chan->id * XSCD_CHAN_OFFSET;
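
        /*
         * The hardware is effectively double-buffered: while the frame in
         * active_desc is being processed, the address of the next frame is
         * already programmed and tracked in staged_desc. On each completion
         * the active descriptor retires to done_list, the staged descriptor
         * becomes active, and the first pending descriptor is programmed
         * into the channel registers and staged.
         */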
        if (!chan->en)
                return;

        if (!chan->idle)
                return;

        if (chan->active_desc) {
                xscd_dma_complete_descriptor(chan);
                chan->active_desc = NULL;
        }

        if (chan->staged_desc) {
                chan->active_desc = chan->staged_desc;
                chan->staged_desc = NULL;
        }

        if (list_empty(&chan->pending_list))
                return;

        desc = list_first_entry(&chan->pending_list,
                                struct xscd_dma_tx_descriptor, node);

        /* Start the transfer */
        xscd_write(chan->iomem, XSCD_ADDR_OFFSET + chanoffset,
                   desc->sw.luma_plane_addr);

        list_del(&desc->node);
        chan->staged_desc = desc;
}

/**
 * xscd_dma_free_desc_list - Free a descriptor list
 * @chan: Driver specific dma channel
 * @list: List of descriptors to parse and delete
 */
static void xscd_dma_free_desc_list(struct xscd_dma_chan *chan,
                                    struct list_head *list)
{
        struct xscd_dma_tx_descriptor *desc, *next;

        list_for_each_entry_safe(desc, next, list, node) {
                list_del(&desc->node);
                kfree(desc);
        }
}

/**
 * xscd_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific dma channel
 */
static void xscd_dma_free_descriptors(struct xscd_dma_chan *chan)
{
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        xscd_dma_free_desc_list(chan, &chan->pending_list);
        xscd_dma_free_desc_list(chan, &chan->done_list);
        kfree(chan->active_desc);
        kfree(chan->staged_desc);

        chan->staged_desc = NULL;
        chan->active_desc = NULL;
        INIT_LIST_HEAD(&chan->pending_list);
        INIT_LIST_HEAD(&chan->done_list);

        spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xscd_dma_chan_desc_cleanup - Clean up completed channel descriptors
 * @chan: Driver specific dma channel
 */
static void xscd_dma_chan_desc_cleanup(struct xscd_dma_chan *chan)
{
        struct xscd_dma_tx_descriptor *desc, *next;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        list_for_each_entry_safe(desc, next, &chan->done_list, node) {
                dma_async_tx_callback callback;
                void *callback_param;

                list_del(&desc->node);

                /* Run the link descriptor callback function */
                callback = desc->async_tx.callback;
                callback_param = desc->async_tx.callback_param;
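                /*
                 * Drop the channel lock around the callback: clients may
                 * queue new descriptors from it, which retakes chan->lock.
                 */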
                if (callback) {
                        spin_unlock_irqrestore(&chan->lock, flags);
                        callback(callback_param);
                        spin_lock_irqsave(&chan->lock, flags);
                }

                kfree(desc);
        }

        spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xscd_dma_chan_remove - Per-channel remove function
 * @chan: Driver specific DMA channel
 */
static void xscd_dma_chan_remove(struct xscd_dma_chan *chan)
{
        list_del(&chan->common.device_node);
}

/**
 * xscd_dma_prep_interleaved - Prepare a descriptor for a
 * DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: Transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xscd_dma_prep_interleaved(struct dma_chan *dchan,
                          struct dma_interleaved_template *xt,
                          unsigned long flags)
{
        struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
        struct xscd_dma_tx_descriptor *desc;
        struct xscd_dma_desc *sw;

        desc = xscd_dma_alloc_tx_descriptor(chan);
        if (!desc)
                return NULL;

        dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
        desc->async_tx.tx_submit = xscd_dma_tx_submit;
        async_tx_ack(&desc->async_tx);
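
        /*
         * Map the interleaved template onto the scene change detector
         * parameters: numf is the number of lines (vsize), sgl[0].size the
         * line length in bytes (hsize), and size + icg the line stride.
         * Only the luma plane address is handed to the hardware.
         */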
        sw = &desc->sw;
        sw->vsize = xt->numf;
        sw->hsize = xt->sgl[0].size;
        sw->stride = xt->sgl[0].size + xt->sgl[0].icg;
        sw->luma_plane_addr = xt->src_start;

        return &desc->async_tx;
}

/**
 * xscd_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific dma channel pointer
 *
 * Return: 0
 */
static int xscd_dma_terminate_all(struct dma_chan *dchan)
{
        struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);

        xscd_dma_halt(chan);
        xscd_dma_free_descriptors(chan);

        /*
         * Wait for the worst-case frame-to-frame interval to ensure the
         * output of the in-flight frame completes before resetting.
         */
        msleep(50);
        xscd_dma_reset(chan);

        return 0;
}

/**
 * xscd_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
static void xscd_dma_issue_pending(struct dma_chan *dchan)
{
        struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
        struct xscd_device *xscd = chan->xscd;
        u32 chan_en = 0, id;
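
        /*
         * The enable and start registers are global to the SCD core, so
         * issuing pending transactions sweeps every channel and restarts
         * the core with the union of the ready channels, mirroring
         * xscd_dma_irq_handler().
         */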
        for (id = 0; id < xscd->numchannels; id++) {
                chan = xscd->channels[id];
                spin_lock(&chan->lock);
                chan->idle = true;

                if (chan->en && !list_empty(&chan->pending_list)) {
                        chan_en |= 1 << chan->id;
                        chan->valid_interrupt = true;
                } else {
                        chan->valid_interrupt = false;
                }

                xscd_dma_start_transfer(chan);
                spin_unlock(&chan->lock);
        }

        if (chan_en) {
                xscd_dma_reset(chan);
                xscd_dma_chan_enable(chan, chan_en);
                xscd_dma_start(chan);
        }
}

static enum dma_status xscd_dma_tx_status(struct dma_chan *dchan,
                                          dma_cookie_t cookie,
                                          struct dma_tx_state *txstate)
{
        return dma_cookie_status(dchan, cookie, txstate);
}

/**
 * xscd_dma_halt - Halt the DMA channel
 * @chan: Driver specific dma channel
 */
void xscd_dma_halt(struct xscd_dma_chan *chan)
{
        struct xscd_device *xscd = chan->xscd;

        if (xscd->shared_data.memory_based)
                xscd_clr(chan->iomem, XSCD_CTRL_OFFSET, XSCD_CTRL_AP_START);
        else
                /* Streaming based */
                xscd_clr(chan->iomem, XSCD_CTRL_OFFSET,
                         XSCD_CTRL_AP_START | XSCD_CTRL_AUTO_RESTART);

        chan->idle = true;
}

/**
 * xscd_dma_start - Start the DMA channel
 * @chan: Driver specific dma channel
 */
void xscd_dma_start(struct xscd_dma_chan *chan)
{
        struct xscd_device *xscd = chan->xscd;

        if (xscd->shared_data.memory_based)
                xscd_set(chan->iomem, XSCD_CTRL_OFFSET, XSCD_CTRL_AP_START);
        else
                /* Streaming based */
                xscd_set(chan->iomem, XSCD_CTRL_OFFSET,
                         XSCD_CTRL_AP_START | XSCD_CTRL_AUTO_RESTART);

        chan->idle = false;
}

/**
 * xscd_dma_reset - Reset the DMA channel and enable interrupts
 * @chan: Driver specific dma channel
 */
void xscd_dma_reset(struct xscd_dma_chan *chan)
{
        xscd_write(chan->iomem, XSCD_IE_OFFSET, XSCD_IE_AP_DONE);
        xscd_write(chan->iomem, XSCD_GIE_OFFSET, XSCD_GIE_EN);
}
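
/*
 * Despite its name, the function above only re-arms the interrupt sources
 * (the per-channel ap_done enable and the global interrupt enable); the
 * core itself is restarted separately through xscd_dma_start().
 */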

/**
 * xscd_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xscd_dma_free_chan_resources(struct dma_chan *dchan)
{
        struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);

        xscd_dma_free_descriptors(chan);
}

/**
 * xscd_dma_do_tasklet - Completion tasklet handler
 * @data: Pointer to the Xilinx scdma channel structure
 */
static void xscd_dma_do_tasklet(unsigned long data)
{
        struct xscd_dma_chan *chan = (struct xscd_dma_chan *)data;

        xscd_dma_chan_desc_cleanup(chan);
}
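
/*
 * The tasklet defers completion callbacks out of hard interrupt context.
 * Note that this uses the legacy unsigned long tasklet prototype; newer
 * kernels would use tasklet_setup() and from_tasklet() instead.
 */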

/**
 * xscd_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xscd_dma_alloc_chan_resources(struct dma_chan *dchan)
{
        dma_cookie_init(dchan);
        return 0;
}

/**
 * of_scdma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_scdma_xilinx_xlate(struct of_phandle_args *dma_spec,
                                              struct of_dma *ofdma)
{
        struct xscd_device *xscd = ofdma->of_dma_data;
        u32 chan_id = dma_spec->args[0];

        if (chan_id >= xscd->numchannels)
                return NULL;

        if (!xscd->channels[chan_id])
                return NULL;

        return dma_get_slave_channel(&xscd->channels[chan_id]->common);
}
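
/*
 * Hedged device tree sketch (node and property names are illustrative, not
 * taken from a binding document): a client references SCD channel <n> with
 * a single specifier cell, which of_scdma_xilinx_xlate() resolves to
 * xscd->channels[n]:
 *
 *      video_cap {
 *              dmas = <&scd 0>;
 *              dma-names = "scd";
 *      };
 */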

static struct xscd_dma_chan *
xscd_dma_chan_probe(struct xscd_device *xscd, int chan_id)
{
        struct xscd_dma_chan *chan = xscd->channels[chan_id];

        chan->xscd = xscd;
        chan->idle = true;

        spin_lock_init(&chan->lock);
        INIT_LIST_HEAD(&chan->pending_list);
        INIT_LIST_HEAD(&chan->done_list);
        tasklet_init(&chan->tasklet, xscd_dma_do_tasklet,
                     (unsigned long)chan);
        chan->common.device = &xscd->dma_device;
        list_add_tail(&chan->common.device_node, &xscd->dma_device.channels);

        return chan;
}

/**
 * xscd_dma_init - Initialize the SCD DMA engine
 * @xscd: Pointer to the SCD device structure
 *
 * Return: '0' on success and failure value on error
 */
int xscd_dma_init(struct xscd_device *xscd)
{
        struct device_node *node = xscd->dev->of_node;
        struct dma_device *ddev = &xscd->dma_device;
        struct xscd_dma_chan *chan;
        int ret, irq_num, chan_id = 0;

        /* Initialize the DMA engine */
        ddev->dev = xscd->dev;
        ret = dma_set_mask(xscd->dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;

        ret = of_property_read_u32(node, "xlnx,numstreams",
                                   &xscd->numchannels);
        if (ret) {
                dev_err(xscd->dev, "missing xlnx,numstreams property\n");
                return ret;
        }

        irq_num = irq_of_parse_and_map(node, 0);
        if (!irq_num) {
                dev_err(xscd->dev, "No valid irq found\n");
                return -EINVAL;
        }

        /* TODO: Clean up multiple interrupt handlers as there is one device */
        ret = devm_request_irq(xscd->dev, irq_num, xscd_dma_irq_handler,
                               IRQF_SHARED, "xilinx_scenechange DMA", xscd);
        if (ret) {
                dev_err(xscd->dev, "failed to request the IRQ\n");
                return ret;
        }

        INIT_LIST_HEAD(&ddev->channels);
        dma_cap_set(DMA_SLAVE, ddev->cap_mask);
        dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
        ddev->device_alloc_chan_resources = xscd_dma_alloc_chan_resources;
        ddev->device_free_chan_resources = xscd_dma_free_chan_resources;
        ddev->device_tx_status = xscd_dma_tx_status;
        ddev->device_issue_pending = xscd_dma_issue_pending;
        ddev->device_terminate_all = xscd_dma_terminate_all;
        ddev->device_prep_interleaved_dma = xscd_dma_prep_interleaved;
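
        /*
         * The per-channel structures themselves are allocated by the SCD
         * core driver; each one is only initialized here and linked into
         * the dmaengine channel list.
         */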
        for (chan_id = 0; chan_id < xscd->numchannels; chan_id++) {
                chan = xscd_dma_chan_probe(xscd, chan_id);
                if (IS_ERR(chan)) {
                        dev_err(xscd->dev, "failed to probe a channel\n");
                        ret = PTR_ERR(chan);
                        goto error;
                }
        }

        ret = dma_async_device_register(ddev);
        if (ret) {
                dev_err(xscd->dev, "failed to register the dma device\n");
                goto error;
        }

        ret = of_dma_controller_register(xscd->dev->of_node,
                                         of_scdma_xilinx_xlate, xscd);
        if (ret) {
                dev_err(xscd->dev, "failed to register DMA to DT DMA helper\n");
                goto error_of_dma;
        }

        dev_info(xscd->dev, "Xilinx Scene Change DMA is probed!\n");
        return 0;

error_of_dma:
        dma_async_device_unregister(ddev);

error:
        for (chan_id = 0; chan_id < xscd->numchannels; chan_id++) {
                if (xscd->channels[chan_id])
                        xscd_dma_chan_remove(xscd->channels[chan_id]);
        }
        return ret;
}

/**
 * xscd_dma_cleanup - Clean up the SCD DMA engine
 * @xscd: Pointer to the SCD device structure
 *
 * This function is the counterpart of xscd_dma_init() and cleans up the
 * resources related to the DMA engine.
 */
void xscd_dma_cleanup(struct xscd_device *xscd)
{
        dma_async_device_unregister(&xscd->dma_device);
        of_dma_controller_free(xscd->dev->of_node);
}