// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx Scene Change Detection DMA driver
 *
 * Copyright (C) 2018 Xilinx, Inc.
 *
 * Authors: Anand Ashok Dumbre <anand.ashok.dumbre@xilinx.com>
 *          Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/of_dma.h>
#include <linux/slab.h>

#include "../../../dma/dmaengine.h"

#include "xilinx-scenechange.h"

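/*
 * Descriptor lifecycle: clients queue descriptors on a channel's pending
 * list through ->tx_submit(). xscd_dma_start_transfer() programs the first
 * pending descriptor into the hardware ("staged"); at the next transfer
 * boundary it becomes "active", and once finished it is moved to the done
 * list, where the channel tasklet runs its completion callback and frees it.
 */
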
/**
 * xscd_dma_irq_handler - SCD DMA interrupt handler
 * @xscd: Pointer to the SCD device structure
 */
void xscd_dma_irq_handler(struct xscd_device *xscd)
{
	struct xscd_dma_chan *chan;

	if (xscd->memory_based) {
		u32 chan_en = 0, id;

		for (id = 0; id < xscd->num_streams; id++) {
			chan = xscd->channels[id];
			spin_lock(&chan->lock);
			chan->idle = true;

			if (chan->en && !list_empty(&chan->pending_list)) {
				chan_en |= 1 << chan->id;
				chan->valid_interrupt = true;
			} else {
				chan->valid_interrupt = false;
			}

			xscd_dma_start_transfer(chan);
			spin_unlock(&chan->lock);
		}

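		/*
		 * The enable, reset and start registers below are global to
		 * the SCD core, so the channel pointer left over from the
		 * loop is only used to reach the device registers.
		 */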
		if (chan_en) {
			xscd_dma_reset(chan);
			xscd_dma_chan_enable(chan, chan_en);
			xscd_dma_start(chan);
		}

		for (id = 0; id < xscd->num_streams; id++) {
			chan = xscd->channels[id];
			tasklet_schedule(&chan->tasklet);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Descriptors alloc and free
 */

/**
 * xscd_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
static dma_cookie_t xscd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xscd_dma_tx_descriptor *desc = to_xscd_dma_tx_descriptor(tx);
	struct xscd_dma_chan *chan = to_xscd_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	cookie = dma_cookie_assign(tx);
	list_add_tail(&desc->node, &chan->pending_list);
	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}

/**
 * xscd_dma_chan_enable - Enable dma channel
 * @chan: Driver specific dma channel
 * @chan_en: Bitmask of the channels ready for transfer
 */
void xscd_dma_chan_enable(struct xscd_dma_chan *chan, int chan_en)
{
	xscd_write(chan->xscd->iomem, XSCD_CHAN_EN_OFFSET, chan_en);
}

/**
 * xscd_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan: Driver specific dma channel
 *
 * This function must be called with the channel spinlock held.
 */
static void xscd_dma_complete_descriptor(struct xscd_dma_chan *chan)
{
	struct xscd_dma_tx_descriptor *desc = chan->active_desc;

	dma_cookie_complete(&desc->async_tx);
	list_add_tail(&desc->node, &chan->done_list);
}

/**
 * xscd_dma_start_transfer - Starts dma transfer
 * @chan: Driver specific channel struct pointer
 */
void xscd_dma_start_transfer(struct xscd_dma_chan *chan)
{
	struct xscd_dma_tx_descriptor *desc;

	if (!chan->en)
		return;

	if (!chan->idle)
		return;

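	/*
	 * Two-deep descriptor pipeline: retire the descriptor that just
	 * finished, promote the previously programmed (staged) descriptor
	 * to active, then stage the next pending one by writing its luma
	 * plane address to the hardware.
	 */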
	if (chan->active_desc) {
		xscd_dma_complete_descriptor(chan);
		chan->active_desc = NULL;
	}

	if (chan->staged_desc) {
		chan->active_desc = chan->staged_desc;
		chan->staged_desc = NULL;
	}

	if (list_empty(&chan->pending_list))
		return;

	desc = list_first_entry(&chan->pending_list,
				struct xscd_dma_tx_descriptor, node);

	/* Start the transfer */
	xscd_write(chan->iomem, XSCD_ADDR_OFFSET, desc->sw.luma_plane_addr);

	list_del(&desc->node);
	chan->staged_desc = desc;
}

/**
 * xscd_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific dma channel
 * @list: List to parse and delete the descriptor
 */
static void xscd_dma_free_desc_list(struct xscd_dma_chan *chan,
				    struct list_head *list)
{
	struct xscd_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		kfree(desc);
	}
}

/**
 * xscd_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific dma channel
 */
static void xscd_dma_free_descriptors(struct xscd_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xscd_dma_free_desc_list(chan, &chan->pending_list);
	xscd_dma_free_desc_list(chan, &chan->done_list);
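	/* Also drop descriptors already staged in or being processed by HW */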
	kfree(chan->active_desc);
	kfree(chan->staged_desc);

	chan->staged_desc = NULL;
	chan->active_desc = NULL;
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xscd_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific dma channel
 */
static void xscd_dma_chan_desc_cleanup(struct xscd_dma_chan *chan)
{
	struct xscd_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		list_del(&desc->node);

		/*
		 * Run the descriptor callback with the channel lock
		 * released, as the callback may resubmit descriptors and
		 * take the lock again.
		 */
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			spin_unlock_irqrestore(&chan->lock, flags);
			callback(callback_param);
			spin_lock_irqsave(&chan->lock, flags);
		}

		kfree(desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xscd_dma_prep_interleaved - prepare a descriptor for a DMA_SLAVE
 * transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xscd_dma_prep_interleaved(struct dma_chan *dchan,
			  struct dma_interleaved_template *xt,
			  unsigned long flags)
{
	struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
	struct xscd_dma_tx_descriptor *desc;
	struct xscd_dma_desc *sw;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xscd_dma_tx_submit;
	async_tx_ack(&desc->async_tx);

	sw = &desc->sw;
	sw->vsize = xt->numf;
	sw->hsize = xt->sgl[0].size;
	sw->stride = xt->sgl[0].size + xt->sgl[0].icg;
	sw->luma_plane_addr = xt->src_start;

	return &desc->async_tx;
}
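
/*
 * Usage sketch (illustrative, not lifted from the V4L2 client code): a DMA
 * client would typically prepare and submit one luma frame along these
 * lines, where luma_addr, height, width and stride are placeholders. Only
 * src_start, numf, sgl[0].size and sgl[0].icg are consumed by this driver.
 *
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	xt->src_start = luma_addr;
 *	xt->numf = height;
 *	xt->frame_size = 1;
 *	xt->sgl[0].size = width;
 *	xt->sgl[0].icg = stride - width;
 *
 *	tx = dmaengine_prep_interleaved_dma(dchan, xt, 0);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(dchan);
 */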

/**
 * xscd_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific dma channel pointer
 *
 * Return: 0
 */
static int xscd_dma_terminate_all(struct dma_chan *dchan)
{
	struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);

	xscd_dma_halt(chan);
	xscd_dma_free_descriptors(chan);

	/* Wait the worst-case frame interval so the current frame completes */
	msleep(50);
	xscd_dma_reset(chan);

	return 0;
}

/**
 * xscd_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
static void xscd_dma_issue_pending(struct dma_chan *dchan)
{
	struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
	struct xscd_device *xscd = chan->xscd;
	u32 chan_en = 0, id;

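	/*
	 * This mirrors the memory-based path of xscd_dma_irq_handler():
	 * mark idle channels, collect the enable bitmask and kick the core
	 * for every channel that has pending work.
	 */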
	for (id = 0; id < xscd->num_streams; id++) {
		chan = xscd->channels[id];
		spin_lock(&chan->lock);
		chan->idle = true;

		if (chan->en && !list_empty(&chan->pending_list)) {
			chan_en |= 1 << chan->id;
			chan->valid_interrupt = true;
		} else {
			chan->valid_interrupt = false;
		}

		xscd_dma_start_transfer(chan);
		spin_unlock(&chan->lock);
	}

	if (chan_en) {
		xscd_dma_reset(chan);
		xscd_dma_chan_enable(chan, chan_en);
		xscd_dma_start(chan);
	}
}

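/**
 * xscd_dma_tx_status - Get DMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */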
static enum dma_status xscd_dma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

/**
 * xscd_dma_halt - Halt dma channel
 * @chan: Driver specific dma channel
 */
void xscd_dma_halt(struct xscd_dma_chan *chan)
{
	struct xscd_device *xscd = chan->xscd;

	if (xscd->memory_based)
		xscd_clr(chan->xscd->iomem, XSCD_CTRL_OFFSET,
			 XSCD_CTRL_AP_START);
	else
		/* Streaming based */
		xscd_clr(chan->xscd->iomem, XSCD_CTRL_OFFSET,
			 XSCD_CTRL_AP_START | XSCD_CTRL_AUTO_RESTART);

	chan->idle = true;
}

/**
 * xscd_dma_start - Start dma channel
 * @chan: Driver specific dma channel
 */
void xscd_dma_start(struct xscd_dma_chan *chan)
{
	struct xscd_device *xscd = chan->xscd;

	if (xscd->memory_based)
		xscd_set(chan->xscd->iomem, XSCD_CTRL_OFFSET,
			 XSCD_CTRL_AP_START);
	else
		/* Streaming based */
		xscd_set(chan->xscd->iomem, XSCD_CTRL_OFFSET,
			 XSCD_CTRL_AP_START | XSCD_CTRL_AUTO_RESTART);

	chan->idle = false;
}

/**
 * xscd_dma_reset - Reset dma channel and enable interrupts
 * @chan: Driver specific dma channel
 */
void xscd_dma_reset(struct xscd_dma_chan *chan)
{
	xscd_write(chan->xscd->iomem, XSCD_IE_OFFSET, XSCD_IE_AP_DONE);
	xscd_write(chan->xscd->iomem, XSCD_GIE_OFFSET, XSCD_GIE_EN);
}

/**
 * xscd_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xscd_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);

	xscd_dma_free_descriptors(chan);
}

/**
 * xscd_dma_do_tasklet - Per channel tasklet to clean up completed descriptors
 * @data: Pointer to the Xilinx scdma channel structure
 */
static void xscd_dma_do_tasklet(unsigned long data)
{
	struct xscd_dma_chan *chan = (struct xscd_dma_chan *)data;

	xscd_dma_chan_desc_cleanup(chan);
}

/**
 * xscd_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xscd_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	dma_cookie_init(dchan);
	return 0;
}

/**
 * of_scdma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_scdma_xilinx_xlate(struct of_phandle_args *dma_spec,
					      struct of_dma *ofdma)
{
	struct xscd_device *xscd = ofdma->of_dma_data;
	u32 chan_id = dma_spec->args[0];

	if (chan_id >= xscd->num_streams)
		return NULL;

	if (!xscd->channels[chan_id])
		return NULL;

	return dma_get_slave_channel(&xscd->channels[chan_id]->common);
}

/**
 * xscd_dma_chan_init - Initialize the SCD DMA channel
 * @xscd: Pointer to the SCD device structure
 * @chan_id: Channel index
 */
static void xscd_dma_chan_init(struct xscd_device *xscd, int chan_id)
{
	struct xscd_dma_chan *chan = &xscd->chans[chan_id].dmachan;

	chan->id = chan_id;
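	/* Each channel owns a register window at a fixed offset from the core base */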
	chan->iomem = xscd->iomem + chan->id * XSCD_CHAN_OFFSET;
	chan->xscd = xscd;
	chan->idle = true;

	xscd->channels[chan->id] = chan;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	tasklet_init(&chan->tasklet, xscd_dma_do_tasklet,
		     (unsigned long)chan);
	chan->common.device = &xscd->dma_device;
	list_add_tail(&chan->common.device_node, &xscd->dma_device.channels);
}

/**
 * xscd_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
static void xscd_dma_chan_remove(struct xscd_dma_chan *chan)
{
	list_del(&chan->common.device_node);
}

/**
 * xscd_dma_init - Initialize the SCD DMA engine
 * @xscd: Pointer to the SCD device structure
 *
 * Return: '0' on success and failure value on error
 */
int xscd_dma_init(struct xscd_device *xscd)
{
	struct dma_device *ddev = &xscd->dma_device;
	unsigned int chan_id;
	int ret;

	/* Initialize the DMA engine */
	ddev->dev = xscd->dev;
	ret = dma_set_mask(xscd->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	INIT_LIST_HEAD(&ddev->channels);
	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
	ddev->device_alloc_chan_resources = xscd_dma_alloc_chan_resources;
	ddev->device_free_chan_resources = xscd_dma_free_chan_resources;
	ddev->device_tx_status = xscd_dma_tx_status;
	ddev->device_issue_pending = xscd_dma_issue_pending;
	ddev->device_terminate_all = xscd_dma_terminate_all;
	ddev->device_prep_interleaved_dma = xscd_dma_prep_interleaved;

	for (chan_id = 0; chan_id < xscd->num_streams; chan_id++)
		xscd_dma_chan_init(xscd, chan_id);

	ret = dma_async_device_register(ddev);
	if (ret) {
		dev_err(xscd->dev, "failed to register the dma device\n");
		goto error;
	}

	ret = of_dma_controller_register(xscd->dev->of_node,
					 of_scdma_xilinx_xlate, xscd);
	if (ret) {
		dev_err(xscd->dev, "failed to register DMA to DT DMA helper\n");
		goto error_of_dma;
	}

	dev_info(xscd->dev, "Xilinx Scene Change DMA is initialized!\n");
	return 0;

error_of_dma:
	dma_async_device_unregister(ddev);

error:
	for (chan_id = 0; chan_id < xscd->num_streams; chan_id++)
		xscd_dma_chan_remove(xscd->channels[chan_id]);

	return ret;
}

/**
 * xscd_dma_cleanup - Clean up the SCD DMA engine
 * @xscd: Pointer to the SCD device structure
 *
 * This function is the counterpart of xscd_dma_init() and cleans up the
 * resources related to the DMA engine.
 */
void xscd_dma_cleanup(struct xscd_device *xscd)
{
	/* Undo registration in reverse order: DT helper first, then the device */
	of_dma_controller_free(xscd->dev->of_node);
	dma_async_device_unregister(&xscd->dma_device);
}