2 * DMAEngine driver for Xilinx Framebuffer IP
4 * Copyright (C) 2016,2017 Xilinx, Inc. All rights reserved.
6 * Authors: Radhey Shyam Pandey <radheys@xilinx.com>
7 * John Nichols <jnichol@xilinx.com>
8 * Jeffrey Mouroux <jmouroux@xilinx.com>
10 * Based on the Freescale DMA driver.
13 * The AXI Framebuffer core is a soft Xilinx IP core that
14 * provides high-bandwidth direct memory access between memory
17 * This program is free software: you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation, either version 2 of the License, or
20 * (at your option) any later version.
23 #include <linux/bitops.h>
24 #include <linux/dma/xilinx_frmbuf.h>
25 #include <linux/dmapool.h>
26 #include <linux/gpio/consumer.h>
27 #include <linux/init.h>
28 #include <linux/interrupt.h>
30 #include <linux/iopoll.h>
31 #include <linux/module.h>
32 #include <linux/of_address.h>
33 #include <linux/of_dma.h>
34 #include <linux/of_irq.h>
35 #include <linux/of_platform.h>
36 #include <linux/slab.h>
37 #include <linux/videodev2.h>
39 #include <drm/drm_fourcc.h>
41 #include "../dmaengine.h"
43 /* Register/Descriptor Offsets */
44 #define XILINX_FRMBUF_CTRL_OFFSET 0x00
45 #define XILINX_FRMBUF_GIE_OFFSET 0x04
46 #define XILINX_FRMBUF_IE_OFFSET 0x08
47 #define XILINX_FRMBUF_ISR_OFFSET 0x0c
48 #define XILINX_FRMBUF_WIDTH_OFFSET 0x10
49 #define XILINX_FRMBUF_HEIGHT_OFFSET 0x18
50 #define XILINX_FRMBUF_STRIDE_OFFSET 0x20
51 #define XILINX_FRMBUF_FMT_OFFSET 0x28
52 #define XILINX_FRMBUF_ADDR_OFFSET 0x30
53 #define XILINX_FRMBUF_ADDR2_OFFSET 0x3c
54 #define XILINX_FRMBUF_FID_OFFSET 0x48
56 /* Control Registers */
57 #define XILINX_FRMBUF_CTRL_AP_START BIT(0)
58 #define XILINX_FRMBUF_CTRL_AP_DONE BIT(1)
59 #define XILINX_FRMBUF_CTRL_AP_IDLE BIT(2)
60 #define XILINX_FRMBUF_CTRL_AP_READY BIT(3)
61 #define XILINX_FRMBUF_CTRL_AUTO_RESTART BIT(7)
62 #define XILINX_FRMBUF_GIE_EN BIT(0)
64 /* Interrupt Status and Control */
65 #define XILINX_FRMBUF_IE_AP_DONE BIT(0)
66 #define XILINX_FRMBUF_IE_AP_READY BIT(1)
68 #define XILINX_FRMBUF_ISR_AP_DONE_IRQ BIT(0)
69 #define XILINX_FRMBUF_ISR_AP_READY_IRQ BIT(1)
71 #define XILINX_FRMBUF_ISR_ALL_IRQ_MASK \
72 (XILINX_FRMBUF_ISR_AP_DONE_IRQ | \
73 XILINX_FRMBUF_ISR_AP_READY_IRQ)
75 /* Video Format Register Settings */
76 #define XILINX_FRMBUF_FMT_RGBX8 10
77 #define XILINX_FRMBUF_FMT_YUVX8 11
78 #define XILINX_FRMBUF_FMT_YUYV8 12
79 #define XILINX_FRMBUF_FMT_RGBA8 13
80 #define XILINX_FRMBUF_FMT_YUVA8 14
81 #define XILINX_FRMBUF_FMT_RGBX10 15
82 #define XILINX_FRMBUF_FMT_YUVX10 16
83 #define XILINX_FRMBUF_FMT_Y_UV8 18
84 #define XILINX_FRMBUF_FMT_Y_UV8_420 19
85 #define XILINX_FRMBUF_FMT_RGB8 20
86 #define XILINX_FRMBUF_FMT_YUV8 21
87 #define XILINX_FRMBUF_FMT_Y_UV10 22
88 #define XILINX_FRMBUF_FMT_Y_UV10_420 23
89 #define XILINX_FRMBUF_FMT_Y8 24
90 #define XILINX_FRMBUF_FMT_Y10 25
91 #define XILINX_FRMBUF_FMT_BGRA8 26
92 #define XILINX_FRMBUF_FMT_BGRX8 27
93 #define XILINX_FRMBUF_FMT_UYVY8 28
94 #define XILINX_FRMBUF_FMT_BGR8 29
97 #define XILINX_FRMBUF_FID_MASK BIT(0)
100 * struct xilinx_frmbuf_desc_hw - Hardware Descriptor
101 * @luma_plane_addr: Luma or packed plane buffer address
102 * @chroma_plane_addr: Chroma plane buffer address
103 * @vsize: Vertical Size
104 * @hsize: Horizontal Size
105 * @stride: Number of bytes between the first
106 * pixels of each horizontal line
108 struct xilinx_frmbuf_desc_hw {
/* Bus address of the luma plane, or of the single packed plane. */
109 dma_addr_t luma_plane_addr;
/* Bus address of the chroma plane (semi-planar formats only). */
110 dma_addr_t chroma_plane_addr;
/*
 * NOTE(review): the vsize/hsize/stride members documented in the kernel-doc
 * above (and the closing brace) appear to be missing from this extract --
 * confirm against the upstream driver before treating this as compilable.
 */
117 * struct xilinx_frmbuf_tx_descriptor - Per Transaction structure
118 * @async_tx: Async transaction descriptor
119 * @hw: Hardware descriptor
120 * @node: Node in the channel descriptors list
121 * @fid: Field ID of buffer
123 struct xilinx_frmbuf_tx_descriptor {
/* Generic dmaengine transaction descriptor; must stay first-class here
 * because to_dma_tx_descriptor() container_of()'s back from it.
 */
124 struct dma_async_tx_descriptor async_tx;
/* Hardware programming parameters for this frame transfer. */
125 struct xilinx_frmbuf_desc_hw hw;
/* Linkage into the channel's pending_list/done_list. */
126 struct list_head node;
/* NOTE(review): the @fid member and closing brace seem to be missing from
 * this extract (desc->fid is used later in the file) -- verify upstream.
 */
131 * struct xilinx_frmbuf_chan - Driver specific dma channel structure
132 * @xdev: Driver specific device structure
133 * @lock: Descriptor operation lock
134 * @chan_node: Member of a list of framebuffer channel instances
135 * @pending_list: Descriptors waiting
136 * @done_list: Complete descriptors
137 * @staged_desc: Next buffer to be programmed
138 * @active_desc: Currently active buffer being read/written to
139 * @common: DMA common channel
140 * @dev: The dma device
141 * @write_addr: callback that will write dma addresses to IP (32 or 64 bit)
143 * @direction: Transfer direction
144 * @idle: Channel idle state
145 * @tasklet: Cleanup work after irq
146 * @vid_fmt: Reference to currently assigned video format description
148 struct xilinx_frmbuf_chan {
/* Back-pointer to the owning device (register base, reset GPIO, ...). */
149 struct xilinx_frmbuf_device *xdev;
150 /* Descriptor operation lock */
/* Membership in the global frmbuf_chan_list used by frmbuf_find_dev(). */
152 struct list_head chan_node;
/* Descriptors submitted but not yet programmed into hardware. */
153 struct list_head pending_list;
/* Descriptors whose transfer completed; drained by the cleanup tasklet. */
154 struct list_head done_list;
/* Descriptor programmed into HW shadow registers for the next frame. */
155 struct xilinx_frmbuf_tx_descriptor *staged_desc;
/* Descriptor the hardware is actively reading/writing right now. */
156 struct xilinx_frmbuf_tx_descriptor *active_desc;
/* Embedded dmaengine channel; to_xilinx_chan() container_of()'s from it. */
157 struct dma_chan common;
/* 32- vs 64-bit address programming hook (write_addr/writeq_addr). */
159 void (*write_addr)(struct xilinx_frmbuf_chan *chan, u32 reg,
162 enum dma_transfer_direction direction;
164 struct tasklet_struct tasklet;
/* Currently configured video format; NULL until *_config() succeeds. */
165 const struct xilinx_frmbuf_format_desc *vid_fmt;
/* NOTE(review): members such as @lock, @dev, @irq, @idle and the closing
 * brace appear to be missing from this extract -- verify upstream.
 */
169 * struct xilinx_frmbuf_format_desc - lookup table to match fourcc to format
170 * @dts_name: Device tree name for this entry.
172 * @bpw: Bits of pixel data + padding in a 32-bit word (luma plane for semi-pl)
173 * @ppw: Number of pixels represented in a 32-bit word (luma plane for semi-pl)
174 * @num_planes: Expected number of plane buffers in framebuffer for this format
175 * @drm_fmt: DRM video framework equivalent fourcc code
176 * @v4l2_fmt: Video 4 Linux framework equivalent fourcc code
177 * @fmt_bitmask: Flag identifying this format in device-specific "enabled"
180 struct xilinx_frmbuf_format_desc {
/* Format name as spelled in the "xlnx,vid-formats" device-tree property. */
181 const char *dts_name;
/* NOTE(review): remaining members (@id, @bpw, @ppw, @num_planes, @drm_fmt,
 * @v4l2_fmt, @fmt_bitmask) and the closing brace appear to be missing from
 * this extract -- the initializer table below references them.
 */
191 static LIST_HEAD(frmbuf_chan_list);
192 static DEFINE_MUTEX(frmbuf_chan_list_lock);
194 static const struct xilinx_frmbuf_format_desc xilinx_frmbuf_formats[] = {
196 .dts_name = "xbgr8888",
197 .id = XILINX_FRMBUF_FMT_RGBX8,
201 .drm_fmt = DRM_FORMAT_XBGR8888,
202 .v4l2_fmt = V4L2_PIX_FMT_BGRX32,
203 .fmt_bitmask = BIT(0),
206 .dts_name = "xbgr2101010",
207 .id = XILINX_FRMBUF_FMT_RGBX10,
211 .drm_fmt = DRM_FORMAT_XBGR2101010,
212 .v4l2_fmt = V4L2_PIX_FMT_XBGR30,
213 .fmt_bitmask = BIT(1),
216 .dts_name = "xrgb8888",
217 .id = XILINX_FRMBUF_FMT_BGRX8,
221 .drm_fmt = DRM_FORMAT_XRGB8888,
222 .v4l2_fmt = V4L2_PIX_FMT_XBGR32,
223 .fmt_bitmask = BIT(2),
226 .dts_name = "xvuy8888",
227 .id = XILINX_FRMBUF_FMT_YUVX8,
231 .drm_fmt = DRM_FORMAT_XVUY8888,
232 .v4l2_fmt = V4L2_PIX_FMT_XVUY32,
233 .fmt_bitmask = BIT(5),
236 .dts_name = "vuy888",
237 .id = XILINX_FRMBUF_FMT_YUV8,
241 .drm_fmt = DRM_FORMAT_VUY888,
242 .v4l2_fmt = V4L2_PIX_FMT_VUY24,
243 .fmt_bitmask = BIT(6),
246 .dts_name = "yuvx2101010",
247 .id = XILINX_FRMBUF_FMT_YUVX10,
251 .drm_fmt = DRM_FORMAT_XVUY2101010,
252 .v4l2_fmt = V4L2_PIX_FMT_XVUY10,
253 .fmt_bitmask = BIT(7),
257 .id = XILINX_FRMBUF_FMT_YUYV8,
261 .drm_fmt = DRM_FORMAT_YUYV,
262 .v4l2_fmt = V4L2_PIX_FMT_YUYV,
263 .fmt_bitmask = BIT(8),
267 .id = XILINX_FRMBUF_FMT_UYVY8,
271 .drm_fmt = DRM_FORMAT_UYVY,
272 .v4l2_fmt = V4L2_PIX_FMT_UYVY,
273 .fmt_bitmask = BIT(9),
277 .id = XILINX_FRMBUF_FMT_Y_UV8,
281 .drm_fmt = DRM_FORMAT_NV16,
282 .v4l2_fmt = V4L2_PIX_FMT_NV16M,
283 .fmt_bitmask = BIT(11),
287 .id = XILINX_FRMBUF_FMT_Y_UV8,
292 .v4l2_fmt = V4L2_PIX_FMT_NV16,
293 .fmt_bitmask = BIT(11),
297 .id = XILINX_FRMBUF_FMT_Y_UV8_420,
301 .drm_fmt = DRM_FORMAT_NV12,
302 .v4l2_fmt = V4L2_PIX_FMT_NV12M,
303 .fmt_bitmask = BIT(12),
307 .id = XILINX_FRMBUF_FMT_Y_UV8_420,
312 .v4l2_fmt = V4L2_PIX_FMT_NV12,
313 .fmt_bitmask = BIT(12),
317 .id = XILINX_FRMBUF_FMT_Y_UV10_420,
321 .drm_fmt = DRM_FORMAT_XV15,
322 .v4l2_fmt = V4L2_PIX_FMT_XV15M,
323 .fmt_bitmask = BIT(13),
327 .id = XILINX_FRMBUF_FMT_Y_UV10_420,
332 .v4l2_fmt = V4L2_PIX_FMT_XV15,
333 .fmt_bitmask = BIT(13),
337 .id = XILINX_FRMBUF_FMT_Y_UV10,
341 .drm_fmt = DRM_FORMAT_XV20,
342 .v4l2_fmt = V4L2_PIX_FMT_XV20M,
343 .fmt_bitmask = BIT(14),
347 .id = XILINX_FRMBUF_FMT_Y_UV10,
352 .v4l2_fmt = V4L2_PIX_FMT_XV20,
353 .fmt_bitmask = BIT(14),
356 .dts_name = "bgr888",
357 .id = XILINX_FRMBUF_FMT_RGB8,
361 .drm_fmt = DRM_FORMAT_BGR888,
362 .v4l2_fmt = V4L2_PIX_FMT_RGB24,
363 .fmt_bitmask = BIT(15),
367 .id = XILINX_FRMBUF_FMT_Y8,
371 .drm_fmt = DRM_FORMAT_Y8,
372 .v4l2_fmt = V4L2_PIX_FMT_GREY,
373 .fmt_bitmask = BIT(16),
377 .id = XILINX_FRMBUF_FMT_Y10,
381 .drm_fmt = DRM_FORMAT_Y10,
382 .v4l2_fmt = V4L2_PIX_FMT_Y10,
383 .fmt_bitmask = BIT(17),
386 .dts_name = "rgb888",
387 .id = XILINX_FRMBUF_FMT_BGR8,
391 .drm_fmt = DRM_FORMAT_RGB888,
392 .v4l2_fmt = V4L2_PIX_FMT_BGR24,
393 .fmt_bitmask = BIT(18),
398 * struct xilinx_frmbuf_device - dma device structure
399 * @regs: I/O mapped base address
400 * @dev: Device Structure
401 * @common: DMA device structure
402 * @chan: Driver specific dma channel
403 * @rst_gpio: GPIO reset
404 * @enabled_vid_fmts: Bitmask of video formats enabled in hardware
405 * @drm_memory_fmts: Array of supported DRM fourcc codes
406 * @drm_fmt_cnt: Count of supported DRM fourcc codes
407 * @v4l2_memory_fmts: Array of supported V4L2 fourcc codes
408 * @v4l2_fmt_cnt: Count of supported V4L2 fourcc codes
410 struct xilinx_frmbuf_device {
/* DMA engine device registered with the dmaengine core. */
413 struct dma_device common;
/* The IP has exactly one channel (read XOR write direction). */
414 struct xilinx_frmbuf_chan chan;
/* Active-high reset line; toggled in xilinx_frmbuf_reset(). */
415 struct gpio_desc *rst_gpio;
/* OR of fmt_bitmask values for formats enabled via "xlnx,vid-formats". */
416 u32 enabled_vid_fmts;
417 u32 drm_memory_fmts[ARRAY_SIZE(xilinx_frmbuf_formats)];
419 u32 v4l2_memory_fmts[ARRAY_SIZE(xilinx_frmbuf_formats)];
/* NOTE(review): @regs, @dev, @drm_fmt_cnt, @v4l2_fmt_cnt and the closing
 * brace appear to be missing from this extract -- verify upstream.
 */
/*
 * Match table: the compatible string selects the transfer direction
 * (write IP captures to memory, read IP scans out from memory), stashed
 * in .data and decoded in probe().
 * NOTE(review): the NULL sentinel entry and closing brace appear to be
 * missing from this extract.
 */
423 static const struct of_device_id xilinx_frmbuf_of_ids[] = {
424 { .compatible = "xlnx,axi-frmbuf-wr-v2",
425 .data = (void *)DMA_DEV_TO_MEM},
426 { .compatible = "xlnx,axi-frmbuf-rd-v2",
427 .data = (void *)DMA_MEM_TO_DEV},
431 /******************************PROTOTYPES*************************************/
432 #define to_xilinx_chan(chan) \
433 container_of(chan, struct xilinx_frmbuf_chan, common)
434 #define to_dma_tx_descriptor(tx) \
435 container_of(tx, struct xilinx_frmbuf_tx_descriptor, async_tx)
/* MMIO read of a 32-bit IP register at @reg offset from the mapped base. */
437 static inline u32 frmbuf_read(struct xilinx_frmbuf_chan *chan, u32 reg)
439 return ioread32(chan->xdev->regs + reg);
/* MMIO write of a 32-bit value to the IP register at @reg offset. */
442 static inline void frmbuf_write(struct xilinx_frmbuf_chan *chan, u32 reg,
445 iowrite32(value, chan->xdev->regs + reg);
/*
 * Write a 64-bit value as two 32-bit MMIO writes, low word first --
 * the IP exposes 64-bit addresses as consecutive 32-bit registers.
 */
448 static inline void frmbuf_writeq(struct xilinx_frmbuf_chan *chan, u32 reg,
451 iowrite32(lower_32_bits(value), chan->xdev->regs + reg);
452 iowrite32(upper_32_bits(value), chan->xdev->regs + reg + 4);
/* chan->write_addr hook for 64-bit capable hardware/kernels. */
455 static void writeq_addr(struct xilinx_frmbuf_chan *chan, u32 reg,
458 frmbuf_writeq(chan, reg, (u64)addr);
/* chan->write_addr hook for 32-bit addressing (single register write). */
461 static void write_addr(struct xilinx_frmbuf_chan *chan, u32 reg,
464 frmbuf_write(chan, reg, addr);
/* Read-modify-write: clear the @clr bits in register @reg. */
467 static inline void frmbuf_clr(struct xilinx_frmbuf_chan *chan, u32 reg,
470 frmbuf_write(chan, reg, frmbuf_read(chan, reg) & ~clr);
/* Read-modify-write: set the @set bits in register @reg. */
473 static inline void frmbuf_set(struct xilinx_frmbuf_chan *chan, u32 reg,
476 frmbuf_write(chan, reg, frmbuf_read(chan, reg) | set);
/*
 * Build the per-device DRM and V4L2 fourcc arrays from the global format
 * table, keeping only formats the hardware was synthesized with
 * (xdev->enabled_vid_fmts).  Entries with a zero drm_fmt/v4l2_fmt have no
 * equivalent in that framework and are skipped for that array.
 */
479 static void frmbuf_init_format_array(struct xilinx_frmbuf_device *xdev)
483 for (i = 0; i < ARRAY_SIZE(xilinx_frmbuf_formats); i++) {
484 if (!(xdev->enabled_vid_fmts &
485 xilinx_frmbuf_formats[i].fmt_bitmask))
488 if (xilinx_frmbuf_formats[i].drm_fmt) {
489 cnt = xdev->drm_fmt_cnt++;
490 xdev->drm_memory_fmts[cnt] =
491 xilinx_frmbuf_formats[i].drm_fmt;
494 if (xilinx_frmbuf_formats[i].v4l2_fmt) {
495 cnt = xdev->v4l2_fmt_cnt++;
496 xdev->v4l2_memory_fmts[cnt] =
497 xilinx_frmbuf_formats[i].v4l2_fmt;
/*
 * Resolve a generic dma_chan back to its owning frmbuf device, but only
 * if the channel really belongs to this driver: walk the global channel
 * list first so we never container_of() a foreign channel.
 * Returns ERR_PTR(-ENODEV) when @chan is not a frmbuf channel.
 */
502 static struct xilinx_frmbuf_device *frmbuf_find_dev(struct dma_chan *chan)
504 struct xilinx_frmbuf_chan *xchan, *temp;
505 struct xilinx_frmbuf_device *xdev;
506 bool is_frmbuf_chan = false;
508 list_for_each_entry_safe(xchan, temp, &frmbuf_chan_list, chan_node) {
509 if (chan == &xchan->common)
510 is_frmbuf_chan = true;
514 return ERR_PTR(-ENODEV);
/* Safe now: @chan is known to embed a xilinx_frmbuf_chan. */
516 xchan = to_xilinx_chan(chan);
517 xdev = container_of(xchan, struct xilinx_frmbuf_device, chan);
/*
 * Look up @fourcc (DRM or V4L2 namespace per @type) in the format table
 * and, if the hardware has that format enabled, latch it as the channel's
 * active video format (xil_chan->vid_fmt).
 */
522 static int frmbuf_verify_format(struct dma_chan *chan, u32 fourcc, u32 type)
524 struct xilinx_frmbuf_chan *xil_chan = to_xilinx_chan(chan);
525 u32 i, sz = ARRAY_SIZE(xilinx_frmbuf_formats);
527 for (i = 0; i < sz; i++) {
/* Skip entries whose fourcc (in the requested namespace) doesn't match. */
528 if ((type == XDMA_DRM &&
529 fourcc != xilinx_frmbuf_formats[i].drm_fmt) ||
530 (type == XDMA_V4L2 &&
531 fourcc != xilinx_frmbuf_formats[i].v4l2_fmt))
/* Matching fourcc but format not enabled in this hardware build. */
534 if (!(xilinx_frmbuf_formats[i].fmt_bitmask &
535 xil_chan->xdev->enabled_vid_fmts))
538 xil_chan->vid_fmt = &xilinx_frmbuf_formats[i];
/*
 * Common backend for the exported DRM/V4L2 config helpers: validate that
 * @chan is a frmbuf channel (under the global list mutex), then try to
 * select @fourcc as the active format.  Errors are only logged -- callers
 * get no return value.
 */
544 static void xilinx_xdma_set_config(struct dma_chan *chan, u32 fourcc, u32 type)
546 struct xilinx_frmbuf_chan *xil_chan;
547 bool found_xchan = false;
550 mutex_lock(&frmbuf_chan_list_lock);
551 list_for_each_entry(xil_chan, &frmbuf_chan_list, chan_node) {
552 if (chan == &xil_chan->common) {
557 mutex_unlock(&frmbuf_chan_list_lock);
560 dev_dbg(chan->device->dev,
561 "dma chan not a Video Framebuffer channel instance\n");
565 ret = frmbuf_verify_format(chan, fourcc, type);
566 if (ret == -EINVAL) {
567 dev_err(chan->device->dev,
568 "Framebuffer not configured for fourcc 0x%x\n",
/* Exported API: select the channel's video format by DRM fourcc. */
574 void xilinx_xdma_drm_config(struct dma_chan *chan, u32 drm_fourcc)
576 xilinx_xdma_set_config(chan, drm_fourcc, XDMA_DRM);
578 } EXPORT_SYMBOL_GPL(xilinx_xdma_drm_config);
/* Exported API: select the channel's video format by V4L2 fourcc. */
580 void xilinx_xdma_v4l2_config(struct dma_chan *chan, u32 v4l2_fourcc)
582 xilinx_xdma_set_config(chan, v4l2_fourcc, XDMA_V4L2);
584 } EXPORT_SYMBOL_GPL(xilinx_xdma_v4l2_config);
/*
 * Exported API: report the DRM fourcc codes this hardware supports.
 * @fmts receives a pointer into the device's own array (no copy; the
 * array lives as long as the device).  Fails with -ENODEV via
 * frmbuf_find_dev() when @chan is not a frmbuf channel.
 */
586 int xilinx_xdma_get_drm_vid_fmts(struct dma_chan *chan, u32 *fmt_cnt,
589 struct xilinx_frmbuf_device *xdev;
591 xdev = frmbuf_find_dev(chan);
594 return PTR_ERR(xdev);
596 *fmt_cnt = xdev->drm_fmt_cnt;
597 *fmts = xdev->drm_memory_fmts;
601 EXPORT_SYMBOL(xilinx_xdma_get_drm_vid_fmts);
/*
 * Exported API: report the V4L2 fourcc codes this hardware supports.
 * Mirrors xilinx_xdma_get_drm_vid_fmts(); @fmts aliases device storage.
 */
603 int xilinx_xdma_get_v4l2_vid_fmts(struct dma_chan *chan, u32 *fmt_cnt,
606 struct xilinx_frmbuf_device *xdev;
608 xdev = frmbuf_find_dev(chan);
611 return PTR_ERR(xdev);
613 *fmt_cnt = xdev->v4l2_fmt_cnt;
614 *fmts = xdev->v4l2_memory_fmts;
618 EXPORT_SYMBOL(xilinx_xdma_get_v4l2_vid_fmts);
/*
 * Exported API: fetch the field ID recorded on a completed capture
 * descriptor.  Only meaningful for the write IP (DMA_DEV_TO_MEM), where
 * the FID register was latched into desc->fid at completion time.
 */
620 int xilinx_xdma_get_fid(struct dma_chan *chan,
621 struct dma_async_tx_descriptor *async_tx, u32 *fid)
623 struct xilinx_frmbuf_device *xdev;
624 struct xilinx_frmbuf_tx_descriptor *desc;
626 xdev = frmbuf_find_dev(chan);
628 return PTR_ERR(xdev);
630 if (!async_tx || !fid)
633 if (xdev->chan.direction != DMA_DEV_TO_MEM)
636 desc = to_dma_tx_descriptor(async_tx);
/*
 * Exported API: set the field ID (0 or 1) to be programmed with a
 * display descriptor.  Only valid for the read IP (DMA_MEM_TO_DEV);
 * the value is written to the FID register in start_transfer().
 */
645 int xilinx_xdma_set_fid(struct dma_chan *chan,
646 struct dma_async_tx_descriptor *async_tx, u32 fid)
648 struct xilinx_frmbuf_device *xdev;
649 struct xilinx_frmbuf_tx_descriptor *desc;
/* FID is a single bit: reject anything beyond 0/1 early. */
651 if (fid > 1 || !async_tx)
654 xdev = frmbuf_find_dev(chan);
656 return PTR_ERR(xdev);
658 if (xdev->chan.direction != DMA_MEM_TO_DEV)
661 desc = to_dma_tx_descriptor(async_tx);
671 * of_dma_xilinx_xlate - Translation function
672 * @dma_spec: Pointer to DMA specifier as found in the device tree
673 * @ofdma: Pointer to DMA controller data
675 * Return: DMA channel pointer on success or error code on error
/* DT xlate callback: this device has one channel, so just hand it out. */
677 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
678 struct of_dma *ofdma)
680 struct xilinx_frmbuf_device *xdev = ofdma->of_dma_data;
682 return dma_get_slave_channel(&xdev->chan.common);
685 /* -----------------------------------------------------------------------------
686 * Descriptors alloc and free
690 * xilinx_frmbuf_tx_descriptor - Allocate transaction descriptor
691 * @chan: Driver specific dma channel
693 * Return: The allocated descriptor on success and NULL on failure.
/*
 * Allocate a zeroed transaction descriptor.  GFP_KERNEL: only called
 * from prep context, never from atomic/IRQ paths.
 */
695 static struct xilinx_frmbuf_tx_descriptor *
696 xilinx_frmbuf_alloc_tx_descriptor(struct xilinx_frmbuf_chan *chan)
698 struct xilinx_frmbuf_tx_descriptor *desc;
700 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
708 * xilinx_frmbuf_free_desc_list - Free descriptors list
709 * @chan: Driver specific dma channel
710 * @list: List to parse and delete the descriptor
/* Unlink and free every descriptor on @list; caller holds chan->lock. */
712 static void xilinx_frmbuf_free_desc_list(struct xilinx_frmbuf_chan *chan,
713 struct list_head *list)
715 struct xilinx_frmbuf_tx_descriptor *desc, *next;
717 list_for_each_entry_safe(desc, next, list, node) {
718 list_del(&desc->node);
724 * xilinx_frmbuf_free_descriptors - Free channel descriptors
725 * @chan: Driver specific dma channel
/*
 * Release every descriptor the channel owns -- pending, done, staged and
 * active -- under the channel lock, leaving the lists re-initialized.
 * kfree(NULL) is a no-op, so staged/active need no NULL checks.
 */
727 static void xilinx_frmbuf_free_descriptors(struct xilinx_frmbuf_chan *chan)
731 spin_lock_irqsave(&chan->lock, flags);
733 xilinx_frmbuf_free_desc_list(chan, &chan->pending_list);
734 xilinx_frmbuf_free_desc_list(chan, &chan->done_list);
735 kfree(chan->active_desc);
736 kfree(chan->staged_desc);
738 chan->staged_desc = NULL;
739 chan->active_desc = NULL;
740 INIT_LIST_HEAD(&chan->pending_list);
741 INIT_LIST_HEAD(&chan->done_list);
743 spin_unlock_irqrestore(&chan->lock, flags);
747 * xilinx_frmbuf_free_chan_resources - Free channel resources
748 * @dchan: DMA channel
/* dmaengine .device_free_chan_resources: drop all queued descriptors. */
750 static void xilinx_frmbuf_free_chan_resources(struct dma_chan *dchan)
752 struct xilinx_frmbuf_chan *chan = to_xilinx_chan(dchan);
754 xilinx_frmbuf_free_descriptors(chan);
758 * xilinx_frmbuf_chan_desc_cleanup - Clean channel descriptors
759 * @chan: Driver specific dma channel
/*
 * Drain the done_list: invoke each descriptor's completion callback and
 * run its dependencies, then free it.  The lock is dropped around the
 * callback so clients may resubmit from callback context without
 * deadlocking on chan->lock.
 */
761 static void xilinx_frmbuf_chan_desc_cleanup(struct xilinx_frmbuf_chan *chan)
763 struct xilinx_frmbuf_tx_descriptor *desc, *next;
766 spin_lock_irqsave(&chan->lock, flags);
768 list_for_each_entry_safe(desc, next, &chan->done_list, node) {
769 dma_async_tx_callback callback;
770 void *callback_param;
772 list_del(&desc->node);
774 /* Run the link descriptor callback function */
775 callback = desc->async_tx.callback;
776 callback_param = desc->async_tx.callback_param;
/* Callback runs unlocked; list iteration already advanced via _safe. */
778 spin_unlock_irqrestore(&chan->lock, flags);
779 callback(callback_param);
780 spin_lock_irqsave(&chan->lock, flags);
783 /* Run any dependencies, then free the descriptor */
784 dma_run_dependencies(&desc->async_tx);
788 spin_unlock_irqrestore(&chan->lock, flags);
792 * xilinx_frmbuf_do_tasklet - Schedule completion tasklet
793 * @data: Pointer to the Xilinx frmbuf channel structure
/* Tasklet body: defer completion-callback work out of hard-IRQ context. */
795 static void xilinx_frmbuf_do_tasklet(unsigned long data)
797 struct xilinx_frmbuf_chan *chan = (struct xilinx_frmbuf_chan *)data;
799 xilinx_frmbuf_chan_desc_cleanup(chan);
803 * xilinx_frmbuf_alloc_chan_resources - Allocate channel resources
804 * @dchan: DMA channel
806 * Return: '0' on success and failure value on error
/* dmaengine .device_alloc_chan_resources: only cookie init is needed. */
808 static int xilinx_frmbuf_alloc_chan_resources(struct dma_chan *dchan)
810 dma_cookie_init(dchan);
816 * xilinx_frmbuf_tx_status - Get frmbuf transaction status
817 * @dchan: DMA channel
818 * @cookie: Transaction identifier
819 * @txstate: Transaction state
821 * Return: fmrbuf transaction status
/* dmaengine .device_tx_status: plain cookie bookkeeping, no HW residue. */
823 static enum dma_status xilinx_frmbuf_tx_status(struct dma_chan *dchan,
825 struct dma_tx_state *txstate)
827 return dma_cookie_status(dchan, cookie, txstate);
831 * xilinx_frmbuf_halt - Halt frmbuf channel
832 * @chan: Driver specific dma channel
/* Stop the IP: clear ap_start and auto-restart in the control register. */
834 static void xilinx_frmbuf_halt(struct xilinx_frmbuf_chan *chan)
836 frmbuf_clr(chan, XILINX_FRMBUF_CTRL_OFFSET,
837 XILINX_FRMBUF_CTRL_AP_START |
838 XILINX_FRMBUF_CTRL_AUTO_RESTART);
843 * xilinx_frmbuf_start - Start dma channel
844 * @chan: Driver specific dma channel
/*
 * Kick the IP: ap_start begins the frame, auto-restart keeps it running
 * frame-to-frame from the shadow registers.
 */
846 static void xilinx_frmbuf_start(struct xilinx_frmbuf_chan *chan)
848 frmbuf_set(chan, XILINX_FRMBUF_CTRL_OFFSET,
849 XILINX_FRMBUF_CTRL_AP_START |
850 XILINX_FRMBUF_CTRL_AUTO_RESTART);
855 * xilinx_frmbuf_complete_descriptor - Mark the active descriptor as complete
856 * This function is invoked with spinlock held
857 * @chan : xilinx frmbuf channel
/*
 * Retire chan->active_desc: complete its cookie and queue it on
 * done_list for the tasklet.  Caller holds chan->lock.
 */
861 static void xilinx_frmbuf_complete_descriptor(struct xilinx_frmbuf_chan *chan)
863 struct xilinx_frmbuf_tx_descriptor *desc = chan->active_desc;
866 * In case of frame buffer write, read the fid register
867 * and associate it with descriptor
869 if (chan->direction == DMA_DEV_TO_MEM)
870 desc->fid = frmbuf_read(chan, XILINX_FRMBUF_FID_OFFSET) &
871 XILINX_FRMBUF_FID_MASK;
873 dma_cookie_complete(&desc->async_tx);
874 list_add_tail(&desc->node, &chan->done_list);
878 * xilinx_frmbuf_start_transfer - Starts frmbuf transfer
879 * @chan: Driver specific channel struct pointer
/*
 * Advance the two-deep descriptor pipeline and program the next frame.
 * Called with chan->lock held (from issue_pending and the IRQ handler):
 * retire active, promote staged to active, then stage the head of
 * pending_list into the IP's shadow registers.
 */
881 static void xilinx_frmbuf_start_transfer(struct xilinx_frmbuf_chan *chan)
883 struct xilinx_frmbuf_tx_descriptor *desc;
888 if (chan->active_desc) {
889 xilinx_frmbuf_complete_descriptor(chan);
890 chan->active_desc = NULL;
893 if (chan->staged_desc) {
894 chan->active_desc = chan->staged_desc;
895 chan->staged_desc = NULL;
898 if (list_empty(&chan->pending_list))
901 desc = list_first_entry(&chan->pending_list,
902 struct xilinx_frmbuf_tx_descriptor,
905 /* Start the transfer */
/* Address programming goes through the 32/64-bit hook chosen at probe. */
906 chan->write_addr(chan, XILINX_FRMBUF_ADDR_OFFSET,
907 desc->hw.luma_plane_addr);
908 chan->write_addr(chan, XILINX_FRMBUF_ADDR2_OFFSET,
909 desc->hw.chroma_plane_addr);
911 /* HW expects these parameters to be same for one transaction */
912 frmbuf_write(chan, XILINX_FRMBUF_WIDTH_OFFSET, desc->hw.hsize);
913 frmbuf_write(chan, XILINX_FRMBUF_STRIDE_OFFSET, desc->hw.stride);
914 frmbuf_write(chan, XILINX_FRMBUF_HEIGHT_OFFSET, desc->hw.vsize);
915 frmbuf_write(chan, XILINX_FRMBUF_FMT_OFFSET, chan->vid_fmt->id);
917 /* If it is framebuffer read IP set the FID */
918 if (chan->direction == DMA_MEM_TO_DEV)
919 frmbuf_write(chan, XILINX_FRMBUF_FID_OFFSET, desc->fid);
921 /* Start the hardware */
922 xilinx_frmbuf_start(chan);
923 list_del(&desc->node);
924 chan->staged_desc = desc;
928 * xilinx_frmbuf_issue_pending - Issue pending transactions
929 * @dchan: DMA channel
/* dmaengine .device_issue_pending: push queued work into hardware. */
931 static void xilinx_frmbuf_issue_pending(struct dma_chan *dchan)
933 struct xilinx_frmbuf_chan *chan = to_xilinx_chan(dchan);
936 spin_lock_irqsave(&chan->lock, flags);
937 xilinx_frmbuf_start_transfer(chan);
938 spin_unlock_irqrestore(&chan->lock, flags);
942 * xilinx_frmbuf_reset - Reset frmbuf channel
943 * @chan: Driver specific dma channel
/* Pulse the external reset GPIO to hard-reset the IP core. */
945 static void xilinx_frmbuf_reset(struct xilinx_frmbuf_chan *chan)
948 gpiod_set_value(chan->xdev->rst_gpio, 1);
950 gpiod_set_value(chan->xdev->rst_gpio, 0);
954 * xilinx_frmbuf_chan_reset - Reset frmbuf channel and enable interrupts
955 * @chan: Driver specific frmbuf channel
/*
 * Reset the IP, then re-arm interrupts: ap_ready (shadow registers free
 * for the next frame) plus the global interrupt enable.
 */
957 static void xilinx_frmbuf_chan_reset(struct xilinx_frmbuf_chan *chan)
959 xilinx_frmbuf_reset(chan);
960 frmbuf_write(chan, XILINX_FRMBUF_IE_OFFSET, XILINX_FRMBUF_IE_AP_READY);
961 frmbuf_write(chan, XILINX_FRMBUF_GIE_OFFSET, XILINX_FRMBUF_GIE_EN);
965 * xilinx_frmbuf_irq_handler - frmbuf Interrupt handler
967 * @data: Pointer to the Xilinx frmbuf channel structure
969 * Return: IRQ_HANDLED/IRQ_NONE
/*
 * Hard-IRQ handler: ack the asserted IRQ bits (write-1-to-clear ISR),
 * advance the descriptor pipeline on ap_ready, and defer completion
 * callbacks to the tasklet.  Returns IRQ_NONE-style early when none of
 * our bits are set (the line is requested IRQF_SHARED).
 */
971 static irqreturn_t xilinx_frmbuf_irq_handler(int irq, void *data)
973 struct xilinx_frmbuf_chan *chan = data;
976 status = frmbuf_read(chan, XILINX_FRMBUF_ISR_OFFSET);
977 if (!(status & XILINX_FRMBUF_ISR_ALL_IRQ_MASK))
980 frmbuf_write(chan, XILINX_FRMBUF_ISR_OFFSET,
981 status & XILINX_FRMBUF_ISR_ALL_IRQ_MASK);
983 if (status & XILINX_FRMBUF_ISR_AP_READY_IRQ) {
984 spin_lock(&chan->lock);
986 xilinx_frmbuf_start_transfer(chan);
987 spin_unlock(&chan->lock);
990 tasklet_schedule(&chan->tasklet);
995 * xilinx_frmbuf_tx_submit - Submit DMA transaction
996 * @tx: Async transaction descriptor
998 * Return: cookie value on success and failure value on error
/*
 * .tx_submit hook: assign a cookie and append the descriptor to
 * pending_list under the channel lock.  Hardware is not touched here;
 * issue_pending starts the actual transfer.
 */
1000 static dma_cookie_t xilinx_frmbuf_tx_submit(struct dma_async_tx_descriptor *tx)
1002 struct xilinx_frmbuf_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1003 struct xilinx_frmbuf_chan *chan = to_xilinx_chan(tx->chan);
1004 dma_cookie_t cookie;
1005 unsigned long flags;
1007 spin_lock_irqsave(&chan->lock, flags);
1008 cookie = dma_cookie_assign(tx);
1009 list_add_tail(&desc->node, &chan->pending_list);
1010 spin_unlock_irqrestore(&chan->lock, flags);
1016 * xilinx_frmbuf_dma_prep_interleaved - prepare a descriptor for a
1017 * DMA_SLAVE transaction
1018 * @dchan: DMA channel
1019 * @xt: Interleaved template pointer
1020 * @flags: transfer ack flags
1022 * Return: Async transaction descriptor on success and NULL on failure
/*
 * dmaengine .device_prep_interleaved_dma: validate the interleaved
 * template against the configured video format, then translate it into
 * the hardware parameters (hsize/vsize/stride and one or two plane
 * addresses).  Requires a prior xilinx_xdma_{drm,v4l2}_config() call to
 * have set chan->vid_fmt; returns NULL (via the error path) otherwise.
 */
1024 static struct dma_async_tx_descriptor *
1025 xilinx_frmbuf_dma_prep_interleaved(struct dma_chan *dchan,
1026 struct dma_interleaved_template *xt,
1027 unsigned long flags)
1029 struct xilinx_frmbuf_chan *chan = to_xilinx_chan(dchan);
1030 struct xilinx_frmbuf_tx_descriptor *desc;
1031 struct xilinx_frmbuf_desc_hw *hw;
1033 if (chan->direction != xt->dir || !chan->vid_fmt)
1036 if (!xt->numf || !xt->sgl[0].size)
/* Plane count of the template must match the selected pixel format. */
1039 if (xt->frame_size != chan->vid_fmt->num_planes)
1042 desc = xilinx_frmbuf_alloc_tx_descriptor(chan);
1046 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1047 desc->async_tx.tx_submit = xilinx_frmbuf_tx_submit;
1048 async_tx_ack(&desc->async_tx);
1051 hw->vsize = xt->numf;
1052 hw->stride = xt->sgl[0].icg + xt->sgl[0].size;
/* Convert the byte width into pixels using the format's pixels-per-word. */
1053 hw->hsize = (xt->sgl[0].size * chan->vid_fmt->ppw * 8) /
1056 /* hsize calc should not have resulted in an odd number */
1060 if (chan->direction == DMA_MEM_TO_DEV) {
1061 hw->luma_plane_addr = xt->src_start;
/* Semi-planar: chroma plane sits one full luma plane past the start. */
1062 if (xt->frame_size == 2)
1063 hw->chroma_plane_addr =
1065 xt->numf * hw->stride +
1068 hw->luma_plane_addr = xt->dst_start;
1069 if (xt->frame_size == 2)
1070 hw->chroma_plane_addr =
1072 xt->numf * hw->stride +
1076 return &desc->async_tx;
1079 dev_err(chan->xdev->dev,
1080 "Invalid dma template or missing dma video fmt config\n");
1085 * xilinx_frmbuf_terminate_all - Halt the channel and free descriptors
1086 * @dchan: Driver specific dma channel pointer
/*
 * dmaengine .device_terminate_all: halt the IP, drop every descriptor,
 * then reset and re-arm the channel so it is ready for fresh use.
 */
1090 static int xilinx_frmbuf_terminate_all(struct dma_chan *dchan)
1092 struct xilinx_frmbuf_chan *chan = to_xilinx_chan(dchan);
1094 xilinx_frmbuf_halt(chan);
1095 xilinx_frmbuf_free_descriptors(chan);
1096 /* worst case frame-to-frame boundary; ensure frame output complete */
1098 xilinx_frmbuf_chan_reset(chan);
1104 * xilinx_frmbuf_synchronize - kill tasklet to stop further descr processing
1105 * @dchan: Driver specific dma channel pointer
/* dmaengine .device_synchronize: wait out the completion tasklet. */
1107 static void xilinx_frmbuf_synchronize(struct dma_chan *dchan)
1109 struct xilinx_frmbuf_chan *chan = to_xilinx_chan(dchan);
1111 tasklet_kill(&chan->tasklet);
1114 /* -----------------------------------------------------------------------------
1119 * xilinx_frmbuf_chan_remove - Per Channel remove function
1120 * @chan: Driver specific dma channel
/*
 * Per-channel teardown: mask interrupts, stop the tasklet, and unlink
 * the channel from both the dmaengine device list and the driver-global
 * channel list (the latter under its mutex).
 */
1122 static void xilinx_frmbuf_chan_remove(struct xilinx_frmbuf_chan *chan)
1124 /* Disable all interrupts */
1125 frmbuf_clr(chan, XILINX_FRMBUF_IE_OFFSET,
1126 XILINX_FRMBUF_ISR_ALL_IRQ_MASK);
1128 tasklet_kill(&chan->tasklet);
1129 list_del(&chan->common.device_node);
1131 mutex_lock(&frmbuf_chan_list_lock);
1132 list_del(&chan->chan_node);
1133 mutex_unlock(&frmbuf_chan_list_lock);
1137 * xilinx_frmbuf_chan_probe - Per Channel Probing
1138 * It get channel features from the device tree entry and
1139 * initialize special channel handling routines
1141 * @xdev: Driver specific device structure
1142 * @node: Device node
1144 * Return: '0' on success and failure value on error
/*
 * Per-channel probe: read the DT address width to pick the 32- vs 64-bit
 * address-programming hook, set up lists/lock/IRQ/tasklet, register the
 * channel with the dmaengine device and the driver-global list, then
 * reset the hardware into a known armed state.
 */
1146 static int xilinx_frmbuf_chan_probe(struct xilinx_frmbuf_device *xdev,
1147 struct device_node *node)
1149 struct xilinx_frmbuf_chan *chan;
1155 chan->dev = xdev->dev;
1159 err = of_property_read_u32(node, "xlnx,dma-addr-width",
/* Only 32 and 64 are valid hardware configurations. */
1161 if (err || (dma_addr_size != 32 && dma_addr_size != 64)) {
1162 dev_err(xdev->dev, "missing or invalid addr width dts prop\n");
/* 64-bit programming only if the kernel's dma_addr_t can carry it. */
1166 if (dma_addr_size == 64 && sizeof(dma_addr_t) == sizeof(u64))
1167 chan->write_addr = writeq_addr;
1169 chan->write_addr = write_addr;
1171 spin_lock_init(&chan->lock);
1172 INIT_LIST_HEAD(&chan->pending_list);
1173 INIT_LIST_HEAD(&chan->done_list);
1175 chan->irq = irq_of_parse_and_map(node, 0);
1176 err = devm_request_irq(xdev->dev, chan->irq, xilinx_frmbuf_irq_handler,
1177 IRQF_SHARED, "xilinx_framebuffer", chan);
1180 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
1184 tasklet_init(&chan->tasklet, xilinx_frmbuf_do_tasklet,
1185 (unsigned long)chan);
1188 * Initialize the DMA channel and add it to the DMA engine channels
1191 chan->common.device = &xdev->common;
1193 list_add_tail(&chan->common.device_node, &xdev->common.channels);
1195 mutex_lock(&frmbuf_chan_list_lock);
1196 list_add_tail(&chan->chan_node, &frmbuf_chan_list);
1197 mutex_unlock(&frmbuf_chan_list_lock);
1199 xilinx_frmbuf_chan_reset(chan);
1205 * xilinx_frmbuf_probe - Driver probe function
1206 * @pdev: Pointer to the platform_device structure
1208 * Return: '0' on success and failure value on error
/*
 * Platform probe: allocate the device, take it out of reset via the
 * "reset" GPIO, map registers, probe the single channel, decode the
 * transfer direction from the matched compatible, parse the
 * "xlnx,vid-formats" DT list into enabled_vid_fmts, wire up the
 * dmaengine ops, and register with both the dmaengine core and the
 * OF-DMA translation layer.
 */
1210 static int xilinx_frmbuf_probe(struct platform_device *pdev)
1212 struct device_node *node = pdev->dev.of_node;
1213 struct xilinx_frmbuf_device *xdev;
1214 struct resource *io;
1215 enum dma_transfer_direction dma_dir;
1216 const struct of_device_id *match;
1220 const char *vid_fmts[ARRAY_SIZE(xilinx_frmbuf_formats)];
1222 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
1226 xdev->dev = &pdev->dev;
1228 match = of_match_node(xilinx_frmbuf_of_ids, node);
/* Direction was stashed in the match table's .data pointer. */
1232 dma_dir = (enum dma_transfer_direction)match->data;
1234 xdev->rst_gpio = devm_gpiod_get(&pdev->dev, "reset",
1236 if (IS_ERR(xdev->rst_gpio)) {
1237 err = PTR_ERR(xdev->rst_gpio);
1238 if (err == -EPROBE_DEFER)
1239 dev_info(&pdev->dev,
1240 "Probe deferred due to GPIO reset defer\n");
1243 "Unable to locate reset property in dt\n");
/* Deassert reset before touching any IP register. */
1247 gpiod_set_value_cansleep(xdev->rst_gpio, 0x0);
1249 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1250 xdev->regs = devm_ioremap_resource(&pdev->dev, io);
1251 if (IS_ERR(xdev->regs))
1252 return PTR_ERR(xdev->regs);
1254 /* Initialize the DMA engine */
1255 /* TODO: Get DMA alignment from device tree property */
1256 xdev->common.copy_align = 4;
1257 xdev->common.dev = &pdev->dev;
1259 INIT_LIST_HEAD(&xdev->common.channels);
1260 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
1261 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
1263 /* Initialize the channels */
1264 err = xilinx_frmbuf_chan_probe(xdev, node);
1268 xdev->chan.direction = dma_dir;
1270 if (xdev->chan.direction == DMA_DEV_TO_MEM) {
1271 xdev->common.directions = BIT(DMA_DEV_TO_MEM);
1272 dev_info(&pdev->dev, "Xilinx AXI frmbuf DMA_DEV_TO_MEM\n");
1273 } else if (xdev->chan.direction == DMA_MEM_TO_DEV) {
1274 xdev->common.directions = BIT(DMA_MEM_TO_DEV);
1275 dev_info(&pdev->dev, "Xilinx AXI frmbuf DMA_MEM_TO_DEV\n");
/* Unknown direction: undo the channel probe and bail out. */
1277 xilinx_frmbuf_chan_remove(&xdev->chan);
1281 /* read supported video formats and update internal table */
1282 hw_vid_fmt_cnt = of_property_count_strings(node, "xlnx,vid-formats");
1284 err = of_property_read_string_array(node, "xlnx,vid-formats",
1285 vid_fmts, hw_vid_fmt_cnt);
1288 "Missing or invalid xlnx,vid-formats dts prop\n");
1292 for (i = 0; i < hw_vid_fmt_cnt; i++) {
1293 const char *vid_fmt_name = vid_fmts[i];
1295 for (j = 0; j < ARRAY_SIZE(xilinx_frmbuf_formats); j++) {
1296 const char *dts_name =
1297 xilinx_frmbuf_formats[j].dts_name;
1299 if (strcmp(vid_fmt_name, dts_name))
1302 xdev->enabled_vid_fmts |=
1303 xilinx_frmbuf_formats[j].fmt_bitmask;
1307 /* Determine supported vid framework formats */
1308 frmbuf_init_format_array(xdev);
1310 xdev->common.device_alloc_chan_resources =
1311 xilinx_frmbuf_alloc_chan_resources;
1312 xdev->common.device_free_chan_resources =
1313 xilinx_frmbuf_free_chan_resources;
1314 xdev->common.device_prep_interleaved_dma =
1315 xilinx_frmbuf_dma_prep_interleaved;
1316 xdev->common.device_terminate_all = xilinx_frmbuf_terminate_all;
1317 xdev->common.device_synchronize = xilinx_frmbuf_synchronize;
1318 xdev->common.device_tx_status = xilinx_frmbuf_tx_status;
1319 xdev->common.device_issue_pending = xilinx_frmbuf_issue_pending;
1321 platform_set_drvdata(pdev, xdev);
1323 /* Register the DMA engine with the core */
1324 dma_async_device_register(&xdev->common);
1325 err = of_dma_controller_register(node, of_dma_xilinx_xlate, xdev);
/* Roll back both channel and dmaengine registration on failure. */
1328 dev_err(&pdev->dev, "Unable to register DMA to DT\n");
1329 xilinx_frmbuf_chan_remove(&xdev->chan);
1330 dma_async_device_unregister(&xdev->common);
1334 dev_info(&pdev->dev, "Xilinx AXI FrameBuffer Engine Driver Probed!!\n");
1340 * xilinx_frmbuf_remove - Driver remove function
1341 * @pdev: Pointer to the platform_device structure
1343 * Return: Always '0'
/* Platform remove: unregister from the dmaengine core, then tear down
 * the channel (reverse of probe order).
 */
1345 static int xilinx_frmbuf_remove(struct platform_device *pdev)
1347 struct xilinx_frmbuf_device *xdev = platform_get_drvdata(pdev);
1349 dma_async_device_unregister(&xdev->common);
1350 xilinx_frmbuf_chan_remove(&xdev->chan);
1355 MODULE_DEVICE_TABLE(of, xilinx_frmbuf_of_ids);
/* Platform driver glue; probe/remove bound above, matched by OF table. */
1357 static struct platform_driver xilinx_frmbuf_driver = {
1359 .name = "xilinx-frmbuf",
1360 .of_match_table = xilinx_frmbuf_of_ids,
1362 .probe = xilinx_frmbuf_probe,
1363 .remove = xilinx_frmbuf_remove,
1366 module_platform_driver(xilinx_frmbuf_driver);
1368 MODULE_AUTHOR("Xilinx, Inc.");
1369 MODULE_DESCRIPTION("Xilinx Framebuffer driver");
1370 MODULE_LICENSE("GPL v2");