 * Xilinx DPDMA Engine driver
 *
 * Copyright (C) 2015 Xilinx, Inc.
 *
 * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include "../dmaengine.h"
#define XILINX_DPDMA_ERR_CTRL			0x0
#define XILINX_DPDMA_ISR			0x4
#define XILINX_DPDMA_IMR			0x8
#define XILINX_DPDMA_IEN			0xc
#define XILINX_DPDMA_IDS			0x10
#define XILINX_DPDMA_INTR_DESC_DONE_MASK	(0x3f << 0)
#define XILINX_DPDMA_INTR_DESC_DONE_SHIFT	0
#define XILINX_DPDMA_INTR_NO_OSTAND_MASK	(0x3f << 6)
#define XILINX_DPDMA_INTR_NO_OSTAND_SHIFT	6
#define XILINX_DPDMA_INTR_AXI_ERR_MASK		(0x3f << 12)
#define XILINX_DPDMA_INTR_AXI_ERR_SHIFT		12
#define XILINX_DPDMA_INTR_DESC_ERR_MASK		(0x3f << 18)
#define XILINX_DPDMA_INTR_DESC_ERR_SHIFT	18
#define XILINX_DPDMA_INTR_WR_CMD_FIFO_FULL	BIT(24)
#define XILINX_DPDMA_INTR_WR_DATA_FIFO_FULL	BIT(25)
#define XILINX_DPDMA_INTR_AXI_4K_CROSS		BIT(26)
#define XILINX_DPDMA_INTR_VSYNC			BIT(27)
#define XILINX_DPDMA_INTR_CHAN_ERR_MASK		0x41000
#define XILINX_DPDMA_INTR_CHAN_ERR		0xfff000
#define XILINX_DPDMA_INTR_GLOBAL_ERR		0x7000000
#define XILINX_DPDMA_INTR_ERR_ALL		0x7fff000
#define XILINX_DPDMA_INTR_CHAN_MASK		0x41041
#define XILINX_DPDMA_INTR_GLOBAL_MASK		0xf000000
#define XILINX_DPDMA_INTR_ALL			0xfffffff
#define XILINX_DPDMA_EISR			0x14
#define XILINX_DPDMA_EIMR			0x18
#define XILINX_DPDMA_EIEN			0x1c
#define XILINX_DPDMA_EIDS			0x20
#define XILINX_DPDMA_EINTR_INV_APB		BIT(0)
#define XILINX_DPDMA_EINTR_RD_AXI_ERR_MASK	(0x3f << 1)
#define XILINX_DPDMA_EINTR_RD_AXI_ERR_SHIFT	1
#define XILINX_DPDMA_EINTR_PRE_ERR_MASK		(0x3f << 7)
#define XILINX_DPDMA_EINTR_PRE_ERR_SHIFT	7
#define XILINX_DPDMA_EINTR_CRC_ERR_MASK		(0x3f << 13)
#define XILINX_DPDMA_EINTR_CRC_ERR_SHIFT	13
#define XILINX_DPDMA_EINTR_WR_AXI_ERR_MASK	(0x3f << 19)
#define XILINX_DPDMA_EINTR_WR_AXI_ERR_SHIFT	19
#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_MASK	(0x3f << 25)
#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_SHIFT	25
#define XILINX_DPDMA_EINTR_RD_CMD_FIFO_FULL	BIT(31)
#define XILINX_DPDMA_EINTR_CHAN_ERR_MASK	0x2082082
#define XILINX_DPDMA_EINTR_CHAN_ERR		0x7ffffffe
#define XILINX_DPDMA_EINTR_GLOBAL_ERR		0x80000001
#define XILINX_DPDMA_EINTR_ALL			0xffffffff
#define XILINX_DPDMA_CNTL			0x100
#define XILINX_DPDMA_GBL			0x104
#define XILINX_DPDMA_GBL_TRIG_SHIFT		0
#define XILINX_DPDMA_GBL_RETRIG_SHIFT		6
#define XILINX_DPDMA_ALC0_CNTL			0x108
#define XILINX_DPDMA_ALC0_STATUS		0x10c
#define XILINX_DPDMA_ALC0_MAX			0x110
#define XILINX_DPDMA_ALC0_MIN			0x114
#define XILINX_DPDMA_ALC0_ACC			0x118
#define XILINX_DPDMA_ALC0_ACC_TRAN		0x11c
#define XILINX_DPDMA_ALC1_CNTL			0x120
#define XILINX_DPDMA_ALC1_STATUS		0x124
#define XILINX_DPDMA_ALC1_MAX			0x128
#define XILINX_DPDMA_ALC1_MIN			0x12c
#define XILINX_DPDMA_ALC1_ACC			0x130
#define XILINX_DPDMA_ALC1_ACC_TRAN		0x134

/* Channel register */
#define XILINX_DPDMA_CH_BASE			0x200
#define XILINX_DPDMA_CH_OFFSET			0x100
#define XILINX_DPDMA_CH_DESC_START_ADDRE	0x0
#define XILINX_DPDMA_CH_DESC_START_ADDR		0x4
#define XILINX_DPDMA_CH_DESC_NEXT_ADDRE		0x8
#define XILINX_DPDMA_CH_DESC_NEXT_ADDR		0xc
#define XILINX_DPDMA_CH_PYLD_CUR_ADDRE		0x10
#define XILINX_DPDMA_CH_PYLD_CUR_ADDR		0x14
#define XILINX_DPDMA_CH_CNTL			0x18
#define XILINX_DPDMA_CH_CNTL_ENABLE		BIT(0)
#define XILINX_DPDMA_CH_CNTL_PAUSE		BIT(1)
#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_SHIFT	2
#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_SHIFT	6
#define XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_SHIFT	10
#define XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS	11
#define XILINX_DPDMA_CH_STATUS			0x1c
#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK	(0xf << 21)
#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_SHIFT	21
#define XILINX_DPDMA_CH_VDO			0x20
#define XILINX_DPDMA_CH_PYLD_SZ			0x24
#define XILINX_DPDMA_CH_DESC_ID			0x28
/* DPDMA descriptor fields */
#define XILINX_DPDMA_DESC_CONTROL_PREEMBLE	(0xa5)
#define XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR	BIT(8)
#define XILINX_DPDMA_DESC_CONTROL_DESC_UPDATE	BIT(9)
#define XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE	BIT(10)
#define XILINX_DPDMA_DESC_CONTROL_FRAG_MODE	BIT(18)
#define XILINX_DPDMA_DESC_CONTROL_LAST		BIT(19)
#define XILINX_DPDMA_DESC_CONTROL_ENABLE_CRC	BIT(20)
#define XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME	BIT(21)
#define XILINX_DPDMA_DESC_ID_MASK		(0xffff << 0)
#define XILINX_DPDMA_DESC_ID_SHIFT		(0)
#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK	(0x3ffff << 0)
#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT	(0)
#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK	(0x3fff << 18)
#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT	(18)
#define XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK	(0xffff)
#define XILINX_DPDMA_DESC_ADDR_EXT_ADDR_SHIFT	(16)

#define XILINX_DPDMA_ALIGN_BYTES		256

#define XILINX_DPDMA_NUM_CHAN			6
#define XILINX_DPDMA_PAGE_MASK			((1 << 12) - 1)
#define XILINX_DPDMA_PAGE_SHIFT			12
 * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor
 * @control: control configuration field
 * @desc_id: descriptor ID
 * @xfer_size: transfer size
 * @hsize_stride: horizontal size and stride
 * @timestamp_lsb: LSB of the time stamp
 * @timestamp_msb: MSB of the time stamp
 * @addr_ext: upper 16 bits of the 48-bit addresses (next_desc and src_addr)
 * @next_desc: lower 32 bits of the next descriptor address
 * @src_addr: payload source address (lower 32 bits of the 1st 4KB page)
 * @addr_ext_23: upper 16 bits of the 48-bit addresses (src_addr2 and src_addr3)
 * @addr_ext_45: upper 16 bits of the 48-bit addresses (src_addr4 and src_addr5)
 * @src_addr2: payload source address (lower 32 bits of the 2nd 4KB page)
 * @src_addr3: payload source address (lower 32 bits of the 3rd 4KB page)
 * @src_addr4: payload source address (lower 32 bits of the 4th 4KB page)
 * @src_addr5: payload source address (lower 32 bits of the 5th 4KB page)
 * @crc: descriptor CRC
struct xilinx_dpdma_hw_desc {
} __aligned(XILINX_DPDMA_ALIGN_BYTES);
 * struct xilinx_dpdma_sw_desc - DPDMA software descriptor
 * @hw: DPDMA hardware descriptor
 * @node: list node for software descriptors
 * @phys: physical address of the software descriptor
struct xilinx_dpdma_sw_desc {
	struct xilinx_dpdma_hw_desc hw;
	struct list_head node;
 * enum xilinx_dpdma_tx_desc_status - DPDMA tx descriptor status
 * @PREPARED: descriptor is prepared for the transaction
 * @ACTIVE: transaction is in progress or has completed successfully
 * @ERRORED: descriptor encountered errors
enum xilinx_dpdma_tx_desc_status {
 * struct xilinx_dpdma_tx_desc - DPDMA transaction descriptor
 * @async_tx: DMA async transaction descriptor
 * @descriptors: list of software descriptors
 * @node: list node for transaction descriptors
 * @status: tx descriptor status
 * @done_cnt: number of completion notifications to deliver
struct xilinx_dpdma_tx_desc {
	struct dma_async_tx_descriptor async_tx;
	struct list_head descriptors;
	struct list_head node;
	enum xilinx_dpdma_tx_desc_status status;
	unsigned int done_cnt;
 * enum xilinx_dpdma_chan_id - DPDMA channel ID
 * @VIDEO0: first video channel
 * @VIDEO1: second video channel for multi-plane YUV formats
 * @VIDEO2: third video channel for multi-plane YUV formats
 * @GRAPHICS: graphics channel
 * @AUDIO0: first audio channel
 * @AUDIO1: second audio channel
enum xilinx_dpdma_chan_id {
 * enum xilinx_dpdma_chan_status - DPDMA channel status
 * @STREAMING: actively streaming state
enum xilinx_dpdma_chan_status {
 * DPDMA descriptor placement
 * --------------------------
 * The DPDMA descriptor lifetime is described with the following placements:
 *
 * allocated_desc -> submitted_desc -> pending_desc -> active_desc -> done_list
 *
 * Transitions are triggered as follows:
 *
 * -> allocated_desc : a descriptor allocation
 * allocated_desc -> submitted_desc: a descriptor submission
 * submitted_desc -> pending_desc: a request to issue a pending descriptor
 * pending_desc -> active_desc: VSYNC intr when a desc is scheduled to DPDMA
 * active_desc -> done_list: VSYNC intr when DPDMA switches to a new desc
 * struct xilinx_dpdma_chan - DPDMA channel
 * @common: generic dma channel structure
 * @reg: register base address
 * @wait_to_stop: queue to wait for outstanding transactions before stopping
 * @status: channel status
 * @first_frame: flag for the first frame of stream
 * @video_group: flag if multi-channel operation is needed for video channels
 * @lock: lock to access struct xilinx_dpdma_chan
 * @desc_pool: descriptor allocation pool
 * @done_task: done IRQ bottom half handler
 * @err_task: error IRQ bottom half handler
 * @allocated_desc: allocated descriptor
 * @submitted_desc: submitted descriptor
 * @pending_desc: pending descriptor to be scheduled in the next period
 * @active_desc: descriptor that the DPDMA channel is active on
 * @done_list: done descriptor list
 * @xdev: DPDMA device
struct xilinx_dpdma_chan {
	struct dma_chan common;
	enum xilinx_dpdma_chan_id id;
	wait_queue_head_t wait_to_stop;
	enum xilinx_dpdma_chan_status status;
	struct dma_pool *desc_pool;
	struct tasklet_struct done_task;
	struct tasklet_struct err_task;
	struct xilinx_dpdma_tx_desc *allocated_desc;
	struct xilinx_dpdma_tx_desc *submitted_desc;
	struct xilinx_dpdma_tx_desc *pending_desc;
	struct xilinx_dpdma_tx_desc *active_desc;
	struct list_head done_list;
	struct xilinx_dpdma_device *xdev;

 * struct xilinx_dpdma_device - DPDMA device
 * @common: generic dma device structure
 * @reg: register base address
 * @dev: generic device structure
 * @axi_clk: axi clock
 * @chan: DPDMA channels
 * @ext_addr: flag for 64 bit system (48 bit addressing)
 * @desc_addr: descriptor addressing callback (32 bit vs 64 bit)
struct xilinx_dpdma_device {
	struct dma_device common;
	struct xilinx_dpdma_chan *chan[XILINX_DPDMA_NUM_CHAN];
	void (*desc_addr)(struct xilinx_dpdma_sw_desc *sw_desc,
			  struct xilinx_dpdma_sw_desc *prev,
			  dma_addr_t dma_addr[], unsigned int num_src_addr);
#ifdef CONFIG_XILINX_DPDMA_DEBUG_FS
#define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE	32
#define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR	"65535"
#define IN_RANGE(x, min, max) ((x) >= (min) && (x) <= (max))

/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
enum xilinx_dpdma_testcases {

struct xilinx_dpdma_debugfs {
	enum xilinx_dpdma_testcases testcase;
	u16 xilinx_dpdma_intr_done_count;
	enum xilinx_dpdma_chan_id chan_id;

static struct xilinx_dpdma_debugfs dpdma_debugfs;

struct xilinx_dpdma_debugfs_request {
	enum xilinx_dpdma_testcases tc;
	ssize_t (*read_handler)(char **kern_buff);
	ssize_t (*write_handler)(char **cmd);
static void xilinx_dpdma_debugfs_intr_done_count_incr(int chan_id)
	if (chan_id == dpdma_debugfs.chan_id)
		dpdma_debugfs.xilinx_dpdma_intr_done_count++;

static s64 xilinx_dpdma_debugfs_argument_value(char *arg)
	if (!kstrtos64(arg, 0, &value))

xilinx_dpdma_debugfs_desc_done_intr_write(char **dpdma_test_arg)
	arg = strsep(dpdma_test_arg, " ");
	if (strncasecmp(arg, "start", 5) != 0)
	arg_chan_id = strsep(dpdma_test_arg, " ");
	id = xilinx_dpdma_debugfs_argument_value(arg_chan_id);
	if (id < 0 || !IN_RANGE(id, VIDEO0, AUDIO1))
	dpdma_debugfs.testcase = DPDMA_TC_INTR_DONE;
	dpdma_debugfs.xilinx_dpdma_intr_done_count = 0;
	dpdma_debugfs.chan_id = id;

static ssize_t xilinx_dpdma_debugfs_desc_done_intr_read(char **kern_buff)
	dpdma_debugfs.testcase = DPDMA_TC_NONE;
	out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
	out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
	snprintf(*kern_buff, out_str_len, "%d",
		 dpdma_debugfs.xilinx_dpdma_intr_done_count);
/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = {
	{"DESCRIPTOR_DONE_INTR", DPDMA_TC_INTR_DONE,
	 xilinx_dpdma_debugfs_desc_done_intr_read,
	 xilinx_dpdma_debugfs_desc_done_intr_write},
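/*
 * Example (illustrative, assuming CONFIG_XILINX_DPDMA_DEBUG_FS is set and
 * debugfs is mounted at /sys/kernel/debug; run as root, since the file is
 * created with mode 0444):
 *
 *	# arm the DESCRIPTOR_DONE_INTR testcase on channel 3 (GRAPHICS,
 *	# assuming the channel IDs follow their declaration order)
 *	echo "DESCRIPTOR_DONE_INTR start 3" > /sys/kernel/debug/dpdma/testcase
 *	# ...let some frames go through...
 *	# read back the number of done interrupts counted on that channel
 *	cat /sys/kernel/debug/dpdma/testcase
 */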
static ssize_t xilinx_dpdma_debugfs_write(struct file *f, const char __user
					  *buf, size_t size, loff_t *pos)
	char *dpdma_test_req;
	if (*pos != 0 || size <= 0)
	/* Supporting a single test instance as of now */
	if (dpdma_debugfs.testcase != DPDMA_TC_NONE)
	kern_buff = kzalloc(size, GFP_KERNEL);
	ret = strncpy_from_user(kern_buff, buf, size);
	/* Read the testcase name from a user request */
	dpdma_test_req = strsep(&kern_buff, " ");
	for (i = 0; i < ARRAY_SIZE(dpdma_debugfs_reqs); i++) {
		if (!strcasecmp(dpdma_test_req, dpdma_debugfs_reqs[i].req)) {
			if (!dpdma_debugfs_reqs[i].write_handler(&kern_buff)) {
static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
					 size_t size, loff_t *pos)
	char *kern_buff = NULL;
	size_t kern_buff_len, out_str_len;
	kern_buff = kzalloc(XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
		dpdma_debugfs.testcase = DPDMA_TC_NONE;
	if (dpdma_debugfs.testcase == DPDMA_TC_NONE) {
		out_str_len = strlen("No testcase executed");
		out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
		snprintf(kern_buff, out_str_len, "%s", "No testcase executed");
		ret = dpdma_debugfs_reqs[dpdma_debugfs.testcase].read_handler(
	kern_buff_len = strlen(kern_buff);
	size = min(size, kern_buff_len);
	ret = copy_to_user(buf, kern_buff, size);
static const struct file_operations fops_xilinx_dpdma_dbgfs = {
	.owner = THIS_MODULE,
	.read = xilinx_dpdma_debugfs_read,
	.write = xilinx_dpdma_debugfs_write,
static int xilinx_dpdma_debugfs_init(struct device *dev)
	struct dentry *xilinx_dpdma_debugfs_dir, *xilinx_dpdma_debugfs_file;
	dpdma_debugfs.testcase = DPDMA_TC_NONE;
	xilinx_dpdma_debugfs_dir = debugfs_create_dir("dpdma", NULL);
	if (!xilinx_dpdma_debugfs_dir) {
		dev_err(dev, "debugfs_create_dir failed\n");
	xilinx_dpdma_debugfs_file =
		debugfs_create_file("testcase", 0444,
				    xilinx_dpdma_debugfs_dir, NULL,
				    &fops_xilinx_dpdma_dbgfs);
	if (!xilinx_dpdma_debugfs_file) {
		dev_err(dev, "debugfs_create_file testcase failed\n");
	debugfs_remove_recursive(xilinx_dpdma_debugfs_dir);
	xilinx_dpdma_debugfs_dir = NULL;

static int xilinx_dpdma_debugfs_init(struct device *dev)

static void xilinx_dpdma_debugfs_intr_done_count_incr(int chan_id)

#endif /* CONFIG_XILINX_DPDMA_DEBUG_FS */
#define to_dpdma_tx_desc(tx) \
	container_of(tx, struct xilinx_dpdma_tx_desc, async_tx)

#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dpdma_chan, common)

static inline u32 dpdma_read(void __iomem *base, u32 offset)
	return ioread32(base + offset);

static inline void dpdma_write(void __iomem *base, u32 offset, u32 val)
	iowrite32(val, base + offset);

static inline void dpdma_clr(void __iomem *base, u32 offset, u32 clr)
	dpdma_write(base, offset, dpdma_read(base, offset) & ~clr);

static inline void dpdma_set(void __iomem *base, u32 offset, u32 set)
	dpdma_write(base, offset, dpdma_read(base, offset) | set);
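/*
 * Example (illustrative only): these helpers give read-modify-write access
 * to the DPDMA registers. Pausing a channel, for instance, is a single bit
 * set in the channel control register, and resuming clears the same bit:
 *
 *	dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
 *	dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
 *
 * This is exactly what xilinx_dpdma_chan_pause() and
 * xilinx_dpdma_chan_unpause() below do.
 */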
/* Xilinx DPDMA descriptor operations */

 * xilinx_dpdma_sw_desc_next_32 - Set 32 bit address of a next sw descriptor
 * @sw_desc: current software descriptor
 * @next: next descriptor
 * Update the current sw descriptor @sw_desc with 32 bit address of the next
xilinx_dpdma_sw_desc_next_32(struct xilinx_dpdma_sw_desc *sw_desc,
			     struct xilinx_dpdma_sw_desc *next)
	sw_desc->hw.next_desc = next->phys;

 * xilinx_dpdma_sw_desc_addr_32 - Update the sw descriptor with 32 bit address
 * @sw_desc: software descriptor
 * @prev: previous descriptor
 * @dma_addr: array of dma addresses
 * @num_src_addr: number of addresses in @dma_addr
 * Update the descriptor @sw_desc with 32 bit address.
static void xilinx_dpdma_sw_desc_addr_32(struct xilinx_dpdma_sw_desc *sw_desc,
					 struct xilinx_dpdma_sw_desc *prev,
					 dma_addr_t dma_addr[],
					 unsigned int num_src_addr)
	struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
	hw_desc->src_addr = dma_addr[0];
	xilinx_dpdma_sw_desc_next_32(prev, sw_desc);
	for (i = 1; i < num_src_addr; i++) {
		u32 *addr = &hw_desc->src_addr2;
		frag_addr = dma_addr[i];
		addr[i - 1] = frag_addr;
 * xilinx_dpdma_sw_desc_next_64 - Set 64 bit address of a next sw descriptor
 * @sw_desc: current software descriptor
 * @next: next descriptor
 * Update the current sw descriptor @sw_desc with 64 bit address of the next
xilinx_dpdma_sw_desc_next_64(struct xilinx_dpdma_sw_desc *sw_desc,
			     struct xilinx_dpdma_sw_desc *next)
	sw_desc->hw.next_desc = (u32)next->phys;
	sw_desc->hw.addr_ext |= ((u64)next->phys >> 32) &
				XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;

 * xilinx_dpdma_sw_desc_addr_64 - Update the sw descriptor with 64 bit address
 * @sw_desc: software descriptor
 * @prev: previous descriptor
 * @dma_addr: array of dma addresses
 * @num_src_addr: number of addresses in @dma_addr
 * Update the descriptor @sw_desc with 64 bit address.
static void xilinx_dpdma_sw_desc_addr_64(struct xilinx_dpdma_sw_desc *sw_desc,
					 struct xilinx_dpdma_sw_desc *prev,
					 dma_addr_t dma_addr[],
					 unsigned int num_src_addr)
	struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
	hw_desc->src_addr = (u32)dma_addr[0];
		((u64)dma_addr[0] >> 32) & XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;
	xilinx_dpdma_sw_desc_next_64(prev, sw_desc);
	for (i = 1; i < num_src_addr; i++) {
		u32 *addr = &hw_desc->src_addr2;
		u32 *addr_ext = &hw_desc->addr_ext_23;
		frag_addr = dma_addr[i];
		addr[i - 1] = (u32)frag_addr;
		frag_addr &= XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;
		frag_addr <<= XILINX_DPDMA_DESC_ADDR_EXT_ADDR_SHIFT * (i % 2);
		addr_ext[i / 2] = frag_addr;
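/*
 * Worked example (illustrative; it assumes the elided statement above shifts
 * the fragment address right by 32 bits before masking): on a 64-bit system
 * with dma_addr[1] == 0x1234567000, the second fragment (i == 1) is stored as
 *
 *	src_addr2          = 0x34567000	(lower 32 bits)
 *	addr_ext_23[31:16] = 0x0012	(upper 16 bits, shifted left by
 *					 ADDR_EXT_ADDR_SHIFT since i is odd)
 *
 * so each addr_ext_* word packs the 16-bit extensions of two payload
 * addresses, mirroring what addr_ext does for next_desc and src_addr.
 */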
/* Xilinx DPDMA channel descriptor operations */

 * xilinx_dpdma_chan_alloc_sw_desc - Allocate a software descriptor
 * @chan: DPDMA channel
 * Allocate a software descriptor from the channel's descriptor pool.
 * Return: a software descriptor or NULL.
static struct xilinx_dpdma_sw_desc *
xilinx_dpdma_chan_alloc_sw_desc(struct xilinx_dpdma_chan *chan)
	struct xilinx_dpdma_sw_desc *sw_desc;
	sw_desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
	memset(sw_desc, 0, sizeof(*sw_desc));
	sw_desc->phys = phys;

 * xilinx_dpdma_chan_free_sw_desc - Free a software descriptor
 * @chan: DPDMA channel
 * @sw_desc: software descriptor to free
 * Free a software descriptor from the channel's descriptor pool.
xilinx_dpdma_chan_free_sw_desc(struct xilinx_dpdma_chan *chan,
			       struct xilinx_dpdma_sw_desc *sw_desc)
	dma_pool_free(chan->desc_pool, sw_desc, sw_desc->phys);
 * xilinx_dpdma_chan_dump_tx_desc - Dump a tx descriptor
 * @chan: DPDMA channel
 * @tx_desc: tx descriptor to dump
 * Dump contents of a tx descriptor
static void xilinx_dpdma_chan_dump_tx_desc(struct xilinx_dpdma_chan *chan,
					   struct xilinx_dpdma_tx_desc *tx_desc)
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct device *dev = chan->xdev->dev;
	dev_dbg(dev, "------- TX descriptor dump start -------\n");
	dev_dbg(dev, "------- channel ID = %d -------\n", chan->id);
	list_for_each_entry(sw_desc, &tx_desc->descriptors, node) {
		struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
		dev_dbg(dev, "------- HW descriptor %d -------\n", i++);
		dev_dbg(dev, "descriptor phys: %pad\n", &sw_desc->phys);
		dev_dbg(dev, "control: 0x%08x\n", hw_desc->control);
		dev_dbg(dev, "desc_id: 0x%08x\n", hw_desc->desc_id);
		dev_dbg(dev, "xfer_size: 0x%08x\n", hw_desc->xfer_size);
		dev_dbg(dev, "hsize_stride: 0x%08x\n", hw_desc->hsize_stride);
		dev_dbg(dev, "timestamp_lsb: 0x%08x\n", hw_desc->timestamp_lsb);
		dev_dbg(dev, "timestamp_msb: 0x%08x\n", hw_desc->timestamp_msb);
		dev_dbg(dev, "addr_ext: 0x%08x\n", hw_desc->addr_ext);
		dev_dbg(dev, "next_desc: 0x%08x\n", hw_desc->next_desc);
		dev_dbg(dev, "src_addr: 0x%08x\n", hw_desc->src_addr);
		dev_dbg(dev, "addr_ext_23: 0x%08x\n", hw_desc->addr_ext_23);
		dev_dbg(dev, "addr_ext_45: 0x%08x\n", hw_desc->addr_ext_45);
		dev_dbg(dev, "src_addr2: 0x%08x\n", hw_desc->src_addr2);
		dev_dbg(dev, "src_addr3: 0x%08x\n", hw_desc->src_addr3);
		dev_dbg(dev, "src_addr4: 0x%08x\n", hw_desc->src_addr4);
		dev_dbg(dev, "src_addr5: 0x%08x\n", hw_desc->src_addr5);
		dev_dbg(dev, "crc: 0x%08x\n", hw_desc->crc);

	dev_dbg(dev, "------- TX descriptor dump end -------\n");
 * xilinx_dpdma_chan_alloc_tx_desc - Allocate a transaction descriptor
 * @chan: DPDMA channel
 * Allocate a tx descriptor.
 * Return: a tx descriptor or NULL.
static struct xilinx_dpdma_tx_desc *
xilinx_dpdma_chan_alloc_tx_desc(struct xilinx_dpdma_chan *chan)
	struct xilinx_dpdma_tx_desc *tx_desc;
	tx_desc = kzalloc(sizeof(*tx_desc), GFP_KERNEL);
	INIT_LIST_HEAD(&tx_desc->descriptors);
	tx_desc->status = PREPARED;

 * xilinx_dpdma_chan_free_tx_desc - Free a transaction descriptor
 * @chan: DPDMA channel
 * @tx_desc: tx descriptor
 * Free the tx descriptor @tx_desc including its software descriptors.
xilinx_dpdma_chan_free_tx_desc(struct xilinx_dpdma_chan *chan,
			       struct xilinx_dpdma_tx_desc *tx_desc)
	struct xilinx_dpdma_sw_desc *sw_desc, *next;
	list_for_each_entry_safe(sw_desc, next, &tx_desc->descriptors, node) {
		list_del(&sw_desc->node);
		xilinx_dpdma_chan_free_sw_desc(chan, sw_desc);
 * xilinx_dpdma_chan_submit_tx_desc - Submit a transaction descriptor
 * @chan: DPDMA channel
 * @tx_desc: tx descriptor
 * Submit the tx descriptor @tx_desc to the channel @chan.
 * Return: a cookie assigned to the tx descriptor
xilinx_dpdma_chan_submit_tx_desc(struct xilinx_dpdma_chan *chan,
				 struct xilinx_dpdma_tx_desc *tx_desc)
	struct xilinx_dpdma_sw_desc *sw_desc;
	spin_lock_irqsave(&chan->lock, flags);
	if (chan->submitted_desc) {
		cookie = chan->submitted_desc->async_tx.cookie;
	cookie = dma_cookie_assign(&tx_desc->async_tx);
	/* Assign the cookie to descriptors in this transaction */
	/* Only 16 bits are used, but that should be enough */
	list_for_each_entry(sw_desc, &tx_desc->descriptors, node)
		sw_desc->hw.desc_id = cookie;
	if (tx_desc != chan->allocated_desc)
		dev_err(chan->xdev->dev, "desc != allocated_desc\n");
	chan->allocated_desc = NULL;
	chan->submitted_desc = tx_desc;
	if (chan->id == VIDEO1 || chan->id == VIDEO2) {
		chan->video_group = true;
		chan->xdev->chan[VIDEO0]->video_group = true;
	spin_unlock_irqrestore(&chan->lock, flags);
 * xilinx_dpdma_chan_free_desc_list - Free a descriptor list
 * @chan: DPDMA channel
 * @list: tx descriptor list
 * Free tx descriptors in the list @list.
static void xilinx_dpdma_chan_free_desc_list(struct xilinx_dpdma_chan *chan,
					     struct list_head *list)
	struct xilinx_dpdma_tx_desc *tx_desc, *next;
	list_for_each_entry_safe(tx_desc, next, list, node) {
		list_del(&tx_desc->node);
		xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);

 * xilinx_dpdma_chan_free_all_desc - Free all descriptors of the channel
 * @chan: DPDMA channel
 * Free all descriptors associated with the channel. The channel should be
 * disabled before this function is called; otherwise, this function may
 * result in misbehavior of the system due to remaining outstanding
static void xilinx_dpdma_chan_free_all_desc(struct xilinx_dpdma_chan *chan)
	spin_lock_irqsave(&chan->lock, flags);
	dev_dbg(chan->xdev->dev, "chan->status = %s\n",
		chan->status == STREAMING ? "STREAMING" : "IDLE");
	xilinx_dpdma_chan_free_tx_desc(chan, chan->allocated_desc);
	chan->allocated_desc = NULL;
	xilinx_dpdma_chan_free_tx_desc(chan, chan->submitted_desc);
	chan->submitted_desc = NULL;
	xilinx_dpdma_chan_free_tx_desc(chan, chan->pending_desc);
	chan->pending_desc = NULL;
	xilinx_dpdma_chan_free_tx_desc(chan, chan->active_desc);
	chan->active_desc = NULL;
	xilinx_dpdma_chan_free_desc_list(chan, &chan->done_list);
	spin_unlock_irqrestore(&chan->lock, flags);
 * xilinx_dpdma_chan_cleanup_desc - Clean up descriptors
 * @chan: DPDMA channel
 * Trigger the completion callbacks of descriptors with finished transactions.
 * Free descriptors which are no longer in use.
static void xilinx_dpdma_chan_cleanup_desc(struct xilinx_dpdma_chan *chan)
	struct xilinx_dpdma_tx_desc *desc;
	dma_async_tx_callback callback;
	void *callback_param;
	spin_lock_irqsave(&chan->lock, flags);
	while (!list_empty(&chan->done_list)) {
		desc = list_first_entry(&chan->done_list,
					struct xilinx_dpdma_tx_desc, node);
		list_del(&desc->node);
		cnt = desc->done_cnt;
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		spin_unlock_irqrestore(&chan->lock, flags);
		for (i = 0; i < cnt; i++)
			callback(callback_param);
		spin_lock_irqsave(&chan->lock, flags);
		xilinx_dpdma_chan_free_tx_desc(chan, desc);
	if (chan->active_desc) {
		cnt = chan->active_desc->done_cnt;
		chan->active_desc->done_cnt = 0;
		callback = chan->active_desc->async_tx.callback;
		callback_param = chan->active_desc->async_tx.callback_param;
		spin_unlock_irqrestore(&chan->lock, flags);
		for (i = 0; i < cnt; i++)
			callback(callback_param);
		spin_lock_irqsave(&chan->lock, flags);
	spin_unlock_irqrestore(&chan->lock, flags);
 * xilinx_dpdma_chan_desc_active - Set the descriptor as active
 * @chan: DPDMA channel
 * Make the pending descriptor @chan->pending_desc active. This function
 * should be called when the channel starts operating on the pending descriptor.
static void xilinx_dpdma_chan_desc_active(struct xilinx_dpdma_chan *chan)
	spin_lock_irqsave(&chan->lock, flags);
	if (!chan->pending_desc)
	if (chan->active_desc)
		list_add_tail(&chan->active_desc->node, &chan->done_list);
	chan->active_desc = chan->pending_desc;
	chan->pending_desc = NULL;
	spin_unlock_irqrestore(&chan->lock, flags);
 * xilinx_dpdma_chan_desc_done_intr - Mark the current descriptor as 'done'
 * @chan: DPDMA channel
 * Mark the current active descriptor @chan->active_desc as 'done'. This
 * function should be called to mark completion of the currently active
static void xilinx_dpdma_chan_desc_done_intr(struct xilinx_dpdma_chan *chan)
	unsigned long flags;
	spin_lock_irqsave(&chan->lock, flags);
	xilinx_dpdma_debugfs_intr_done_count_incr(chan->id);
	if (!chan->active_desc) {
		dev_dbg(chan->xdev->dev, "done intr with no active desc\n");
	chan->active_desc->done_cnt++;
	if (chan->active_desc->status == PREPARED) {
		dma_cookie_complete(&chan->active_desc->async_tx);
		chan->active_desc->status = ACTIVE;
	spin_unlock_irqrestore(&chan->lock, flags);
	tasklet_schedule(&chan->done_task);
 * xilinx_dpdma_chan_prep_slave_sg - Prepare a scatter-gather dma descriptor
 * @chan: DPDMA channel
 * @sgl: scatter-gather list
 * Prepare a tx descriptor including internal software/hardware descriptors
 * for the given scatter-gather transaction.
 * Return: A dma async tx descriptor on success, or NULL.
static struct dma_async_tx_descriptor *
xilinx_dpdma_chan_prep_slave_sg(struct xilinx_dpdma_chan *chan,
				struct scatterlist *sgl)
	struct xilinx_dpdma_tx_desc *tx_desc;
	struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
	struct scatterlist *iter = sgl;
	if (chan->allocated_desc)
		return &chan->allocated_desc->async_tx;
	tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
	while (!sg_is_chain(iter))
		line_size += sg_dma_len(iter++);
		struct xilinx_dpdma_hw_desc *hw_desc;
		dma_addr_t dma_addr[4];
		unsigned int num_pages = 0;
		sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
		while (!sg_is_chain(sgl) && !sg_is_last(sgl)) {
			dma_addr[num_pages] = sg_dma_address(sgl++);
			if (!IS_ALIGNED(dma_addr[num_pages++],
					XILINX_DPDMA_ALIGN_BYTES)) {
				dev_err(chan->xdev->dev,
					"buffer should be aligned at %d B\n",
					XILINX_DPDMA_ALIGN_BYTES);
		chan->xdev->desc_addr(sw_desc, last, dma_addr, num_pages);
		hw_desc = &sw_desc->hw;
		hw_desc->xfer_size = line_size;
		hw_desc->hsize_stride =
			line_size << XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
		hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
		hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_FRAG_MODE;
		hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
		list_add_tail(&sw_desc->node, &tx_desc->descriptors);
		if (sg_is_last(sgl))
		sgl = sg_chain_ptr(sgl);
	sw_desc = list_first_entry(&tx_desc->descriptors,
				   struct xilinx_dpdma_sw_desc, node);
	if (chan->xdev->ext_addr)
		xilinx_dpdma_sw_desc_next_64(last, sw_desc);
		xilinx_dpdma_sw_desc_next_32(last, sw_desc);
	last->hw.control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
	last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
	chan->allocated_desc = tx_desc;
	return &tx_desc->async_tx;

	xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
 * xilinx_dpdma_chan_prep_cyclic - Prepare a cyclic dma descriptor
 * @chan: DPDMA channel
 * @buf_addr: buffer address
 * @buf_len: buffer length
 * @period_len: length of a single period
 * Prepare a tx descriptor including internal software/hardware descriptors
 * for the given cyclic transaction.
 * Return: A dma async tx descriptor on success, or NULL.
static struct dma_async_tx_descriptor *
xilinx_dpdma_chan_prep_cyclic(struct xilinx_dpdma_chan *chan,
			      dma_addr_t buf_addr, size_t buf_len,
	struct xilinx_dpdma_tx_desc *tx_desc;
	struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
	unsigned int periods = buf_len / period_len;
	if (chan->allocated_desc)
		return &chan->allocated_desc->async_tx;
	tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
	for (i = 0; i < periods; i++) {
		struct xilinx_dpdma_hw_desc *hw_desc;
		if (!IS_ALIGNED(buf_addr, XILINX_DPDMA_ALIGN_BYTES)) {
			dev_err(chan->xdev->dev,
				"buffer should be aligned at %d B\n",
				XILINX_DPDMA_ALIGN_BYTES);
		sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
		chan->xdev->desc_addr(sw_desc, last, &buf_addr, 1);
		hw_desc = &sw_desc->hw;
		hw_desc->xfer_size = period_len;
		hw_desc->hsize_stride =
			XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
		hw_desc->hsize_stride |=
			XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT;
		hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
		hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
		hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
		list_add_tail(&sw_desc->node, &tx_desc->descriptors);
		buf_addr += period_len;
	sw_desc = list_first_entry(&tx_desc->descriptors,
				   struct xilinx_dpdma_sw_desc, node);
	if (chan->xdev->ext_addr)
		xilinx_dpdma_sw_desc_next_64(last, sw_desc);
		xilinx_dpdma_sw_desc_next_32(last, sw_desc);
	last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
	chan->allocated_desc = tx_desc;
	return &tx_desc->async_tx;

	xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
 * xilinx_dpdma_chan_prep_interleaved - Prepare an interleaved dma descriptor
 * @chan: DPDMA channel
 * @xt: dma interleaved template
 * Prepare a tx descriptor including internal software/hardware descriptors
 * Return: A dma async tx descriptor on success, or NULL.
static struct dma_async_tx_descriptor *
xilinx_dpdma_chan_prep_interleaved(struct xilinx_dpdma_chan *chan,
				   struct dma_interleaved_template *xt)
	struct xilinx_dpdma_tx_desc *tx_desc;
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct xilinx_dpdma_hw_desc *hw_desc;
	size_t hsize = xt->sgl[0].size;
	size_t stride = hsize + xt->sgl[0].icg;
	if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
		dev_err(chan->xdev->dev, "buffer should be aligned at %d B\n",
			XILINX_DPDMA_ALIGN_BYTES);
	if (chan->allocated_desc)
		return &chan->allocated_desc->async_tx;
	tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
	sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
	chan->xdev->desc_addr(sw_desc, sw_desc, &xt->src_start, 1);
	hw_desc = &sw_desc->hw;
	hw_desc->xfer_size = hsize * xt->numf;
	hw_desc->hsize_stride = hsize <<
		XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
	hw_desc->hsize_stride |= (stride / 16) <<
		XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
	list_add_tail(&sw_desc->node, &tx_desc->descriptors);
	chan->allocated_desc = tx_desc;
	return &tx_desc->async_tx;

	xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
/* Xilinx DPDMA channel operations */

 * xilinx_dpdma_chan_enable - Enable the channel
 * @chan: DPDMA channel
 * Enable the channel and its interrupts. Set the QoS values for video class.
static inline void xilinx_dpdma_chan_enable(struct xilinx_dpdma_chan *chan)
	reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
	reg |= XILINX_DPDMA_INTR_GLOBAL_MASK;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
	reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
	reg |= XILINX_DPDMA_EINTR_GLOBAL_ERR;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);
	reg = XILINX_DPDMA_CH_CNTL_ENABLE;
	reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
	       XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_SHIFT;
	reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
	       XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_SHIFT;
	reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
	       XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_SHIFT;
	dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, reg);
 * xilinx_dpdma_chan_disable - Disable the channel
 * @chan: DPDMA channel
 * Disable the channel and its interrupts.
static inline void xilinx_dpdma_chan_disable(struct xilinx_dpdma_chan *chan)
	reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS, reg);
	reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIDS, reg);
	dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
 * xilinx_dpdma_chan_pause - Pause the channel
 * @chan: DPDMA channel
 * Pause the channel.
static inline void xilinx_dpdma_chan_pause(struct xilinx_dpdma_chan *chan)
	dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);

 * xilinx_dpdma_chan_unpause - Unpause the channel
 * @chan: DPDMA channel
 * Unpause the channel.
static inline void xilinx_dpdma_chan_unpause(struct xilinx_dpdma_chan *chan)
	dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);

xilinx_dpdma_chan_video_group_ready(struct xilinx_dpdma_chan *chan)
	struct xilinx_dpdma_device *xdev = chan->xdev;
	for (i = VIDEO0; i < GRAPHICS; i++) {
		if (xdev->chan[i]->video_group &&
		    xdev->chan[i]->status != STREAMING)
		if (xdev->chan[i]->video_group)
 * xilinx_dpdma_chan_issue_pending - Issue the pending descriptor
 * @chan: DPDMA channel
 * Issue the first pending descriptor from @chan->submitted_desc. If the channel
 * is already streaming, the channel is re-triggered with the pending
static void xilinx_dpdma_chan_issue_pending(struct xilinx_dpdma_chan *chan)
	struct xilinx_dpdma_device *xdev = chan->xdev;
	struct xilinx_dpdma_sw_desc *sw_desc;
	unsigned long flags;
	spin_lock_irqsave(&chan->lock, flags);
	if (!chan->submitted_desc || chan->pending_desc)
	chan->pending_desc = chan->submitted_desc;
	chan->submitted_desc = NULL;
	sw_desc = list_first_entry(&chan->pending_desc->descriptors,
				   struct xilinx_dpdma_sw_desc, node);
	dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR,
		    (u32)sw_desc->phys);
	dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE,
		    ((u64)sw_desc->phys >> 32) &
		    XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK);
	if (chan->first_frame) {
		chan->first_frame = false;
		if (chan->video_group) {
			channels = xilinx_dpdma_chan_video_group_ready(chan);
			reg = channels << XILINX_DPDMA_GBL_TRIG_SHIFT;
			reg = 1 << (XILINX_DPDMA_GBL_TRIG_SHIFT + chan->id);
		if (chan->video_group) {
			channels = xilinx_dpdma_chan_video_group_ready(chan);
			reg = channels << XILINX_DPDMA_GBL_RETRIG_SHIFT;
			reg = 1 << (XILINX_DPDMA_GBL_RETRIG_SHIFT + chan->id);
	dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
	spin_unlock_irqrestore(&chan->lock, flags);
 * xilinx_dpdma_chan_start - Start the channel
 * @chan: DPDMA channel
 * Start the channel by enabling interrupts and triggering the channel.
 * If the channel is already enabled or there's no pending descriptor, this
 * function won't do anything on the channel.
static void xilinx_dpdma_chan_start(struct xilinx_dpdma_chan *chan)
	unsigned long flags;
	spin_lock_irqsave(&chan->lock, flags);
	if (!chan->submitted_desc || chan->status == STREAMING)
	xilinx_dpdma_chan_unpause(chan);
	xilinx_dpdma_chan_enable(chan);
	chan->first_frame = true;
	chan->status = STREAMING;
	spin_unlock_irqrestore(&chan->lock, flags);
 * xilinx_dpdma_chan_ostand - Number of outstanding transactions
 * @chan: DPDMA channel
 * Read and return the number of outstanding transactions from register.
 * Return: Number of outstanding transactions from the status register.
static inline u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
	return (dpdma_read(chan->reg, XILINX_DPDMA_CH_STATUS) &
		XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK) >>
	       XILINX_DPDMA_CH_STATUS_OTRAN_CNT_SHIFT;
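/*
 * Worked example (illustrative): with CH_STATUS == 0x00a00000, the
 * outstanding transaction count field (bits 24:21) yields
 * (0x00a00000 & 0x01e00000) >> 21 == 5 outstanding transactions.
 */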
 * xilinx_dpdma_chan_notify_no_ostand - Notify no outstanding transaction event
 * @chan: DPDMA channel
 * Notify waiters of the 'no outstanding' event, so waiters can stop the channel
 * safely. This function is supposed to be called when the 'no outstanding'
 * interrupt is generated. The 'no outstanding' interrupt is disabled and should
 * be re-enabled when this event is handled. If the channel status register still
 * shows some number of outstanding transactions, the interrupt remains enabled.
 * Return: 0 on success. On failure, -EWOULDBLOCK if there's still outstanding
static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
	cnt = xilinx_dpdma_chan_ostand(chan);
		dev_dbg(chan->xdev->dev, "%d outstanding transactions\n", cnt);
		return -EWOULDBLOCK;
	/* Disable the 'no outstanding' interrupt */
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS,
		    1 << (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
	wake_up(&chan->wait_to_stop);
 * xilinx_dpdma_chan_wait_no_ostand - Wait for the outstanding transaction intr
 * @chan: DPDMA channel
 * Wait for the no outstanding transaction interrupt. This function can sleep
 * Return: 0 on success. On failure, -ETIMEDOUT for a timeout, or the error code
 * from wait_event_interruptible_timeout().
static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
	/* Wait for the 'no outstanding transaction' interrupt for up to 50 msec */
	ret = wait_event_interruptible_timeout(chan->wait_to_stop,
					       !xilinx_dpdma_chan_ostand(chan),
					       msecs_to_jiffies(50));
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
		    (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
	dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
		xilinx_dpdma_chan_ostand(chan));
 * xilinx_dpdma_chan_poll_no_ostand - Poll the outstanding transaction status reg
 * @chan: DPDMA channel
 * Poll the outstanding transaction status, and return when there's no
 * outstanding transaction. This function can be used in the interrupt context
 * or where atomicity is required. The calling thread may wait more than 50 ms.
 * Return: 0 on success, or -ETIMEDOUT.
static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
	u32 cnt, loop = 50000;
	/* Poll at least for 50ms (20 fps). */
		cnt = xilinx_dpdma_chan_ostand(chan);
	} while (loop-- > 0 && cnt);
		dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
			    (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
	dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
		xilinx_dpdma_chan_ostand(chan));
 * xilinx_dpdma_chan_stop - Stop the channel
 * @chan: DPDMA channel
 * Stop the channel with the following sequence: 1. Pause, 2. Wait (sleep) for
 * no outstanding transaction interrupt, 3. Disable the channel.
 * Return: 0 on success, or error code from xilinx_dpdma_chan_wait_no_ostand().
static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
	unsigned long flags;
	xilinx_dpdma_chan_pause(chan);
	ret = xilinx_dpdma_chan_wait_no_ostand(chan);
	spin_lock_irqsave(&chan->lock, flags);
	xilinx_dpdma_chan_disable(chan);
	chan->status = IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
 * xilinx_dpdma_chan_alloc_resources - Allocate resources for the channel
 * @chan: DPDMA channel
 * Allocate a descriptor pool for the channel.
 * Return: 0 on success, or -ENOMEM if failed to allocate a pool.
static int xilinx_dpdma_chan_alloc_resources(struct xilinx_dpdma_chan *chan)
	chan->desc_pool = dma_pool_create(dev_name(chan->xdev->dev),
					  sizeof(struct xilinx_dpdma_sw_desc),
					  __alignof__(struct xilinx_dpdma_sw_desc), 0);
	if (!chan->desc_pool) {
		dev_err(chan->xdev->dev,
			"failed to allocate a descriptor pool\n");

 * xilinx_dpdma_chan_free_resources - Free all resources for the channel
 * @chan: DPDMA channel
 * Free all descriptors and the descriptor pool for the channel.
static void xilinx_dpdma_chan_free_resources(struct xilinx_dpdma_chan *chan)
	xilinx_dpdma_chan_free_all_desc(chan);
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
 * xilinx_dpdma_chan_terminate_all - Terminate the channel and descriptors
 * @chan: DPDMA channel
 * Stop the channel and free all associated descriptors.
 * Return: 0 on success, or the error code from xilinx_dpdma_chan_stop().
static int xilinx_dpdma_chan_terminate_all(struct xilinx_dpdma_chan *chan)
	struct xilinx_dpdma_device *xdev = chan->xdev;
	if (chan->video_group) {
		for (i = VIDEO0; i < GRAPHICS; i++) {
			if (xdev->chan[i]->video_group &&
			    xdev->chan[i]->status == STREAMING) {
				xilinx_dpdma_chan_pause(xdev->chan[i]);
				xdev->chan[i]->video_group = false;
	ret = xilinx_dpdma_chan_stop(chan);
	xilinx_dpdma_chan_free_all_desc(chan);
 * xilinx_dpdma_chan_err - Detect any channel error
 * @chan: DPDMA channel
 * @isr: masked Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 * Return: true if any channel error occurs, or false otherwise.
xilinx_dpdma_chan_err(struct xilinx_dpdma_chan *chan, u32 isr, u32 eisr)
	if (chan->status == STREAMING &&
	    ((isr & (XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id)) ||
	     (eisr & (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id))))

 * xilinx_dpdma_chan_handle_err - DPDMA channel error handling
 * @chan: DPDMA channel
 * This function is called when any channel error or any global error occurs.
 * The function disables the channel paused by errors and determines
 * whether the current active descriptor can be rescheduled, depending on
 * the descriptor status.
static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
	struct xilinx_dpdma_device *xdev = chan->xdev;
	struct device *dev = xdev->dev;
	unsigned long flags;
	spin_lock_irqsave(&chan->lock, flags);
	dev_dbg(dev, "cur desc addr = 0x%04x%08x\n",
		dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE),
		dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR));
	dev_dbg(dev, "cur payload addr = 0x%04x%08x\n",
		dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE),
		dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR));
	xilinx_dpdma_chan_disable(chan);
	chan->status = IDLE;
	/* Decide if the current descriptor can be rescheduled */
	if (chan->active_desc) {
		switch (chan->active_desc->status) {
			xilinx_dpdma_chan_free_tx_desc(chan,
						       chan->submitted_desc);
			chan->submitted_desc = NULL;
			xilinx_dpdma_chan_free_tx_desc(chan,
						       chan->pending_desc);
			chan->pending_desc = NULL;
			chan->active_desc->status = ERRORED;
			chan->submitted_desc = chan->active_desc;
			dev_err(dev, "desc is dropped by unrecoverable err\n");
			xilinx_dpdma_chan_dump_tx_desc(chan, chan->active_desc);
			xilinx_dpdma_chan_free_tx_desc(chan, chan->active_desc);
		chan->active_desc = NULL;
	spin_unlock_irqrestore(&chan->lock, flags);
/* DMA tx descriptor */

static dma_cookie_t xilinx_dpdma_tx_submit(struct dma_async_tx_descriptor *tx)
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(tx->chan);
	struct xilinx_dpdma_tx_desc *tx_desc = to_dpdma_tx_desc(tx);
	return xilinx_dpdma_chan_submit_tx_desc(chan, tx_desc);

/* DMA channel operations */

static struct dma_async_tx_descriptor *
xilinx_dpdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			   unsigned int sg_len,
			   enum dma_transfer_direction direction,
			   unsigned long flags, void *context)
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	struct dma_async_tx_descriptor *async_tx;
	if (direction != DMA_MEM_TO_DEV)
	if (!sgl || sg_len < 2)
	async_tx = xilinx_dpdma_chan_prep_slave_sg(chan, sgl);
	dma_async_tx_descriptor_init(async_tx, dchan);
	async_tx->tx_submit = xilinx_dpdma_tx_submit;
	async_tx->flags = flags;
	async_tx_ack(async_tx);
static struct dma_async_tx_descriptor *
xilinx_dpdma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
			     size_t buf_len, size_t period_len,
			     enum dma_transfer_direction direction,
			     unsigned long flags)
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	struct dma_async_tx_descriptor *async_tx;
	if (direction != DMA_MEM_TO_DEV)
	if (buf_len % period_len)
	async_tx = xilinx_dpdma_chan_prep_cyclic(chan, buf_addr, buf_len,
	dma_async_tx_descriptor_init(async_tx, dchan);
	async_tx->tx_submit = xilinx_dpdma_tx_submit;
	async_tx->flags = flags;
	async_tx_ack(async_tx);

static struct dma_async_tx_descriptor *
xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
				  struct dma_interleaved_template *xt,
				  unsigned long flags)
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	struct dma_async_tx_descriptor *async_tx;
	if (xt->dir != DMA_MEM_TO_DEV)
	if (!xt->numf || !xt->sgl[0].size)
	async_tx = xilinx_dpdma_chan_prep_interleaved(chan, xt);
	dma_async_tx_descriptor_init(async_tx, dchan);
	async_tx->tx_submit = xilinx_dpdma_tx_submit;
	async_tx->flags = flags;
	async_tx_ack(async_tx);
static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan)
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	dma_cookie_init(dchan);
	return xilinx_dpdma_chan_alloc_resources(chan);

static void xilinx_dpdma_free_chan_resources(struct dma_chan *dchan)
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	xilinx_dpdma_chan_free_resources(chan);

static enum dma_status xilinx_dpdma_tx_status(struct dma_chan *dchan,
					      dma_cookie_t cookie,
					      struct dma_tx_state *txstate)
	return dma_cookie_status(dchan, cookie, txstate);

static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	xilinx_dpdma_chan_start(chan);
	xilinx_dpdma_chan_issue_pending(chan);

static int xilinx_dpdma_config(struct dma_chan *dchan,
			       struct dma_slave_config *config)
	if (config->direction != DMA_MEM_TO_DEV)

static int xilinx_dpdma_pause(struct dma_chan *dchan)
	xilinx_dpdma_chan_pause(to_xilinx_chan(dchan));

static int xilinx_dpdma_resume(struct dma_chan *dchan)
	xilinx_dpdma_chan_unpause(to_xilinx_chan(dchan));

static int xilinx_dpdma_terminate_all(struct dma_chan *dchan)
	return xilinx_dpdma_chan_terminate_all(to_xilinx_chan(dchan));
/* Xilinx DPDMA device operations */

 * xilinx_dpdma_err - Detect any global error
 * @isr: Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 * Return: True if any global error occurs, or false otherwise.
static bool xilinx_dpdma_err(u32 isr, u32 eisr)
	if ((isr & XILINX_DPDMA_INTR_GLOBAL_ERR ||
	     eisr & XILINX_DPDMA_EINTR_GLOBAL_ERR))
 * xilinx_dpdma_handle_err_intr - Handle DPDMA error interrupt
 * @xdev: DPDMA device
 * @isr: masked Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 * Handle any error that occurred, based on @isr and @eisr. This function disables
 * the corresponding error interrupts; they should be re-enabled once handling
static void xilinx_dpdma_handle_err_intr(struct xilinx_dpdma_device *xdev,
	bool err = xilinx_dpdma_err(isr, eisr);
	dev_err(xdev->dev, "error intr: isr = 0x%08x, eisr = 0x%08x\n",
	/* Disable channel error interrupts until errors are handled. */
	dpdma_write(xdev->reg, XILINX_DPDMA_IDS,
		    isr & ~XILINX_DPDMA_INTR_GLOBAL_ERR);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS,
		    eisr & ~XILINX_DPDMA_EINTR_GLOBAL_ERR);
	for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
		if (err || xilinx_dpdma_chan_err(xdev->chan[i], isr, eisr))
			tasklet_schedule(&xdev->chan[i]->err_task);
 * xilinx_dpdma_handle_vsync_intr - Handle the VSYNC interrupt
 * @xdev: DPDMA device
 * Handle the VSYNC event. At this point, the current frame becomes active,
 * which means the DPDMA actually starts fetching, and the next frame can be
static void xilinx_dpdma_handle_vsync_intr(struct xilinx_dpdma_device *xdev)
	for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++) {
		if (xdev->chan[i] &&
		    xdev->chan[i]->status == STREAMING) {
			xilinx_dpdma_chan_desc_active(xdev->chan[i]);
			xilinx_dpdma_chan_issue_pending(xdev->chan[i]);

 * xilinx_dpdma_enable_intr - Enable interrupts
 * @xdev: DPDMA device
 * Enable interrupts.
static void xilinx_dpdma_enable_intr(struct xilinx_dpdma_device *xdev)
	dpdma_write(xdev->reg, XILINX_DPDMA_IEN, XILINX_DPDMA_INTR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN, XILINX_DPDMA_EINTR_ALL);

 * xilinx_dpdma_disable_intr - Disable interrupts
 * @xdev: DPDMA device
 * Disable interrupts.
static void xilinx_dpdma_disable_intr(struct xilinx_dpdma_device *xdev)
	dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
/* Interrupt handling operations */

 * xilinx_dpdma_chan_err_task - Per channel tasklet for error handling
 * @data: tasklet data to be cast to the DPDMA channel structure
 * Per channel error handling tasklet. This function waits for the outstanding
 * transaction to complete and triggers error handling. After error handling,
 * it re-enables channel error interrupts and restarts the channel if needed.
static void xilinx_dpdma_chan_err_task(unsigned long data)
	struct xilinx_dpdma_chan *chan = (struct xilinx_dpdma_chan *)data;
	struct xilinx_dpdma_device *xdev = chan->xdev;
	/* Proceed with error handling even when polling fails. */
	xilinx_dpdma_chan_poll_no_ostand(chan);
	xilinx_dpdma_chan_handle_err(chan);
	dpdma_write(xdev->reg, XILINX_DPDMA_IEN,
		    XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN,
		    XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
	xilinx_dpdma_chan_start(chan);
	xilinx_dpdma_chan_issue_pending(chan);
 * xilinx_dpdma_chan_done_task - Per channel tasklet for done interrupt handling
 * @data: tasklet data to be cast to the DPDMA channel structure
 * Per channel done interrupt handling tasklet.
static void xilinx_dpdma_chan_done_task(unsigned long data)
	struct xilinx_dpdma_chan *chan = (struct xilinx_dpdma_chan *)data;
	xilinx_dpdma_chan_cleanup_desc(chan);
static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data)
	struct xilinx_dpdma_device *xdev = data;
	u32 status, error, i;
	unsigned long masked;
	status = dpdma_read(xdev->reg, XILINX_DPDMA_ISR);
	error = dpdma_read(xdev->reg, XILINX_DPDMA_EISR);
	if (!status && !error)
	dpdma_write(xdev->reg, XILINX_DPDMA_ISR, status);
	dpdma_write(xdev->reg, XILINX_DPDMA_EISR, error);
	if (status & XILINX_DPDMA_INTR_VSYNC)
		xilinx_dpdma_handle_vsync_intr(xdev);
	masked = (status & XILINX_DPDMA_INTR_DESC_DONE_MASK) >>
		 XILINX_DPDMA_INTR_DESC_DONE_SHIFT;
	for_each_set_bit(i, &masked, XILINX_DPDMA_NUM_CHAN)
		xilinx_dpdma_chan_desc_done_intr(xdev->chan[i]);
	masked = (status & XILINX_DPDMA_INTR_NO_OSTAND_MASK) >>
		 XILINX_DPDMA_INTR_NO_OSTAND_SHIFT;
	for_each_set_bit(i, &masked, XILINX_DPDMA_NUM_CHAN)
		xilinx_dpdma_chan_notify_no_ostand(xdev->chan[i]);
	masked = status & XILINX_DPDMA_INTR_ERR_ALL;
	if (masked || error)
		xilinx_dpdma_handle_err_intr(xdev, masked, error);
/* Initialization operations */

static struct xilinx_dpdma_chan *
xilinx_dpdma_chan_probe(struct device_node *node,
			struct xilinx_dpdma_device *xdev)
	struct xilinx_dpdma_chan *chan;
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);
	if (of_device_is_compatible(node, "xlnx,video0")) {
	} else if (of_device_is_compatible(node, "xlnx,video1")) {
	} else if (of_device_is_compatible(node, "xlnx,video2")) {
	} else if (of_device_is_compatible(node, "xlnx,graphics")) {
		chan->id = GRAPHICS;
	} else if (of_device_is_compatible(node, "xlnx,audio0")) {
	} else if (of_device_is_compatible(node, "xlnx,audio1")) {
		dev_err(xdev->dev, "invalid channel compatible string in DT\n");
		return ERR_PTR(-EINVAL);
	chan->reg = xdev->reg + XILINX_DPDMA_CH_BASE + XILINX_DPDMA_CH_OFFSET *
	chan->status = IDLE;
	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->done_list);
	init_waitqueue_head(&chan->wait_to_stop);
	tasklet_init(&chan->done_task, xilinx_dpdma_chan_done_task,
		     (unsigned long)chan);
	tasklet_init(&chan->err_task, xilinx_dpdma_chan_err_task,
		     (unsigned long)chan);
	chan->common.device = &xdev->common;
	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;
static void xilinx_dpdma_chan_remove(struct xilinx_dpdma_chan *chan)
	tasklet_kill(&chan->err_task);
	tasklet_kill(&chan->done_task);
	list_del(&chan->common.device_node);

static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
	struct xilinx_dpdma_device *xdev = ofdma->of_dma_data;
	uint32_t chan_id = dma_spec->args[0];
	if (chan_id >= XILINX_DPDMA_NUM_CHAN)
	if (!xdev->chan[chan_id])
	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
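/*
 * Example (illustrative): a device tree consumer requests a channel by its
 * DPDMA channel ID, which of_dma_xilinx_xlate() takes from args[0]. Assuming
 * the channel IDs follow their declaration order (VIDEO0 == 0 ... AUDIO1 == 5),
 * a client binding to the graphics channel could look like:
 *
 *	dma-client {
 *		dmas = <&xlnx_dpdma 3>;
 *		dma-names = "gfx";
 *	};
 *
 * where the &xlnx_dpdma label and the "gfx" name are placeholders for this
 * sketch.
 */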
static int xilinx_dpdma_probe(struct platform_device *pdev)
	struct xilinx_dpdma_device *xdev;
	struct xilinx_dpdma_chan *chan;
	struct dma_device *ddev;
	struct resource *res;
	struct device_node *node, *child;
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	xdev->dev = &pdev->dev;
	ddev = &xdev->common;
	ddev->dev = &pdev->dev;
	node = xdev->dev->of_node;
	xdev->axi_clk = devm_clk_get(xdev->dev, "axi_clk");
	if (IS_ERR(xdev->axi_clk))
		return PTR_ERR(xdev->axi_clk);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xdev->reg))
		return PTR_ERR(xdev->reg);
	irq = platform_get_irq(pdev, 0);
		dev_err(xdev->dev, "failed to get platform irq\n");
	ret = devm_request_irq(xdev->dev, irq, xilinx_dpdma_irq_handler,
			       IRQF_SHARED, dev_name(xdev->dev), xdev);
		dev_err(xdev->dev, "failed to request IRQ\n");
	INIT_LIST_HEAD(&xdev->common.channels);
	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
	dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
	ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1);
	ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
	ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
	ddev->device_prep_slave_sg = xilinx_dpdma_prep_slave_sg;
	ddev->device_prep_dma_cyclic = xilinx_dpdma_prep_dma_cyclic;
	ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
	ddev->device_tx_status = xilinx_dpdma_tx_status;
	ddev->device_issue_pending = xilinx_dpdma_issue_pending;
	ddev->device_config = xilinx_dpdma_config;
	ddev->device_pause = xilinx_dpdma_pause;
	ddev->device_resume = xilinx_dpdma_resume;
	ddev->device_terminate_all = xilinx_dpdma_terminate_all;
	ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED);
	ddev->directions = BIT(DMA_MEM_TO_DEV);
	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	for_each_child_of_node(node, child) {
		chan = xilinx_dpdma_chan_probe(child, xdev);
			dev_err(xdev->dev, "failed to probe a channel\n");
			ret = PTR_ERR(chan);
	xdev->ext_addr = sizeof(dma_addr_t) > 4;
		xdev->desc_addr = xilinx_dpdma_sw_desc_addr_64;
		xdev->desc_addr = xilinx_dpdma_sw_desc_addr_32;
	ret = clk_prepare_enable(xdev->axi_clk);
		dev_err(xdev->dev, "failed to enable the axi clock\n");
	ret = dma_async_device_register(ddev);
		dev_err(xdev->dev, "failed to register the DMA device\n");
		goto error_dma_async;
	ret = of_dma_controller_register(xdev->dev->of_node,
					 of_dma_xilinx_xlate, ddev);
		dev_err(xdev->dev, "failed to register DMA to DT DMA helper\n");
	xilinx_dpdma_enable_intr(xdev);
	xilinx_dpdma_debugfs_init(&pdev->dev);
	dev_info(&pdev->dev, "Xilinx DPDMA engine is probed\n");

	dma_async_device_unregister(ddev);
	clk_disable_unprepare(xdev->axi_clk);
	for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
		xilinx_dpdma_chan_remove(xdev->chan[i]);

static int xilinx_dpdma_remove(struct platform_device *pdev)
	struct xilinx_dpdma_device *xdev;
	xdev = platform_get_drvdata(pdev);
	xilinx_dpdma_disable_intr(xdev);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&xdev->common);
	clk_disable_unprepare(xdev->axi_clk);
	for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
		xilinx_dpdma_chan_remove(xdev->chan[i]);

static const struct of_device_id xilinx_dpdma_of_match[] = {
	{ .compatible = "xlnx,dpdma",},
	{ /* end of table */ },
MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);

static struct platform_driver xilinx_dpdma_driver = {
	.probe			= xilinx_dpdma_probe,
	.remove			= xilinx_dpdma_remove,
		.name		= "xilinx-dpdma",
		.of_match_table	= xilinx_dpdma_of_match,

module_platform_driver(xilinx_dpdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx DPDMA driver");
MODULE_LICENSE("GPL v2");