// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx ZynqMP DPDMA Engine driver
 *
 * Copyright (C) 2015 - 2020 Xilinx, Inc.
 *
 * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <dt-bindings/dma/xlnx-zynqmp-dpdma.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

/* DPDMA registers */
#define XILINX_DPDMA_ERR_CTRL 0x000
#define XILINX_DPDMA_ISR 0x004
#define XILINX_DPDMA_IMR 0x008
#define XILINX_DPDMA_IEN 0x00c
#define XILINX_DPDMA_IDS 0x010
#define XILINX_DPDMA_INTR_DESC_DONE(n) BIT((n) + 0)
#define XILINX_DPDMA_INTR_DESC_DONE_MASK GENMASK(5, 0)
#define XILINX_DPDMA_INTR_NO_OSTAND(n) BIT((n) + 6)
#define XILINX_DPDMA_INTR_NO_OSTAND_MASK GENMASK(11, 6)
#define XILINX_DPDMA_INTR_AXI_ERR(n) BIT((n) + 12)
#define XILINX_DPDMA_INTR_AXI_ERR_MASK GENMASK(17, 12)
#define XILINX_DPDMA_INTR_DESC_ERR(n) BIT((n) + 18)
#define XILINX_DPDMA_INTR_DESC_ERR_MASK GENMASK(23, 18)
#define XILINX_DPDMA_INTR_WR_CMD_FIFO_FULL BIT(24)
#define XILINX_DPDMA_INTR_WR_DATA_FIFO_FULL BIT(25)
#define XILINX_DPDMA_INTR_AXI_4K_CROSS BIT(26)
#define XILINX_DPDMA_INTR_VSYNC BIT(27)
#define XILINX_DPDMA_INTR_CHAN_ERR_MASK 0x00041000
#define XILINX_DPDMA_INTR_CHAN_ERR 0x00fff000
#define XILINX_DPDMA_INTR_GLOBAL_ERR 0x07000000
#define XILINX_DPDMA_INTR_ERR_ALL 0x07fff000
#define XILINX_DPDMA_INTR_CHAN_MASK 0x00041041
#define XILINX_DPDMA_INTR_GLOBAL_MASK 0x0f000000
#define XILINX_DPDMA_INTR_ALL 0x0fffffff
#define XILINX_DPDMA_EISR 0x014
#define XILINX_DPDMA_EIMR 0x018
#define XILINX_DPDMA_EIEN 0x01c
#define XILINX_DPDMA_EIDS 0x020
#define XILINX_DPDMA_EINTR_INV_APB BIT(0)
#define XILINX_DPDMA_EINTR_RD_AXI_ERR(n) BIT((n) + 1)
#define XILINX_DPDMA_EINTR_RD_AXI_ERR_MASK GENMASK(6, 1)
#define XILINX_DPDMA_EINTR_PRE_ERR(n) BIT((n) + 7)
#define XILINX_DPDMA_EINTR_PRE_ERR_MASK GENMASK(12, 7)
#define XILINX_DPDMA_EINTR_CRC_ERR(n) BIT((n) + 13)
#define XILINX_DPDMA_EINTR_CRC_ERR_MASK GENMASK(18, 13)
#define XILINX_DPDMA_EINTR_WR_AXI_ERR(n) BIT((n) + 19)
#define XILINX_DPDMA_EINTR_WR_AXI_ERR_MASK GENMASK(24, 19)
#define XILINX_DPDMA_EINTR_DESC_DONE_ERR(n) BIT((n) + 25)
#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_MASK GENMASK(30, 25)
#define XILINX_DPDMA_EINTR_RD_CMD_FIFO_FULL BIT(31)
#define XILINX_DPDMA_EINTR_CHAN_ERR_MASK 0x02082082
#define XILINX_DPDMA_EINTR_CHAN_ERR 0x7ffffffe
#define XILINX_DPDMA_EINTR_GLOBAL_ERR 0x80000001
#define XILINX_DPDMA_EINTR_ALL 0xffffffff
#define XILINX_DPDMA_CNTL 0x100
#define XILINX_DPDMA_GBL 0x104
#define XILINX_DPDMA_GBL_TRIG_MASK(n) ((n) << 0)
#define XILINX_DPDMA_GBL_RETRIG_MASK(n) ((n) << 6)
#define XILINX_DPDMA_ALC0_CNTL 0x108
#define XILINX_DPDMA_ALC0_STATUS 0x10c
#define XILINX_DPDMA_ALC0_MAX 0x110
#define XILINX_DPDMA_ALC0_MIN 0x114
#define XILINX_DPDMA_ALC0_ACC 0x118
#define XILINX_DPDMA_ALC0_ACC_TRAN 0x11c
#define XILINX_DPDMA_ALC1_CNTL 0x120
#define XILINX_DPDMA_ALC1_STATUS 0x124
#define XILINX_DPDMA_ALC1_MAX 0x128
#define XILINX_DPDMA_ALC1_MIN 0x12c
#define XILINX_DPDMA_ALC1_ACC 0x130
#define XILINX_DPDMA_ALC1_ACC_TRAN 0x134
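
/*
 * Illustrative sketch (not part of the driver): the per-channel interrupt
 * macros above simply index into the listed bit ranges. For channel 2:
 *
 *	XILINX_DPDMA_INTR_DESC_DONE(2) == BIT(2)	// done bits are 0-5
 *	XILINX_DPDMA_INTR_NO_OSTAND(2) == BIT(8)	// no-ostand bits are 6-11
 *	XILINX_DPDMA_INTR_AXI_ERR(2)   == BIT(14)	// AXI error bits are 12-17
 *
 * XILINX_DPDMA_INTR_CHAN_MASK (0x00041041, bits 0, 6, 12 and 18) shifted left
 * by the channel ID thus selects that channel's done, no-ostand, AXI error
 * and descriptor error bits at once, as done in xilinx_dpdma_chan_enable().
 */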

/* Channel register */
#define XILINX_DPDMA_CH_BASE 0x200
#define XILINX_DPDMA_CH_OFFSET 0x100
#define XILINX_DPDMA_CH_DESC_START_ADDRE 0x000
#define XILINX_DPDMA_CH_DESC_START_ADDRE_MASK GENMASK(15, 0)
#define XILINX_DPDMA_CH_DESC_START_ADDR 0x004
#define XILINX_DPDMA_CH_DESC_NEXT_ADDRE 0x008
#define XILINX_DPDMA_CH_DESC_NEXT_ADDR 0x00c
#define XILINX_DPDMA_CH_PYLD_CUR_ADDRE 0x010
#define XILINX_DPDMA_CH_PYLD_CUR_ADDR 0x014
#define XILINX_DPDMA_CH_CNTL 0x018
#define XILINX_DPDMA_CH_CNTL_ENABLE BIT(0)
#define XILINX_DPDMA_CH_CNTL_PAUSE BIT(1)
#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK GENMASK(5, 2)
#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK GENMASK(9, 6)
#define XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK GENMASK(13, 10)
#define XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS 11
#define XILINX_DPDMA_CH_STATUS 0x01c
#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK GENMASK(24, 21)
#define XILINX_DPDMA_CH_VDO 0x020
#define XILINX_DPDMA_CH_PYLD_SZ 0x024
#define XILINX_DPDMA_CH_DESC_ID 0x028

/* DPDMA descriptor fields */
#define XILINX_DPDMA_DESC_CONTROL_PREAMBLE 0xa5
#define XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR BIT(8)
#define XILINX_DPDMA_DESC_CONTROL_DESC_UPDATE BIT(9)
#define XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE BIT(10)
#define XILINX_DPDMA_DESC_CONTROL_FRAG_MODE BIT(18)
#define XILINX_DPDMA_DESC_CONTROL_LAST BIT(19)
#define XILINX_DPDMA_DESC_CONTROL_ENABLE_CRC BIT(20)
#define XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME BIT(21)
#define XILINX_DPDMA_DESC_ID_MASK GENMASK(15, 0)
#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK GENMASK(17, 0)
#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK GENMASK(31, 18)
#define XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK GENMASK(15, 0)
#define XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK GENMASK(31, 16)

#define XILINX_DPDMA_ALIGN_BYTES 256
#define XILINX_DPDMA_LINESIZE_ALIGN_BITS 128

#define XILINX_DPDMA_NUM_CHAN 6

struct xilinx_dpdma_chan;
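
/*
 * Illustrative note (not part of the driver): the six channels are addressed
 * with the IDs from dt-bindings/dma/xlnx-zynqmp-dpdma.h, which index
 * xdev->chan[] directly:
 *
 *	ZYNQMP_DPDMA_VIDEO0 .. ZYNQMP_DPDMA_VIDEO2	// the video group
 *	ZYNQMP_DPDMA_GRAPHICS
 *	ZYNQMP_DPDMA_AUDIO0, ZYNQMP_DPDMA_AUDIO1
 *
 * with ZYNQMP_DPDMA_VIDEO0 the lowest and ZYNQMP_DPDMA_AUDIO1 the highest of
 * the XILINX_DPDMA_NUM_CHAN (6) IDs, as assumed by the range checks below.
 */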

/**
 * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor
 * @control: control configuration field
 * @desc_id: descriptor ID
 * @xfer_size: transfer size
 * @hsize_stride: horizontal size and stride
 * @timestamp_lsb: LSB of time stamp
 * @timestamp_msb: MSB of time stamp
 * @addr_ext: upper 16 bits of the 48-bit addresses (next_desc and src_addr)
 * @next_desc: next descriptor 32 bit address
 * @src_addr: payload source address (1st page, 32 LSB)
 * @addr_ext_23: payload source address (2nd and 3rd pages, 16 LSBs)
 * @addr_ext_45: payload source address (4th and 5th pages, 16 LSBs)
 * @src_addr2: payload source address (2nd page, 32 LSB)
 * @src_addr3: payload source address (3rd page, 32 LSB)
 * @src_addr4: payload source address (4th page, 32 LSB)
 * @src_addr5: payload source address (5th page, 32 LSB)
 * @crc: descriptor CRC
 */
struct xilinx_dpdma_hw_desc {
        u32 control;
        u32 desc_id;
        u32 xfer_size;
        u32 hsize_stride;
        u32 timestamp_lsb;
        u32 timestamp_msb;
        u32 addr_ext;
        u32 next_desc;
        u32 src_addr;
        u32 addr_ext_23;
        u32 addr_ext_45;
        u32 src_addr2;
        u32 src_addr3;
        u32 src_addr4;
        u32 src_addr5;
        u32 crc;
} __aligned(XILINX_DPDMA_ALIGN_BYTES);
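
/*
 * Illustrative sketch (not part of the driver): with 48-bit addressing, a
 * hypothetical source address 0x1234_9abcdef0 is split as
 *
 *	src_addr = 0x9abcdef0;			// lower 32 bits
 *	addr_ext |= FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK,
 *			       0x1234);		// upper 16 bits
 *
 * Pages 2 to 5 pack their upper 16 bits pairwise into addr_ext_23 and
 * addr_ext_45, as implemented in xilinx_dpdma_sw_desc_set_dma_addrs() below.
 */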

/**
 * struct xilinx_dpdma_sw_desc - DPDMA software descriptor
 * @hw: DPDMA hardware descriptor
 * @node: list node for software descriptors
 * @dma_addr: DMA address of the software descriptor
 */
struct xilinx_dpdma_sw_desc {
        struct xilinx_dpdma_hw_desc hw;
        struct list_head node;
        dma_addr_t dma_addr;
};

/**
 * struct xilinx_dpdma_tx_desc - DPDMA transaction descriptor
 * @vdesc: virtual DMA descriptor
 * @chan: DPDMA channel
 * @descriptors: list of software descriptors
 * @error: an error has been detected with this descriptor
 */
struct xilinx_dpdma_tx_desc {
        struct virt_dma_desc vdesc;
        struct xilinx_dpdma_chan *chan;
        struct list_head descriptors;
        bool error;
};

#define to_dpdma_tx_desc(_desc) \
        container_of(_desc, struct xilinx_dpdma_tx_desc, vdesc)

/**
 * struct xilinx_dpdma_chan - DPDMA channel
 * @vchan: virtual DMA channel
 * @reg: register base address
 * @id: channel ID
 * @wait_to_stop: queue to wait for outstanding transactions before stopping
 * @running: true if the channel is running
 * @first_frame: flag for the first frame of stream
 * @video_group: flag if multi-channel operation is needed for video channels
 * @lock: lock to access struct xilinx_dpdma_chan
 * @desc_pool: descriptor allocation pool
 * @err_task: error IRQ bottom half handler
 * @desc: references to descriptors being processed
 * @desc.pending: descriptor scheduled to the hardware, pending execution
 * @desc.active: descriptor being executed by the hardware
 * @xdev: DPDMA device
 */
struct xilinx_dpdma_chan {
        struct virt_dma_chan vchan;
        void __iomem *reg;
        unsigned int id;

        wait_queue_head_t wait_to_stop;
        bool running;
        bool first_frame;
        bool video_group;

        spinlock_t lock; /* lock to access struct xilinx_dpdma_chan */
        struct dma_pool *desc_pool;
        struct tasklet_struct err_task;

        struct {
                struct xilinx_dpdma_tx_desc *pending;
                struct xilinx_dpdma_tx_desc *active;
        } desc;

        struct xilinx_dpdma_device *xdev;
};

#define to_xilinx_chan(_chan) \
        container_of(_chan, struct xilinx_dpdma_chan, vchan.chan)

/**
 * struct xilinx_dpdma_device - DPDMA device
 * @common: generic dma device structure
 * @reg: register base address
 * @dev: generic device structure
 * @irq: the interrupt number
 * @axi_clk: axi clock
 * @chan: DPDMA channels
 * @ext_addr: flag for 64 bit system (48 bit addressing)
 */
struct xilinx_dpdma_device {
        struct dma_device common;
        void __iomem *reg;
        struct device *dev;
        int irq;

        struct clk *axi_clk;
        struct xilinx_dpdma_chan *chan[XILINX_DPDMA_NUM_CHAN];

        bool ext_addr;
};

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS

#define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE 32
#define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR "65535"

/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
enum xilinx_dpdma_testcases {
        DPDMA_TC_INTR_DONE,
        DPDMA_TC_NONE
};

struct xilinx_dpdma_debugfs {
        enum xilinx_dpdma_testcases testcase;
        u16 xilinx_dpdma_irq_done_count;
        unsigned int chan_id;
};

static struct xilinx_dpdma_debugfs dpdma_debugfs;

struct xilinx_dpdma_debugfs_request {
        const char *name;
        enum xilinx_dpdma_testcases tc;
        ssize_t (*read)(char *buf);
        int (*write)(char *args);
};

static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
{
        if (chan->id == dpdma_debugfs.chan_id)
                dpdma_debugfs.xilinx_dpdma_irq_done_count++;
}

static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf)
{
        size_t out_str_len;

        dpdma_debugfs.testcase = DPDMA_TC_NONE;

        out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
        out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
                            out_str_len);
        snprintf(buf, out_str_len, "%d",
                 dpdma_debugfs.xilinx_dpdma_irq_done_count);

        return 0;
}

static int xilinx_dpdma_debugfs_desc_done_irq_write(char *args)
{
        char *arg;
        int ret;
        u32 id;

        arg = strsep(&args, " ");
        if (!arg || strncasecmp(arg, "start", 5))
                return -EINVAL;

        arg = strsep(&args, " ");
        if (!arg)
                return -EINVAL;

        ret = kstrtou32(arg, 0, &id);
        if (ret < 0)
                return ret;

        if (id < ZYNQMP_DPDMA_VIDEO0 || id > ZYNQMP_DPDMA_AUDIO1)
                return -EINVAL;

        dpdma_debugfs.testcase = DPDMA_TC_INTR_DONE;
        dpdma_debugfs.xilinx_dpdma_irq_done_count = 0;
        dpdma_debugfs.chan_id = id;

        return 0;
}

/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
static struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = {
        {
                .name = "DESCRIPTOR_DONE_INTR",
                .tc = DPDMA_TC_INTR_DONE,
                .read = xilinx_dpdma_debugfs_desc_done_irq_read,
                .write = xilinx_dpdma_debugfs_desc_done_irq_write,
        },
};

static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
                                         size_t size, loff_t *pos)
{
        enum xilinx_dpdma_testcases testcase;
        char *kern_buff;
        int ret = 0;

        if (*pos != 0 || size <= 0)
                return -EINVAL;

        kern_buff = kzalloc(XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
        if (!kern_buff) {
                dpdma_debugfs.testcase = DPDMA_TC_NONE;
                return -ENOMEM;
        }

        testcase = READ_ONCE(dpdma_debugfs.testcase);
        if (testcase != DPDMA_TC_NONE) {
                ret = dpdma_debugfs_reqs[testcase].read(kern_buff);
                if (ret < 0)
                        goto done;
        } else {
                strlcpy(kern_buff, "No testcase executed",
                        XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE);
        }

        size = min(size, strlen(kern_buff));
        if (copy_to_user(buf, kern_buff, size))
                ret = -EFAULT;

done:
        kfree(kern_buff);
        if (ret < 0)
                return ret;

        *pos = size + 1;
        return size;
}

static ssize_t xilinx_dpdma_debugfs_write(struct file *f,
                                          const char __user *buf, size_t size,
                                          loff_t *pos)
{
        char *kern_buff, *kern_buff_start;
        char *testcase;
        unsigned int i;
        int ret;

        if (*pos != 0 || size <= 0)
                return -EINVAL;

        /* Supporting single instance of test as of now. */
        if (dpdma_debugfs.testcase != DPDMA_TC_NONE)
                return -EBUSY;

        kern_buff = kzalloc(size, GFP_KERNEL);
        if (!kern_buff)
                return -ENOMEM;
        kern_buff_start = kern_buff;

        ret = strncpy_from_user(kern_buff, buf, size);
        if (ret < 0)
                goto done;

        /* Read the testcase name from a user request. */
        testcase = strsep(&kern_buff, " ");

        for (i = 0; i < ARRAY_SIZE(dpdma_debugfs_reqs); i++) {
                if (!strcasecmp(testcase, dpdma_debugfs_reqs[i].name))
                        break;
        }

        if (i == ARRAY_SIZE(dpdma_debugfs_reqs)) {
                ret = -EINVAL;
                goto done;
        }

        ret = dpdma_debugfs_reqs[i].write(kern_buff);
        if (ret < 0)
                goto done;

        ret = size;

done:
        kfree(kern_buff_start);
        return ret;
}

static const struct file_operations fops_xilinx_dpdma_dbgfs = {
        .owner = THIS_MODULE,
        .read = xilinx_dpdma_debugfs_read,
        .write = xilinx_dpdma_debugfs_write,
};

static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
{
        struct dentry *dent;

        dpdma_debugfs.testcase = DPDMA_TC_NONE;

        dent = debugfs_create_file("testcase", 0444, xdev->common.dbg_dev_root,
                                   NULL, &fops_xilinx_dpdma_dbgfs);
        if (IS_ERR(dent))
                dev_err(xdev->dev, "Failed to create debugfs testcase file\n");
}

#else
static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
{
}

static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
{
}
#endif /* CONFIG_DEBUG_FS */
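
/*
 * Illustrative usage of the debugfs interface above (not part of the driver):
 * start the DESCRIPTOR_DONE_INTR testcase on channel 0, then read back the
 * number of done interrupts counted since:
 *
 *	echo "DESCRIPTOR_DONE_INTR start 0" > .../testcase
 *	cat .../testcase
 *
 * where ".../testcase" is the file created below the DMA core's debugfs root
 * (xdev->common.dbg_dev_root) by xilinx_dpdma_debugfs_init().
 */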

/* -----------------------------------------------------------------------------
 * I/O Accessors
 */

static inline u32 dpdma_read(void __iomem *base, u32 offset)
{
        return ioread32(base + offset);
}

static inline void dpdma_write(void __iomem *base, u32 offset, u32 val)
{
        iowrite32(val, base + offset);
}

static inline void dpdma_clr(void __iomem *base, u32 offset, u32 clr)
{
        dpdma_write(base, offset, dpdma_read(base, offset) & ~clr);
}

static inline void dpdma_set(void __iomem *base, u32 offset, u32 set)
{
        dpdma_write(base, offset, dpdma_read(base, offset) | set);
}

/* -----------------------------------------------------------------------------
 * Descriptor Operations
 */

/**
 * xilinx_dpdma_sw_desc_set_dma_addrs - Set DMA addresses in the descriptor
 * @xdev: DPDMA device
 * @sw_desc: The software descriptor in which to set DMA addresses
 * @prev: The previous descriptor
 * @dma_addr: array of dma addresses
 * @num_src_addr: number of addresses in @dma_addr
 *
 * Set all the DMA addresses in the hardware descriptor corresponding to
 * @sw_desc from @dma_addr. If a previous descriptor is specified in @prev, its
 * next descriptor DMA address is set to the DMA address of @sw_desc. @prev may
 * be identical to @sw_desc for cyclic transfers.
 */
static void xilinx_dpdma_sw_desc_set_dma_addrs(struct xilinx_dpdma_device *xdev,
                                               struct xilinx_dpdma_sw_desc *sw_desc,
                                               struct xilinx_dpdma_sw_desc *prev,
                                               dma_addr_t dma_addr[],
                                               unsigned int num_src_addr)
{
        struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
        unsigned int i;

        hw_desc->src_addr = lower_32_bits(dma_addr[0]);
        if (xdev->ext_addr)
                hw_desc->addr_ext |=
                        FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK,
                                   upper_32_bits(dma_addr[0]));

        for (i = 1; i < num_src_addr; i++) {
                u32 *addr = &hw_desc->src_addr2;

                addr[i - 1] = lower_32_bits(dma_addr[i]);

                if (xdev->ext_addr) {
                        u32 *addr_ext = &hw_desc->addr_ext_23;
                        u32 addr_msb;

                        addr_msb = upper_32_bits(dma_addr[i]) & GENMASK(15, 0);
                        addr_msb <<= 16 * ((i - 1) % 2);
                        addr_ext[(i - 1) / 2] |= addr_msb;
                }
        }

        if (!prev)
                return;

        prev->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
        if (xdev->ext_addr)
                prev->hw.addr_ext |=
                        FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
                                   upper_32_bits(sw_desc->dma_addr));
}
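
/*
 * Illustrative sketch (not part of the driver): for a single-descriptor
 * cyclic transfer, passing the descriptor as its own predecessor makes the
 * hardware loop on it forever:
 *
 *	xilinx_dpdma_sw_desc_set_dma_addrs(xdev, sw_desc, sw_desc,
 *					   &frame_dma_addr, 1);
 *
 * where frame_dma_addr is a hypothetical DMA address of the frame buffer.
 * sw_desc->hw.next_desc then points back at sw_desc itself, which is exactly
 * what xilinx_dpdma_chan_prep_interleaved_dma() does below.
 */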

/**
 * xilinx_dpdma_chan_alloc_sw_desc - Allocate a software descriptor
 * @chan: DPDMA channel
 *
 * Allocate a software descriptor from the channel's descriptor pool.
 *
 * Return: a software descriptor or NULL.
 */
static struct xilinx_dpdma_sw_desc *
xilinx_dpdma_chan_alloc_sw_desc(struct xilinx_dpdma_chan *chan)
{
        struct xilinx_dpdma_sw_desc *sw_desc;
        dma_addr_t dma_addr;

        sw_desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &dma_addr);
        if (!sw_desc)
                return NULL;

        sw_desc->dma_addr = dma_addr;

        return sw_desc;
}

/**
 * xilinx_dpdma_chan_free_sw_desc - Free a software descriptor
 * @chan: DPDMA channel
 * @sw_desc: software descriptor to free
 *
 * Free a software descriptor from the channel's descriptor pool.
 */
static void
xilinx_dpdma_chan_free_sw_desc(struct xilinx_dpdma_chan *chan,
                               struct xilinx_dpdma_sw_desc *sw_desc)
{
        dma_pool_free(chan->desc_pool, sw_desc, sw_desc->dma_addr);
}

/**
 * xilinx_dpdma_chan_dump_tx_desc - Dump a tx descriptor
 * @chan: DPDMA channel
 * @tx_desc: tx descriptor to dump
 *
 * Dump the contents of a tx descriptor.
 */
static void xilinx_dpdma_chan_dump_tx_desc(struct xilinx_dpdma_chan *chan,
                                           struct xilinx_dpdma_tx_desc *tx_desc)
{
        struct xilinx_dpdma_sw_desc *sw_desc;
        struct device *dev = chan->xdev->dev;
        unsigned int i = 0;

        dev_dbg(dev, "------- TX descriptor dump start -------\n");
        dev_dbg(dev, "------- channel ID = %d -------\n", chan->id);

        list_for_each_entry(sw_desc, &tx_desc->descriptors, node) {
                struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;

                dev_dbg(dev, "------- HW descriptor %d -------\n", i++);
                dev_dbg(dev, "descriptor DMA addr: %pad\n", &sw_desc->dma_addr);
                dev_dbg(dev, "control: 0x%08x\n", hw_desc->control);
                dev_dbg(dev, "desc_id: 0x%08x\n", hw_desc->desc_id);
                dev_dbg(dev, "xfer_size: 0x%08x\n", hw_desc->xfer_size);
                dev_dbg(dev, "hsize_stride: 0x%08x\n", hw_desc->hsize_stride);
                dev_dbg(dev, "timestamp_lsb: 0x%08x\n", hw_desc->timestamp_lsb);
                dev_dbg(dev, "timestamp_msb: 0x%08x\n", hw_desc->timestamp_msb);
                dev_dbg(dev, "addr_ext: 0x%08x\n", hw_desc->addr_ext);
                dev_dbg(dev, "next_desc: 0x%08x\n", hw_desc->next_desc);
                dev_dbg(dev, "src_addr: 0x%08x\n", hw_desc->src_addr);
                dev_dbg(dev, "addr_ext_23: 0x%08x\n", hw_desc->addr_ext_23);
                dev_dbg(dev, "addr_ext_45: 0x%08x\n", hw_desc->addr_ext_45);
                dev_dbg(dev, "src_addr2: 0x%08x\n", hw_desc->src_addr2);
                dev_dbg(dev, "src_addr3: 0x%08x\n", hw_desc->src_addr3);
                dev_dbg(dev, "src_addr4: 0x%08x\n", hw_desc->src_addr4);
                dev_dbg(dev, "src_addr5: 0x%08x\n", hw_desc->src_addr5);
                dev_dbg(dev, "crc: 0x%08x\n", hw_desc->crc);
        }

        dev_dbg(dev, "------- TX descriptor dump end -------\n");
}

/**
 * xilinx_dpdma_chan_alloc_tx_desc - Allocate a transaction descriptor
 * @chan: DPDMA channel
 *
 * Allocate a tx descriptor.
 *
 * Return: a tx descriptor or NULL.
 */
static struct xilinx_dpdma_tx_desc *
xilinx_dpdma_chan_alloc_tx_desc(struct xilinx_dpdma_chan *chan)
{
        struct xilinx_dpdma_tx_desc *tx_desc;

        tx_desc = kzalloc(sizeof(*tx_desc), GFP_NOWAIT);
        if (!tx_desc)
                return NULL;

        INIT_LIST_HEAD(&tx_desc->descriptors);
        tx_desc->chan = chan;
        tx_desc->error = false;

        return tx_desc;
}

/**
 * xilinx_dpdma_chan_free_tx_desc - Free a virtual DMA descriptor
 * @vdesc: virtual DMA descriptor
 *
 * Free the virtual DMA descriptor @vdesc including its software descriptors.
 */
static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc)
{
        struct xilinx_dpdma_sw_desc *sw_desc, *next;
        struct xilinx_dpdma_tx_desc *desc;

        if (!vdesc)
                return;

        desc = to_dpdma_tx_desc(vdesc);

        list_for_each_entry_safe(sw_desc, next, &desc->descriptors, node) {
                list_del(&sw_desc->node);
                xilinx_dpdma_chan_free_sw_desc(desc->chan, sw_desc);
        }

        kfree(desc);
}

/**
 * xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma
 *					    descriptor
 * @chan: DPDMA channel
 * @xt: dma interleaved template
 *
 * Prepare a tx descriptor including internal software/hardware descriptors
 * for the DMA transfer.
 *
 * Return: A DPDMA TX descriptor on success, or NULL.
 */
static struct xilinx_dpdma_tx_desc *
xilinx_dpdma_chan_prep_interleaved_dma(struct xilinx_dpdma_chan *chan,
                                       struct dma_interleaved_template *xt)
{
        struct xilinx_dpdma_tx_desc *tx_desc;
        struct xilinx_dpdma_sw_desc *sw_desc;
        struct xilinx_dpdma_hw_desc *hw_desc;
        size_t hsize = xt->sgl[0].size;
        size_t stride = hsize + xt->sgl[0].icg;

        if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
                dev_err(chan->xdev->dev,
                        "chan%u: buffer should be aligned at %d B\n",
                        chan->id, XILINX_DPDMA_ALIGN_BYTES);
                return NULL;
        }

        tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
        if (!tx_desc)
                return NULL;

        sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
        if (!sw_desc) {
                xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);
                return NULL;
        }

        xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, sw_desc,
                                           &xt->src_start, 1);

        hw_desc = &sw_desc->hw;
        hsize = ALIGN(hsize, XILINX_DPDMA_LINESIZE_ALIGN_BITS / 8);
        hw_desc->xfer_size = hsize * xt->numf;
        hw_desc->hsize_stride =
                FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK, hsize) |
                FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
                           stride / 16);
        hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREAMBLE;
        hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
        hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
        hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;

        list_add_tail(&sw_desc->node, &tx_desc->descriptors);

        return tx_desc;
}

/* -----------------------------------------------------------------------------
 * DPDMA Channel Operations
 */

/**
 * xilinx_dpdma_chan_enable - Enable the channel
 * @chan: DPDMA channel
 *
 * Enable the channel and its interrupts. Set the QoS values for video class.
 */
static void xilinx_dpdma_chan_enable(struct xilinx_dpdma_chan *chan)
{
        u32 reg;

        reg = (XILINX_DPDMA_INTR_CHAN_MASK << chan->id)
            | XILINX_DPDMA_INTR_GLOBAL_MASK;
        dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
        reg = (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id)
            | XILINX_DPDMA_EINTR_GLOBAL_ERR;
        dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);

        reg = XILINX_DPDMA_CH_CNTL_ENABLE
            | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK,
                         XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
            | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK,
                         XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
            | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK,
                         XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS);
        dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, reg);
}

/**
 * xilinx_dpdma_chan_disable - Disable the channel
 * @chan: DPDMA channel
 *
 * Disable the channel and its interrupts.
 */
static void xilinx_dpdma_chan_disable(struct xilinx_dpdma_chan *chan)
{
        u32 reg;

        reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
        dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS, reg);
        reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
        dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIDS, reg);

        dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
}

/**
 * xilinx_dpdma_chan_pause - Pause the channel
 * @chan: DPDMA channel
 *
 * Pause the channel.
 */
static void xilinx_dpdma_chan_pause(struct xilinx_dpdma_chan *chan)
{
        dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
}

/**
 * xilinx_dpdma_chan_unpause - Unpause the channel
 * @chan: DPDMA channel
 *
 * Unpause the channel.
 */
static void xilinx_dpdma_chan_unpause(struct xilinx_dpdma_chan *chan)
{
        dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
}

static u32 xilinx_dpdma_chan_video_group_ready(struct xilinx_dpdma_chan *chan)
{
        struct xilinx_dpdma_device *xdev = chan->xdev;
        u32 channels = 0;
        unsigned int i;

        for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
                if (xdev->chan[i]->video_group && !xdev->chan[i]->running)
                        return 0;

                if (xdev->chan[i]->video_group)
                        channels |= BIT(i);
        }

        return channels;
}

/**
 * xilinx_dpdma_chan_queue_transfer - Queue the next transfer
 * @chan: DPDMA channel
 *
 * Queue the next descriptor, if any, to the hardware. If the channel is
 * stopped, start it first. Otherwise retrigger it with the next descriptor.
 */
static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
{
        struct xilinx_dpdma_device *xdev = chan->xdev;
        struct xilinx_dpdma_sw_desc *sw_desc;
        struct xilinx_dpdma_tx_desc *desc;
        struct virt_dma_desc *vdesc;
        u32 reg, channels;
        bool first_frame;

        lockdep_assert_held(&chan->lock);

        if (chan->desc.pending)
                return;

        if (!chan->running) {
                xilinx_dpdma_chan_unpause(chan);
                xilinx_dpdma_chan_enable(chan);
                chan->first_frame = true;
                chan->running = true;
        }

        vdesc = vchan_next_desc(&chan->vchan);
        if (!vdesc)
                return;

        desc = to_dpdma_tx_desc(vdesc);
        chan->desc.pending = desc;
        list_del(&desc->vdesc.node);

        /*
         * Assign the cookie to descriptors in this transaction. Only 16 bits
         * will be used, but it should be enough.
         */
        list_for_each_entry(sw_desc, &desc->descriptors, node)
                sw_desc->hw.desc_id = desc->vdesc.tx.cookie;

        sw_desc = list_first_entry(&desc->descriptors,
                                   struct xilinx_dpdma_sw_desc, node);
        dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR,
                    lower_32_bits(sw_desc->dma_addr));
        if (xdev->ext_addr)
                dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE,
                            FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
                                       upper_32_bits(sw_desc->dma_addr)));

        first_frame = chan->first_frame;
        chan->first_frame = false;

        if (chan->video_group) {
                channels = xilinx_dpdma_chan_video_group_ready(chan);
                /*
                 * Trigger the transfer only when all channels in the group are
                 * ready.
                 */
                if (!channels)
                        return;
        } else {
                channels = BIT(chan->id);
        }

        if (first_frame)
                reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
        else
                reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);

        dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
}

/**
 * xilinx_dpdma_chan_ostand - Number of outstanding transactions
 * @chan: DPDMA channel
 *
 * Read and return the number of outstanding transactions from the channel
 * status register.
 *
 * Return: Number of outstanding transactions from the status register.
 */
static u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
{
        return FIELD_GET(XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK,
                         dpdma_read(chan->reg, XILINX_DPDMA_CH_STATUS));
}
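
/*
 * Illustrative sketch (not part of the driver): FIELD_GET() extracts and
 * right-shifts the field selected by the mask. For a hypothetical CH_STATUS
 * value of 0x00600000, bits 21-24 (OTRAN_CNT) contain 0x3:
 *
 *	FIELD_GET(GENMASK(24, 21), 0x00600000) == 0x3
 *
 * i.e. three transactions are still outstanding.
 */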

/**
 * xilinx_dpdma_chan_notify_no_ostand - Notify no outstanding transaction event
 * @chan: DPDMA channel
 *
 * Notify waiters for no outstanding event, so waiters can stop the channel
 * safely. This function is supposed to be called when 'no outstanding'
 * interrupt is generated. The 'no outstanding' interrupt is disabled and
 * should be re-enabled when this event is handled. If the channel status
 * register still shows some number of outstanding transactions, the interrupt
 * remains enabled.
 *
 * Return: 0 on success. On failure, -EWOULDBLOCK if there's still outstanding
 * transaction(s).
 */
static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
{
        u32 cnt;

        cnt = xilinx_dpdma_chan_ostand(chan);
        if (cnt) {
                dev_dbg(chan->xdev->dev,
                        "chan%u: %d outstanding transactions\n",
                        chan->id, cnt);
                return -EWOULDBLOCK;
        }

        /* Disable 'no outstanding' interrupt */
        dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS,
                    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
        wake_up(&chan->wait_to_stop);

        return 0;
}

/**
 * xilinx_dpdma_chan_wait_no_ostand - Wait for the no outstanding irq
 * @chan: DPDMA channel
 *
 * Wait for the no outstanding transaction interrupt. This function can sleep
 * for 50 ms.
 *
 * Return: 0 on success. On failure, -ETIMEDOUT on timeout, or the error code
 * from wait_event_interruptible_timeout().
 */
static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
{
        int ret;

        /* Wait for the 'no outstanding transaction' interrupt for up to 50 ms. */
        ret = wait_event_interruptible_timeout(chan->wait_to_stop,
                                               !xilinx_dpdma_chan_ostand(chan),
                                               msecs_to_jiffies(50));
        if (ret > 0) {
                dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
                            XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
                return 0;
        }

        dev_err(chan->xdev->dev, "chan%u: not ready to stop: %d trans\n",
                chan->id, xilinx_dpdma_chan_ostand(chan));

        if (ret == 0)
                return -ETIMEDOUT;

        return ret;
}

/**
 * xilinx_dpdma_chan_poll_no_ostand - Poll the outstanding transaction status
 * @chan: DPDMA channel
 *
 * Poll the outstanding transaction status, and return when there's no
 * outstanding transaction. This function can be used in interrupt context
 * or where atomicity is required. The calling thread may wait more than 50 ms.
 *
 * Return: 0 on success, or -ETIMEDOUT.
 */
static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
{
        u32 cnt, loop = 50000;

        /* Poll at least for 50ms (20 fps). */
        do {
                cnt = xilinx_dpdma_chan_ostand(chan);
                udelay(1);
        } while (loop-- > 0 && cnt);

        if (loop) {
                dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
                            XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
                return 0;
        }

        dev_err(chan->xdev->dev, "chan%u: not ready to stop: %d trans\n",
                chan->id, xilinx_dpdma_chan_ostand(chan));

        return -ETIMEDOUT;
}

/**
 * xilinx_dpdma_chan_stop - Stop the channel
 * @chan: DPDMA channel
 *
 * Stop a previously paused channel by first waiting for completion of all
 * outstanding transactions and then disabling the channel.
 *
 * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop.
 */
static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
{
        unsigned long flags;
        int ret;

        ret = xilinx_dpdma_chan_wait_no_ostand(chan);
        if (ret)
                return ret;

        spin_lock_irqsave(&chan->lock, flags);
        xilinx_dpdma_chan_disable(chan);
        chan->running = false;
        spin_unlock_irqrestore(&chan->lock, flags);

        return 0;
}

/**
 * xilinx_dpdma_chan_done_irq - Handle hardware descriptor completion
 * @chan: DPDMA channel
 *
 * Handle completion of the currently active descriptor (@chan->desc.active).
 * As we currently support cyclic transfers only, this just invokes the cyclic
 * callback. The descriptor will be completed at the VSYNC interrupt when a
 * new descriptor replaces it.
 */
static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
{
        struct xilinx_dpdma_tx_desc *active;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        xilinx_dpdma_debugfs_desc_done_irq(chan);

        active = chan->desc.active;
        if (active)
                vchan_cyclic_callback(&active->vdesc);
        else
                dev_warn(chan->xdev->dev,
                         "chan%u: DONE IRQ with no active descriptor!\n",
                         chan->id);

        spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dpdma_chan_vsync_irq - Handle hardware descriptor scheduling
 * @chan: DPDMA channel
 *
 * At VSYNC the active descriptor may have been replaced by the pending
 * descriptor. Detect this through the DESC_ID and perform appropriate
 * bookkeeping.
 */
static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
{
        struct xilinx_dpdma_tx_desc *pending;
        struct xilinx_dpdma_sw_desc *sw_desc;
        unsigned long flags;
        u32 desc_id;

        spin_lock_irqsave(&chan->lock, flags);

        pending = chan->desc.pending;
        if (!chan->running || !pending)
                goto out;

        desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID);

        /* If the retrigger raced with vsync, retry at the next frame. */
        sw_desc = list_first_entry(&pending->descriptors,
                                   struct xilinx_dpdma_sw_desc, node);
        if (sw_desc->hw.desc_id != desc_id) {
                dev_dbg(chan->xdev->dev,
                        "chan%u: vsync race lost (%u != %u), retrying\n",
                        chan->id, sw_desc->hw.desc_id, desc_id);
                goto out;
        }

        /*
         * Complete the active descriptor, if any, promote the pending
         * descriptor to active, and queue the next transfer, if any.
         */
        spin_lock(&chan->vchan.lock);
        if (chan->desc.active)
                vchan_cookie_complete(&chan->desc.active->vdesc);
        chan->desc.active = pending;
        chan->desc.pending = NULL;

        xilinx_dpdma_chan_queue_transfer(chan);
        spin_unlock(&chan->vchan.lock);

out:
        spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dpdma_chan_err - Detect any channel error
 * @chan: DPDMA channel
 * @isr: masked Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Return: true if any channel error occurs, or false otherwise.
 */
static bool
xilinx_dpdma_chan_err(struct xilinx_dpdma_chan *chan, u32 isr, u32 eisr)
{
        if (!chan)
                return false;

        if (chan->running &&
            ((isr & (XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id)) ||
             (eisr & (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id))))
                return true;

        return false;
}

/**
 * xilinx_dpdma_chan_handle_err - DPDMA channel error handling
 * @chan: DPDMA channel
 *
 * This function is called when any channel error or any global error occurs.
 * It disables the channel, which was paused by the error, and determines
 * whether the current active descriptor can be rescheduled depending on
 * the descriptor status.
 */
static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
{
        struct xilinx_dpdma_device *xdev = chan->xdev;
        struct xilinx_dpdma_tx_desc *active;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        dev_dbg(xdev->dev, "chan%u: cur desc addr = 0x%04x%08x\n",
                chan->id,
                dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE),
                dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR));
        dev_dbg(xdev->dev, "chan%u: cur payload addr = 0x%04x%08x\n",
                chan->id,
                dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE),
                dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR));

        xilinx_dpdma_chan_disable(chan);
        chan->running = false;

        if (!chan->desc.active)
                goto out_unlock;

        active = chan->desc.active;
        chan->desc.active = NULL;

        xilinx_dpdma_chan_dump_tx_desc(chan, active);

        if (active->error)
                dev_dbg(xdev->dev, "chan%u: repeated error on desc\n",
                        chan->id);

        /* Reschedule if there's no new descriptor */
        if (!chan->desc.pending &&
            list_empty(&chan->vchan.desc_issued)) {
                active->error = true;
                list_add_tail(&active->vdesc.node,
                              &chan->vchan.desc_issued);
        } else {
                xilinx_dpdma_chan_free_tx_desc(&active->vdesc);
        }

out_unlock:
        spin_unlock_irqrestore(&chan->lock, flags);
}

/* -----------------------------------------------------------------------------
 * DMA Engine Operations
 */

static struct dma_async_tx_descriptor *
xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
                                  struct dma_interleaved_template *xt,
                                  unsigned long flags)
{
        struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
        struct xilinx_dpdma_tx_desc *desc;

        if (xt->dir != DMA_MEM_TO_DEV)
                return NULL;

        if (!xt->numf || !xt->sgl[0].size)
                return NULL;

        if (!(flags & DMA_PREP_REPEAT) || !(flags & DMA_PREP_LOAD_EOT))
                return NULL;

        desc = xilinx_dpdma_chan_prep_interleaved_dma(chan, xt);
        if (!desc)
                return NULL;

        vchan_tx_prep(&chan->vchan, &desc->vdesc, flags | DMA_CTRL_ACK);

        return &desc->vdesc.tx;
}
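
/*
 * Illustrative client-side sketch (not part of this driver): submitting a
 * repeated frame through the dmaengine interleaved API. fb_dma_addr, width,
 * height and stride are hypothetical client variables.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	struct dma_interleaved_template *xt;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = fb_dma_addr;		// must be 256-byte aligned
 *	xt->frame_size = 1;			// one chunk per line
 *	xt->numf = height;			// number of lines
 *	xt->sgl[0].size = width * 4;		// line length in bytes
 *	xt->sgl[0].icg = stride - width * 4;	// gap to the next line
 *	xt->src_sgl = true;
 *	xt->dst_sgl = false;
 *
 *	tx = dmaengine_prep_interleaved_dma(dchan, xt,
 *					    DMA_PREP_REPEAT | DMA_PREP_LOAD_EOT);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(dchan);
 *	}
 */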

/**
 * xilinx_dpdma_alloc_chan_resources - Allocate resources for the channel
 * @dchan: DMA channel
 *
 * Allocate a descriptor pool for the channel.
 *
 * Return: 0 on success, or -ENOMEM if failed to allocate a pool.
 */
static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan)
{
        struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
        size_t align = __alignof__(struct xilinx_dpdma_sw_desc);

        chan->desc_pool = dma_pool_create(dev_name(chan->xdev->dev),
                                          chan->xdev->dev,
                                          sizeof(struct xilinx_dpdma_sw_desc),
                                          align, 0);
        if (!chan->desc_pool) {
                dev_err(chan->xdev->dev,
                        "chan%u: failed to allocate a descriptor pool\n",
                        chan->id);
                return -ENOMEM;
        }

        return 0;
}

/**
 * xilinx_dpdma_free_chan_resources - Free all resources for the channel
 * @dchan: DMA channel
 *
 * Free resources associated with the virtual DMA channel, and destroy the
 * descriptor pool.
 */
static void xilinx_dpdma_free_chan_resources(struct dma_chan *dchan)
{
        struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);

        vchan_free_chan_resources(&chan->vchan);

        dma_pool_destroy(chan->desc_pool);
        chan->desc_pool = NULL;
}

static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
{
        struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        spin_lock(&chan->vchan.lock);
        if (vchan_issue_pending(&chan->vchan))
                xilinx_dpdma_chan_queue_transfer(chan);
        spin_unlock(&chan->vchan.lock);
        spin_unlock_irqrestore(&chan->lock, flags);
}

static int xilinx_dpdma_config(struct dma_chan *dchan,
                               struct dma_slave_config *config)
{
        struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
        unsigned long flags;

        /*
         * The destination address doesn't need to be specified as the DPDMA is
         * hardwired to the destination (the DP controller). The transfer
         * width, burst size and port window size are thus meaningless, they're
         * fixed both on the DPDMA side and on the DP controller side.
         */

        spin_lock_irqsave(&chan->lock, flags);

        /*
         * Abuse the slave_id to indicate that the channel is part of a video
         * group.
         */
        if (chan->id <= ZYNQMP_DPDMA_VIDEO2)
                chan->video_group = config->slave_id != 0;

        spin_unlock_irqrestore(&chan->lock, flags);

        return 0;
}
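
/*
 * Illustrative client-side sketch (not part of this driver): marking a video
 * channel as part of the video group. Only slave_id is consumed by
 * xilinx_dpdma_config(); the other dma_slave_config fields are ignored here.
 *
 *	struct dma_slave_config config = { };
 *
 *	config.slave_id = 1;	// non-zero: join the video group
 *	ret = dmaengine_slave_config(dchan, &config);
 */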

static int xilinx_dpdma_pause(struct dma_chan *dchan)
{
        xilinx_dpdma_chan_pause(to_xilinx_chan(dchan));

        return 0;
}

static int xilinx_dpdma_resume(struct dma_chan *dchan)
{
        xilinx_dpdma_chan_unpause(to_xilinx_chan(dchan));

        return 0;
}

/**
 * xilinx_dpdma_terminate_all - Terminate the channel and descriptors
 * @dchan: DMA channel
 *
 * Pause the channel without waiting for ongoing transfers to complete. Waiting
 * for completion is performed by xilinx_dpdma_synchronize() that will disable
 * the channel to complete the stop.
 *
 * All the descriptors associated with the channel that are guaranteed not to
 * be touched by the hardware are freed. The pending and active descriptors are
 * not touched, and will be freed either upon completion, or by
 * xilinx_dpdma_synchronize().
 *
 * Return: 0 on success.
 */
static int xilinx_dpdma_terminate_all(struct dma_chan *dchan)
{
        struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
        struct xilinx_dpdma_device *xdev = chan->xdev;
        LIST_HEAD(descriptors);
        unsigned long flags;
        unsigned int i;

        /* Pause the channel (including the whole video group if applicable). */
        if (chan->video_group) {
                for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
                        if (xdev->chan[i]->video_group &&
                            xdev->chan[i]->running) {
                                xilinx_dpdma_chan_pause(xdev->chan[i]);
                                xdev->chan[i]->video_group = false;
                        }
                }
        } else {
                xilinx_dpdma_chan_pause(chan);
        }

        /* Gather all the descriptors we can free and free them. */
        spin_lock_irqsave(&chan->vchan.lock, flags);
        vchan_get_all_descriptors(&chan->vchan, &descriptors);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);

        vchan_dma_desc_free_list(&chan->vchan, &descriptors);

        return 0;
}

/**
 * xilinx_dpdma_synchronize - Synchronize callback execution
 * @dchan: DMA channel
 *
 * Synchronizing callback execution ensures that all previously issued
 * transfers have completed and all associated callbacks have been called and
 * have returned.
 *
 * This function waits for the DMA channel to stop. It assumes it has been
 * paused by a previous call to dmaengine_terminate_async(), and that no new
 * pending descriptors have been issued with dma_async_issue_pending(). The
 * behaviour is undefined otherwise.
 */
static void xilinx_dpdma_synchronize(struct dma_chan *dchan)
{
        struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
        unsigned long flags;

        xilinx_dpdma_chan_stop(chan);

        spin_lock_irqsave(&chan->vchan.lock, flags);
        if (chan->desc.pending) {
                vchan_terminate_vdesc(&chan->desc.pending->vdesc);
                chan->desc.pending = NULL;
        }
        if (chan->desc.active) {
                vchan_terminate_vdesc(&chan->desc.active->vdesc);
                chan->desc.active = NULL;
        }
        spin_unlock_irqrestore(&chan->vchan.lock, flags);

        vchan_synchronize(&chan->vchan);
}
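
/*
 * Illustrative client-side sketch (not part of this driver): the teardown
 * sequence the two operations above are designed for.
 *
 *	dmaengine_terminate_async(dchan);	// pause, usable in atomic context
 *	dmaengine_synchronize(dchan);		// may sleep: stop the channel and
 *						// wait for callbacks to return
 */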

/* -----------------------------------------------------------------------------
 * Interrupt and Tasklet Handling
 */

/**
 * xilinx_dpdma_err - Detect any global error
 * @isr: Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Return: True if any global error occurs, or false otherwise.
 */
static bool xilinx_dpdma_err(u32 isr, u32 eisr)
{
        if (isr & XILINX_DPDMA_INTR_GLOBAL_ERR ||
            eisr & XILINX_DPDMA_EINTR_GLOBAL_ERR)
                return true;

        return false;
}

/**
 * xilinx_dpdma_handle_err_irq - Handle DPDMA error interrupt
 * @xdev: DPDMA device
 * @isr: masked Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Handle if any error occurs based on @isr and @eisr. This function disables
 * corresponding error interrupts, and those should be re-enabled once handling
 * is done.
 */
static void xilinx_dpdma_handle_err_irq(struct xilinx_dpdma_device *xdev,
                                        u32 isr, u32 eisr)
{
        bool err = xilinx_dpdma_err(isr, eisr);
        unsigned int i;

        dev_dbg_ratelimited(xdev->dev,
                            "error irq: isr = 0x%08x, eisr = 0x%08x\n",
                            isr, eisr);

        /* Disable channel error interrupts until errors are handled. */
        dpdma_write(xdev->reg, XILINX_DPDMA_IDS,
                    isr & ~XILINX_DPDMA_INTR_GLOBAL_ERR);
        dpdma_write(xdev->reg, XILINX_DPDMA_EIDS,
                    eisr & ~XILINX_DPDMA_EINTR_GLOBAL_ERR);

        for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
                if (err || xilinx_dpdma_chan_err(xdev->chan[i], isr, eisr))
                        tasklet_schedule(&xdev->chan[i]->err_task);
}

/**
 * xilinx_dpdma_enable_irq - Enable interrupts
 * @xdev: DPDMA device
 *
 * Enable interrupts.
 */
static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
{
        dpdma_write(xdev->reg, XILINX_DPDMA_IEN, XILINX_DPDMA_INTR_ALL);
        dpdma_write(xdev->reg, XILINX_DPDMA_EIEN, XILINX_DPDMA_EINTR_ALL);
}

/**
 * xilinx_dpdma_disable_irq - Disable interrupts
 * @xdev: DPDMA device
 *
 * Disable interrupts.
 */
static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
{
        dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
        dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
}

/**
 * xilinx_dpdma_chan_err_task - Per channel tasklet for error handling
 * @t: pointer to the tasklet associated with this handler
 *
 * Per channel error handling tasklet. This function waits for the outstanding
 * transaction to complete and triggers error handling. After error handling,
 * re-enable channel error interrupts, and restart the channel if needed.
 */
static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
{
        struct xilinx_dpdma_chan *chan = from_tasklet(chan, t, err_task);
        struct xilinx_dpdma_device *xdev = chan->xdev;
        unsigned long flags;

        /* Proceed error handling even when polling fails. */
        xilinx_dpdma_chan_poll_no_ostand(chan);

        xilinx_dpdma_chan_handle_err(chan);

        dpdma_write(xdev->reg, XILINX_DPDMA_IEN,
                    XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id);
        dpdma_write(xdev->reg, XILINX_DPDMA_EIEN,
                    XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);

        spin_lock_irqsave(&chan->lock, flags);
        spin_lock(&chan->vchan.lock);
        xilinx_dpdma_chan_queue_transfer(chan);
        spin_unlock(&chan->vchan.lock);
        spin_unlock_irqrestore(&chan->lock, flags);
}

static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data)
{
        struct xilinx_dpdma_device *xdev = data;
        unsigned long mask;
        unsigned int i;
        u32 status;
        u32 error;

        status = dpdma_read(xdev->reg, XILINX_DPDMA_ISR);
        error = dpdma_read(xdev->reg, XILINX_DPDMA_EISR);
        if (!status && !error)
                return IRQ_NONE;

        dpdma_write(xdev->reg, XILINX_DPDMA_ISR, status);
        dpdma_write(xdev->reg, XILINX_DPDMA_EISR, error);

        if (status & XILINX_DPDMA_INTR_VSYNC) {
                /*
                 * There's a single VSYNC interrupt that needs to be processed
                 * by each running channel to update the active descriptor.
                 */
                for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
                        struct xilinx_dpdma_chan *chan = xdev->chan[i];

                        if (chan)
                                xilinx_dpdma_chan_vsync_irq(chan);
                }
        }

        mask = FIELD_GET(XILINX_DPDMA_INTR_DESC_DONE_MASK, status);
        if (mask) {
                for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
                        xilinx_dpdma_chan_done_irq(xdev->chan[i]);
        }

        mask = FIELD_GET(XILINX_DPDMA_INTR_NO_OSTAND_MASK, status);
        if (mask) {
                for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
                        xilinx_dpdma_chan_notify_no_ostand(xdev->chan[i]);
        }

        mask = status & XILINX_DPDMA_INTR_ERR_ALL;
        if (mask || error)
                xilinx_dpdma_handle_err_irq(xdev, mask, error);

        return IRQ_HANDLED;
}

/* -----------------------------------------------------------------------------
 * Initialization & Cleanup
 */

static int xilinx_dpdma_chan_init(struct xilinx_dpdma_device *xdev,
                                  unsigned int chan_id)
{
        struct xilinx_dpdma_chan *chan;

        chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
        if (!chan)
                return -ENOMEM;

        chan->id = chan_id;
        chan->reg = xdev->reg + XILINX_DPDMA_CH_BASE
                  + XILINX_DPDMA_CH_OFFSET * chan->id;
        chan->running = false;
        chan->xdev = xdev;

        spin_lock_init(&chan->lock);
        init_waitqueue_head(&chan->wait_to_stop);

        tasklet_setup(&chan->err_task, xilinx_dpdma_chan_err_task);

        chan->vchan.desc_free = xilinx_dpdma_chan_free_tx_desc;
        vchan_init(&chan->vchan, &xdev->common);

        xdev->chan[chan->id] = chan;

        return 0;
}

static void xilinx_dpdma_chan_remove(struct xilinx_dpdma_chan *chan)
{
        if (!chan)
                return;

        tasklet_kill(&chan->err_task);
        list_del(&chan->vchan.chan.device_node);
}

static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
                                            struct of_dma *ofdma)
{
        struct xilinx_dpdma_device *xdev = ofdma->of_dma_data;
        u32 chan_id = dma_spec->args[0];

        if (chan_id >= ARRAY_SIZE(xdev->chan))
                return NULL;

        if (!xdev->chan[chan_id])
                return NULL;

        return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
}
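
/*
 * Illustrative sketch (not part of this driver): the single cell argument is
 * a channel ID from dt-bindings/dma/xlnx-zynqmp-dpdma.h. A hypothetical
 * client node and driver would look like:
 *
 *	dmas = <&zynqmp_dpdma ZYNQMP_DPDMA_VIDEO0>;
 *	dma-names = "vid0";
 *
 *	dchan = dma_request_chan(dev, "vid0");	// in the client driver
 */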

static int xilinx_dpdma_probe(struct platform_device *pdev)
{
        struct xilinx_dpdma_device *xdev;
        struct dma_device *ddev;
        unsigned int i;
        int ret;

        xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
        if (!xdev)
                return -ENOMEM;

        xdev->dev = &pdev->dev;
        xdev->ext_addr = sizeof(dma_addr_t) > 4;

        INIT_LIST_HEAD(&xdev->common.channels);

        platform_set_drvdata(pdev, xdev);

        xdev->axi_clk = devm_clk_get(xdev->dev, "axi_clk");
        if (IS_ERR(xdev->axi_clk))
                return PTR_ERR(xdev->axi_clk);

        xdev->reg = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(xdev->reg))
                return PTR_ERR(xdev->reg);

        xdev->irq = platform_get_irq(pdev, 0);
        if (xdev->irq < 0) {
                dev_err(xdev->dev, "failed to get platform irq\n");
                return xdev->irq;
        }

        ret = request_irq(xdev->irq, xilinx_dpdma_irq_handler, IRQF_SHARED,
                          dev_name(xdev->dev), xdev);
        if (ret) {
                dev_err(xdev->dev, "failed to request IRQ\n");
                return ret;
        }

        ddev = &xdev->common;
        ddev->dev = &pdev->dev;

        dma_cap_set(DMA_SLAVE, ddev->cap_mask);
        dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
        dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
        dma_cap_set(DMA_REPEAT, ddev->cap_mask);
        dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask);
        ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1);

        ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
        ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
        ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
        /* TODO: Can we achieve better granularity ? */
        ddev->device_tx_status = dma_cookie_status;
        ddev->device_issue_pending = xilinx_dpdma_issue_pending;
        ddev->device_config = xilinx_dpdma_config;
        ddev->device_pause = xilinx_dpdma_pause;
        ddev->device_resume = xilinx_dpdma_resume;
        ddev->device_terminate_all = xilinx_dpdma_terminate_all;
        ddev->device_synchronize = xilinx_dpdma_synchronize;
        ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED);
        ddev->directions = BIT(DMA_MEM_TO_DEV);
        ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

        for (i = 0; i < ARRAY_SIZE(xdev->chan); ++i) {
                ret = xilinx_dpdma_chan_init(xdev, i);
                if (ret < 0) {
                        dev_err(xdev->dev, "failed to initialize channel %u\n",
                                i);
                        goto error;
                }
        }

        ret = clk_prepare_enable(xdev->axi_clk);
        if (ret) {
                dev_err(xdev->dev, "failed to enable the axi clock\n");
                goto error;
        }

        ret = dma_async_device_register(ddev);
        if (ret) {
                dev_err(xdev->dev, "failed to register the dma device\n");
                goto error_dma_async;
        }

        ret = of_dma_controller_register(xdev->dev->of_node,
                                         of_dma_xilinx_xlate, ddev);
        if (ret) {
                dev_err(xdev->dev, "failed to register DMA to DT DMA helper\n");
                goto error_of_dma;
        }

        xilinx_dpdma_enable_irq(xdev);

        xilinx_dpdma_debugfs_init(xdev);

        dev_info(&pdev->dev, "Xilinx DPDMA engine is probed\n");

        return 0;

error_of_dma:
        dma_async_device_unregister(ddev);
error_dma_async:
        clk_disable_unprepare(xdev->axi_clk);
error:
        for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
                xilinx_dpdma_chan_remove(xdev->chan[i]);

        free_irq(xdev->irq, xdev);

        return ret;
}

static int xilinx_dpdma_remove(struct platform_device *pdev)
{
        struct xilinx_dpdma_device *xdev = platform_get_drvdata(pdev);
        unsigned int i;

        /* Start by disabling the IRQ to avoid races during cleanup. */
        free_irq(xdev->irq, xdev);

        xilinx_dpdma_disable_irq(xdev);
        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&xdev->common);
        clk_disable_unprepare(xdev->axi_clk);

        for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
                xilinx_dpdma_chan_remove(xdev->chan[i]);

        return 0;
}

static const struct of_device_id xilinx_dpdma_of_match[] = {
        { .compatible = "xlnx,zynqmp-dpdma",},
        { /* end of table */ },
};
MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);

static struct platform_driver xilinx_dpdma_driver = {
        .probe = xilinx_dpdma_probe,
        .remove = xilinx_dpdma_remove,
        .driver = {
                .name = "xilinx-zynqmp-dpdma",
                .of_match_table = xilinx_dpdma_of_match,
        },
};

module_platform_driver(xilinx_dpdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx ZynqMP DPDMA driver");
MODULE_LICENSE("GPL v2");