2 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
4 * Copyright (C) 2008 Atmel Corporation
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
12 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
13 * The only Atmel DMA Controller that is not covered by this driver is the one
14 * found on AT91SAM9263.
17 #include <dt-bindings/dma/at91.h>
18 #include <linux/clk.h>
19 #include <linux/dmaengine.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmapool.h>
22 #include <linux/interrupt.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/slab.h>
27 #include <linux/of_device.h>
28 #include <linux/of_dma.h>
30 #include "at_hdmac_regs.h"
31 #include "dmaengine.h"
37 * at_hdmac : Name of the Atmel AHB DMA Controller
38 * at_dma_ / atdma : Atmel DMA controller entity related
39 * atc_ / atchan : Atmel DMA Channel entity related
42 #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
43 #define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \
44 |ATC_DIF(AT_DMA_MEM_IF))
45 #define ATC_DMA_BUSWIDTHS\
46 (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
47 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
48 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
49 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
51 #define ATC_MAX_DSCR_TRIALS 10
54 * Initial number of descriptors to allocate for each channel. This could
55 * be increased during dma usage.
57 static unsigned int init_nr_desc_per_channel = 64;
58 module_param(init_nr_desc_per_channel, uint, 0644);
59 MODULE_PARM_DESC(init_nr_desc_per_channel,
60 "initial descriptors per channel (default: 64)");
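/*
 * Illustrative only: the initial pool size can be raised when loading the
 * module, e.g. "modprobe at_hdmac init_nr_desc_per_channel=128", or with
 * "at_hdmac.init_nr_desc_per_channel=128" on the kernel command line when
 * the driver is built in.
 */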
64 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
65 static void atc_issue_pending(struct dma_chan *chan);
68 /*----------------------------------------------------------------------*/
70 static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
75 if (!((src | dst | len) & 3))
77 else if (!((src | dst | len) & 1))
85 static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
87 return list_first_entry(&atchan->active_list,
88 struct at_desc, desc_node);
91 static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
93 return list_first_entry(&atchan->queue,
94 struct at_desc, desc_node);
98 * atc_alloc_descriptor - allocate and return an initialized descriptor
99 * @chan: the channel to allocate descriptors for
100 * @gfp_flags: GFP allocation flags
102 * Note: The ack bit is set in the descriptor flags at creation time
103 * to make initial allocation more convenient. This bit will be cleared
104 * and control will be given to the client at usage time (during the
105 * preparation functions).
107 static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
110 struct at_desc *desc = NULL;
111 struct at_dma *atdma = to_at_dma(chan->device);
114 desc = dma_pool_zalloc(atdma->dma_desc_pool, gfp_flags, &phys);
116 INIT_LIST_HEAD(&desc->tx_list);
117 dma_async_tx_descriptor_init(&desc->txd, chan);
118 /* txd.flags will be overwritten in prep functions */
119 desc->txd.flags = DMA_CTRL_ACK;
120 desc->txd.tx_submit = atc_tx_submit;
121 desc->txd.phys = phys;
128 * atc_desc_get - get an unused descriptor from free_list
129 * @atchan: channel we want a new descriptor for
131 static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
133 struct at_desc *desc, *_desc;
134 struct at_desc *ret = NULL;
138 spin_lock_irqsave(&atchan->lock, flags);
139 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
141 if (async_tx_test_ack(&desc->txd)) {
142 list_del(&desc->desc_node);
146 dev_dbg(chan2dev(&atchan->chan_common),
147 "desc %p not ACKed\n", desc);
149 spin_unlock_irqrestore(&atchan->lock, flags);
150 dev_vdbg(chan2dev(&atchan->chan_common),
151 "scanned %u descriptors on freelist\n", i);
153 /* no more descriptors available in initial pool: create one more */
155 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
157 spin_lock_irqsave(&atchan->lock, flags);
158 atchan->descs_allocated++;
159 spin_unlock_irqrestore(&atchan->lock, flags);
161 dev_err(chan2dev(&atchan->chan_common),
162 "not enough descriptors available\n");
170 * atc_desc_put - move a descriptor, including any children, to the free list
171 * @atchan: channel we work on
172 * @desc: descriptor, at the head of a chain, to move to free list
174 static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
177 struct at_desc *child;
180 spin_lock_irqsave(&atchan->lock, flags);
181 list_for_each_entry(child, &desc->tx_list, desc_node)
182 dev_vdbg(chan2dev(&atchan->chan_common),
183 "moving child desc %p to freelist\n",
185 list_splice_init(&desc->tx_list, &atchan->free_list);
186 dev_vdbg(chan2dev(&atchan->chan_common),
187 "moving desc %p to freelist\n", desc);
188 list_add(&desc->desc_node, &atchan->free_list);
189 spin_unlock_irqrestore(&atchan->lock, flags);
194 * atc_desc_chain - build chain adding a descriptor
195 * @first: address of first descriptor of the chain
196 * @prev: address of previous descriptor of the chain
197 * @desc: descriptor to queue
199 * Called from prep_* functions
201 static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
202 struct at_desc *desc)
207 /* inform the HW lli about chaining */
208 (*prev)->lli.dscr = desc->txd.phys;
209 /* insert the link descriptor to the LD ring */
210 list_add_tail(&desc->desc_node,
217 * atc_dostart - starts the DMA engine for real
218 * @atchan: the channel we want to start
219 * @first: first descriptor in the list we want to begin with
221 * Called with atchan->lock held and bh disabled
223 static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
225 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
227 /* ASSERT: channel is idle */
228 if (atc_chan_is_enabled(atchan)) {
229 dev_err(chan2dev(&atchan->chan_common),
230 "BUG: Attempted to start non-idle channel\n");
231 dev_err(chan2dev(&atchan->chan_common),
232 " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
233 channel_readl(atchan, SADDR),
234 channel_readl(atchan, DADDR),
235 channel_readl(atchan, CTRLA),
236 channel_readl(atchan, CTRLB),
237 channel_readl(atchan, DSCR));
239 /* The tasklet will hopefully advance the queue... */
243 vdbg_dump_regs(atchan);
245 channel_writel(atchan, SADDR, 0);
246 channel_writel(atchan, DADDR, 0);
247 channel_writel(atchan, CTRLA, 0);
248 channel_writel(atchan, CTRLB, 0);
249 channel_writel(atchan, DSCR, first->txd.phys);
250 channel_writel(atchan, SPIP, ATC_SPIP_HOLE(first->src_hole) |
251 ATC_SPIP_BOUNDARY(first->boundary));
252 channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
253 ATC_DPIP_BOUNDARY(first->boundary));
254 dma_writel(atdma, CHER, atchan->mask);
256 vdbg_dump_regs(atchan);
260 * atc_get_desc_by_cookie - get the descriptor of a cookie
261 * @atchan: the DMA channel
262 * @cookie: the cookie to get the descriptor for
264 static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
267 struct at_desc *desc, *_desc;
269 list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
270 if (desc->txd.cookie == cookie)
274 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
275 if (desc->txd.cookie == cookie)
283 * atc_calc_bytes_left - calculates the number of bytes left according to the
284 * value read from CTRLA.
286 * @current_len: the number of bytes left before reading CTRLA
287 * @ctrla: the value of CTRLA
289 static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
291 u32 btsize = (ctrla & ATC_BTSIZE_MAX);
292 u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);
295 * According to the datasheet, when reading the Control A Register
296 * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
297 * number of transfers completed on the Source Interface.
298 * So btsize is always a number of source width transfers.
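 *
 * A worked example (hypothetical values): with a 32-bit source width
 * (src_width == 2) and btsize == 0x10 read back from CTRLA, the current
 * descriptor has already moved 0x10 << 2 = 64 bytes, so 64 bytes are
 * subtracted from current_len.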
300 return current_len - (btsize << src_width);
304 * atc_get_bytes_left - get the number of residual bytes for a cookie
306 * @cookie: transaction identifier to check status of
308 static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
310 struct at_dma_chan *atchan = to_at_dma_chan(chan);
311 struct at_desc *desc_first = atc_first_active(atchan);
312 struct at_desc *desc;
314 u32 ctrla, dscr, trials;
317 * If the cookie doesn't match the currently running transfer then
318 * we can return the total length of the associated DMA transfer,
319 * because it is still queued.
321 desc = atc_get_desc_by_cookie(atchan, cookie);
324 else if (desc != desc_first)
325 return desc->total_len;
327 /* cookie matches to the currently running transfer */
328 ret = desc_first->total_len;
330 if (desc_first->lli.dscr) {
331 /* hardware linked list transfer */
334 * Calculate the residue by removing the length of the child
335 * descriptors already transferred from the total length.
336 * To get the current child descriptor we can use the value of
337 * the channel's DSCR register and compare it against the value
338 * of the hardware linked list structure of each child
341 * The CTRLA register provides us with the amount of data
342 * already read from the source for the current child
343 * descriptor. So we can compute a more accurate residue by also
344 * removing the number of bytes corresponding to this amount of
347 * However, the DSCR and CTRLA registers cannot both be read
348 * atomically. Hence a race condition may occur: the first
349 * register read may refer to one child descriptor whereas the second
350 * read may refer to a later child descriptor in the list
351 * because of the DMA transfer progressing in between the two
354 * One solution could have been to pause the DMA transfer, read
355 * the DSCR and CTRLA then resume the DMA transfer. Nonetheless,
356 * this approach presents some drawbacks:
357 * - If the DMA transfer is paused, RX overruns or TX underruns
358 * are more likely to occur depending on the system latency.
359 * Taking the USART driver as an example, it uses a cyclic DMA
360 * transfer to read data from the Receive Holding Register
361 * (RHR) to avoid RX overruns since the RHR is not protected
362 * by any FIFO on most Atmel SoCs. So pausing the DMA transfer
363 * to compute the residue would break the USART driver design.
364 * - The atc_pause() function masks interrupts, but we'd rather
365 * avoid doing so for system latency reasons.
367 * So we use another solution instead: the DSCR is read a
368 * first time, the CTRLA is read in turn, and next the DSCR is read
369 * a second time. If the two consecutive DSCR read values
370 * are the same, we assume that both refer to the very same
371 * child descriptor, and so does the CTRLA value read in
372 * between. For cyclic transfers, the assumption is that a full loop is not that fast.
374 * If the two DSCR values are different, we read the CTRLA again,
375 * then the DSCR, until two consecutive DSCR read values are
376 * equal or the maximum number of trials is reached.
377 * This algorithm is very unlikely to fail to find a stable value for DSCR.
381 dscr = channel_readl(atchan, DSCR);
382 rmb(); /* ensure DSCR is read before CTRLA */
383 ctrla = channel_readl(atchan, CTRLA);
384 for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
387 rmb(); /* ensure DSCR is read after CTRLA */
388 new_dscr = channel_readl(atchan, DSCR);
391 * If the DSCR register value has not changed inside the
392 * DMA controller since the previous read, we assume
393 * that both the dscr and ctrla values refer to the
394 * very same descriptor.
396 if (likely(new_dscr == dscr))
400 * DSCR has changed inside the DMA controller, so the
401 * previously read value of CTRLA may refer to an already
402 * processed descriptor and hence could be outdated.
403 * We need to update ctrla to match the current
407 rmb(); /* ensure DSCR is read before CTRLA */
408 ctrla = channel_readl(atchan, CTRLA);
410 if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
413 /* for the first descriptor we can be more accurate */
414 if (desc_first->lli.dscr == dscr)
415 return atc_calc_bytes_left(ret, ctrla);
417 ret -= desc_first->len;
418 list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
419 if (desc->lli.dscr == dscr)
426 * For the current descriptor in the chain we can calculate
427 * the remaining bytes using the channel's register.
429 ret = atc_calc_bytes_left(ret, ctrla);
431 /* single transfer */
432 ctrla = channel_readl(atchan, CTRLA);
433 ret = atc_calc_bytes_left(ret, ctrla);
440 * atc_chain_complete - finish work for one transaction chain
441 * @atchan: channel we work on
442 * @desc: descriptor at the head of the chain we want do complete
444 * Called with atchan->lock held and bh disabled */
446 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
448 struct dma_async_tx_descriptor *txd = &desc->txd;
449 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
451 dev_vdbg(chan2dev(&atchan->chan_common),
452 "descriptor %u complete\n", txd->cookie);
454 /* mark the descriptor as complete for non-cyclic cases only */
455 if (!atc_chan_is_cyclic(atchan))
456 dma_cookie_complete(txd);
458 /* If the transfer was a memset, free our temporary buffer */
459 if (desc->memset_buffer) {
460 dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
462 desc->memset_buffer = false;
465 /* move children to free_list */
466 list_splice_init(&desc->tx_list, &atchan->free_list);
467 /* move myself to free_list */
468 list_move(&desc->desc_node, &atchan->free_list);
470 dma_descriptor_unmap(txd);
471 /* for cyclic transfers,
472 * no need to replay callback function while stopping */
473 if (!atc_chan_is_cyclic(atchan)) {
475 * The API requires that no submissions are done from a
476 * callback, so we don't need to drop the lock here
478 dmaengine_desc_get_callback_invoke(txd, NULL);
481 dma_run_dependencies(txd);
485 * atc_complete_all - finish work for all transactions
486 * @atchan: channel to complete transactions for
488 * Also submit queued descriptors, if any
490 * Assume channel is idle while calling this function
491 * Called with atchan->lock held and bh disabled
493 static void atc_complete_all(struct at_dma_chan *atchan)
495 struct at_desc *desc, *_desc;
498 dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
501 * Submit queued descriptors ASAP, i.e. before we go through
502 * the completed ones.
504 if (!list_empty(&atchan->queue))
505 atc_dostart(atchan, atc_first_queued(atchan));
506 /* empty active_list now it is completed */
507 list_splice_init(&atchan->active_list, &list);
508 /* empty queue list by moving descriptors (if any) to active_list */
509 list_splice_init(&atchan->queue, &atchan->active_list);
511 list_for_each_entry_safe(desc, _desc, &list, desc_node)
512 atc_chain_complete(atchan, desc);
516 * atc_advance_work - at the end of a transaction, move forward
517 * @atchan: channel where the transaction ended
519 * Called with atchan->lock held and bh disabled
521 static void atc_advance_work(struct at_dma_chan *atchan)
523 dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
525 if (atc_chan_is_enabled(atchan))
528 if (list_empty(&atchan->active_list) ||
529 list_is_singular(&atchan->active_list)) {
530 atc_complete_all(atchan);
532 atc_chain_complete(atchan, atc_first_active(atchan));
534 atc_dostart(atchan, atc_first_active(atchan));
540 * atc_handle_error - handle errors reported by DMA controller
541 * @atchan: channel where error occurs
543 * Called with atchan->lock held and bh disabled
545 static void atc_handle_error(struct at_dma_chan *atchan)
547 struct at_desc *bad_desc;
548 struct at_desc *child;
551 * The descriptor currently at the head of the active list is
552 * broken. Since we don't have any way to report errors, we'll
553 * just have to scream loudly and try to carry on.
555 bad_desc = atc_first_active(atchan);
556 list_del_init(&bad_desc->desc_node);
558 /* As we are stopped, take the opportunity to push queued descriptors
560 list_splice_init(&atchan->queue, atchan->active_list.prev);
562 /* Try to restart the controller */
563 if (!list_empty(&atchan->active_list))
564 atc_dostart(atchan, atc_first_active(atchan));
567 * KERN_CRIT may seem harsh, but since this only happens
568 * when someone submits a bad physical address in a
569 * descriptor, we should consider ourselves lucky that the
570 * controller flagged an error instead of scribbling over
571 * random memory locations.
573 dev_crit(chan2dev(&atchan->chan_common),
574 "Bad descriptor submitted for DMA!\n");
575 dev_crit(chan2dev(&atchan->chan_common),
576 " cookie: %d\n", bad_desc->txd.cookie);
577 atc_dump_lli(atchan, &bad_desc->lli);
578 list_for_each_entry(child, &bad_desc->tx_list, desc_node)
579 atc_dump_lli(atchan, &child->lli);
581 /* Pretend the descriptor completed successfully */
582 atc_chain_complete(atchan, bad_desc);
586 * atc_handle_cyclic - at the end of a period, run callback function
587 * @atchan: channel used for cyclic operations
589 * Called with atchan->lock held and bh disabled
591 static void atc_handle_cyclic(struct at_dma_chan *atchan)
593 struct at_desc *first = atc_first_active(atchan);
594 struct dma_async_tx_descriptor *txd = &first->txd;
596 dev_vdbg(chan2dev(&atchan->chan_common),
597 "new cyclic period llp 0x%08x\n",
598 channel_readl(atchan, DSCR));
600 dmaengine_desc_get_callback_invoke(txd, NULL);
603 /*-- IRQ & Tasklet ---------------------------------------------------*/
605 static void atc_tasklet(unsigned long data)
607 struct at_dma_chan *atchan = (struct at_dma_chan *)data;
610 spin_lock_irqsave(&atchan->lock, flags);
611 if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
612 atc_handle_error(atchan);
613 else if (atc_chan_is_cyclic(atchan))
614 atc_handle_cyclic(atchan);
616 atc_advance_work(atchan);
618 spin_unlock_irqrestore(&atchan->lock, flags);
621 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
623 struct at_dma *atdma = (struct at_dma *)dev_id;
624 struct at_dma_chan *atchan;
626 u32 status, pending, imr;
630 imr = dma_readl(atdma, EBCIMR);
631 status = dma_readl(atdma, EBCISR);
632 pending = status & imr;
637 dev_vdbg(atdma->dma_common.dev,
638 "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
639 status, imr, pending);
641 for (i = 0; i < atdma->dma_common.chancnt; i++) {
642 atchan = &atdma->chan[i];
643 if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
644 if (pending & AT_DMA_ERR(i)) {
645 /* Disable channel on AHB error */
646 dma_writel(atdma, CHDR,
647 AT_DMA_RES(i) | atchan->mask);
648 /* Give information to tasklet */
649 set_bit(ATC_IS_ERROR, &atchan->status);
651 tasklet_schedule(&atchan->tasklet);
662 /*-- DMA Engine API --------------------------------------------------*/
665 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
666 * @tx: descriptor at the head of the transaction chain
668 * Queue chain if DMA engine is working already
670 * Cookie increment and adding to active_list or queue must be atomic
672 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
674 struct at_desc *desc = txd_to_at_desc(tx);
675 struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
679 spin_lock_irqsave(&atchan->lock, flags);
680 cookie = dma_cookie_assign(tx);
682 if (list_empty(&atchan->active_list)) {
683 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
685 atc_dostart(atchan, desc);
686 list_add_tail(&desc->desc_node, &atchan->active_list);
688 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
690 list_add_tail(&desc->desc_node, &atchan->queue);
693 spin_unlock_irqrestore(&atchan->lock, flags);
699 * atc_prep_dma_interleaved - prepare memory to memory interleaved operation
700 * @chan: the channel to prepare operation on
701 * @xt: Interleaved transfer template
702 * @flags: tx descriptor status flags
704 static struct dma_async_tx_descriptor *
705 atc_prep_dma_interleaved(struct dma_chan *chan,
706 struct dma_interleaved_template *xt,
709 struct at_dma_chan *atchan = to_at_dma_chan(chan);
710 struct data_chunk *first;
711 struct at_desc *desc = NULL;
719 if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
724 dev_info(chan2dev(chan),
725 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
726 __func__, &xt->src_start, &xt->dst_start, xt->numf,
727 xt->frame_size, flags);
730 * The controller can only "skip" X bytes every Y bytes, so we
731 * need to make sure we are given a template that fits that
732 * description, i.e. a template with chunks that always have the
733 * same size, with the same ICGs.
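 *
 * A minimal sketch of an acceptable template (hypothetical values):
 * numf == 1, frame_size == 2, with identical chunks such as
 * sgl[0] = { .size = 32, .icg = 96 } and sgl[1] = { .size = 32, .icg = 96 },
 * i.e. repeatedly copy 32 bytes then skip 96 bytes. Mixing chunk sizes or
 * ICGs is rejected by the loop below.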
735 for (i = 0; i < xt->frame_size; i++) {
736 struct data_chunk *chunk = xt->sgl + i;
738 if ((chunk->size != xt->sgl->size) ||
739 (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) ||
740 (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) {
741 dev_err(chan2dev(chan),
742 "%s: the controller can transfer only identical chunks\n",
750 dwidth = atc_get_xfer_width(xt->src_start,
753 xfer_count = len >> dwidth;
754 if (xfer_count > ATC_BTSIZE_MAX) {
755 dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
759 ctrla = ATC_SRC_WIDTH(dwidth) |
760 ATC_DST_WIDTH(dwidth);
762 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
763 | ATC_SRC_ADDR_MODE_INCR
764 | ATC_DST_ADDR_MODE_INCR
769 /* create the transfer */
770 desc = atc_desc_get(atchan);
772 dev_err(chan2dev(chan),
773 "%s: couldn't allocate our descriptor\n", __func__);
777 desc->lli.saddr = xt->src_start;
778 desc->lli.daddr = xt->dst_start;
779 desc->lli.ctrla = ctrla | xfer_count;
780 desc->lli.ctrlb = ctrlb;
782 desc->boundary = first->size >> dwidth;
783 desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1;
784 desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1;
786 desc->txd.cookie = -EBUSY;
787 desc->total_len = desc->len = len;
789 /* set end-of-link to the last link descriptor of list */
792 desc->txd.flags = flags; /* client is in control of this ack */
798 * atc_prep_dma_memcpy - prepare a memcpy operation
799 * @chan: the channel to prepare operation on
800 * @dest: operation destination DMA address
801 * @src: operation source DMA address
802 * @len: operation length
803 * @flags: tx descriptor status flags
805 static struct dma_async_tx_descriptor *
806 atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
807 size_t len, unsigned long flags)
809 struct at_dma_chan *atchan = to_at_dma_chan(chan);
810 struct at_desc *desc = NULL;
811 struct at_desc *first = NULL;
812 struct at_desc *prev = NULL;
815 unsigned int src_width;
816 unsigned int dst_width;
820 dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
821 &dest, &src, len, flags);
823 if (unlikely(!len)) {
824 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
828 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
829 | ATC_SRC_ADDR_MODE_INCR
830 | ATC_DST_ADDR_MODE_INCR
834 * We can be a lot more clever here, but this should take care
835 * of the most common optimization.
837 src_width = dst_width = atc_get_xfer_width(src, dest, len);
839 ctrla = ATC_SRC_WIDTH(src_width) |
840 ATC_DST_WIDTH(dst_width);
842 for (offset = 0; offset < len; offset += xfer_count << src_width) {
843 xfer_count = min_t(size_t, (len - offset) >> src_width,
846 desc = atc_desc_get(atchan);
850 desc->lli.saddr = src + offset;
851 desc->lli.daddr = dest + offset;
852 desc->lli.ctrla = ctrla | xfer_count;
853 desc->lli.ctrlb = ctrlb;
855 desc->txd.cookie = 0;
856 desc->len = xfer_count << src_width;
858 atc_desc_chain(&first, &prev, desc);
861 /* First descriptor of the chain embeds additional information */
862 first->txd.cookie = -EBUSY;
863 first->total_len = len;
865 /* set end-of-link to the last link descriptor of list */
868 first->txd.flags = flags; /* client is in control of this ack */
873 atc_desc_put(atchan, first);
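/*
 * Client-side use of this memcpy capability goes through the generic
 * dmaengine API rather than calling this driver directly. A minimal sketch,
 * assuming the channel has already been requested and dst_phys/src_phys are
 * DMA-mapped addresses provided by the caller:
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */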
877 static struct at_desc *atc_create_memset_desc(struct dma_chan *chan,
882 struct at_dma_chan *atchan = to_at_dma_chan(chan);
883 struct at_desc *desc;
886 u32 ctrla = ATC_SRC_WIDTH(2) | ATC_DST_WIDTH(2);
887 u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
888 ATC_SRC_ADDR_MODE_FIXED |
889 ATC_DST_ADDR_MODE_INCR |
892 xfer_count = len >> 2;
893 if (xfer_count > ATC_BTSIZE_MAX) {
894 dev_err(chan2dev(chan), "%s: buffer is too big\n",
899 desc = atc_desc_get(atchan);
901 dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
906 desc->lli.saddr = psrc;
907 desc->lli.daddr = pdst;
908 desc->lli.ctrla = ctrla | xfer_count;
909 desc->lli.ctrlb = ctrlb;
911 desc->txd.cookie = 0;
918 * atc_prep_dma_memset - prepare a memset operation
919 * @chan: the channel to prepare operation on
920 * @dest: operation destination DMA address
921 * @value: value to set memory buffer to
922 * @len: operation length
923 * @flags: tx descriptor status flags
925 static struct dma_async_tx_descriptor *
926 atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
927 size_t len, unsigned long flags)
929 struct at_dma *atdma = to_at_dma(chan->device);
930 struct at_desc *desc;
934 dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
935 &dest, value, len, flags);
937 if (unlikely(!len)) {
938 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
942 if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
943 dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
948 vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
950 dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
954 *(u32*)vaddr = value;
956 desc = atc_create_memset_desc(chan, paddr, dest, len);
958 dev_err(chan2dev(chan), "%s: couldn't get a descriptor\n",
960 goto err_free_buffer;
963 desc->memset_paddr = paddr;
964 desc->memset_vaddr = vaddr;
965 desc->memset_buffer = true;
967 desc->txd.cookie = -EBUSY;
968 desc->total_len = len;
970 /* set end-of-link on the descriptor */
973 desc->txd.flags = flags;
978 dma_pool_free(atdma->memset_pool, vaddr, paddr);
982 static struct dma_async_tx_descriptor *
983 atc_prep_dma_memset_sg(struct dma_chan *chan,
984 struct scatterlist *sgl,
985 unsigned int sg_len, int value,
988 struct at_dma_chan *atchan = to_at_dma_chan(chan);
989 struct at_dma *atdma = to_at_dma(chan->device);
990 struct at_desc *desc = NULL, *first = NULL, *prev = NULL;
991 struct scatterlist *sg;
994 size_t total_len = 0;
997 dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%zx f0x%lx\n", __func__,
998 value, sg_len, flags);
1000 if (unlikely(!sgl || !sg_len)) {
1001 dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n",
1006 vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
1008 dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
1012 *(u32*)vaddr = value;
1014 for_each_sg(sgl, sg, sg_len, i) {
1015 dma_addr_t dest = sg_dma_address(sg);
1016 size_t len = sg_dma_len(sg);
1018 dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
1019 __func__, &dest, len);
1021 if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
1022 dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
1027 desc = atc_create_memset_desc(chan, paddr, dest, len);
1031 atc_desc_chain(&first, &prev, desc);
1037 * Only set the buffer pointers on the last descriptor to
1038 * avoid free'ing while we have our transfer still going
1040 desc->memset_paddr = paddr;
1041 desc->memset_vaddr = vaddr;
1042 desc->memset_buffer = true;
1044 first->txd.cookie = -EBUSY;
1045 first->total_len = total_len;
1047 /* set end-of-link on the descriptor */
1050 first->txd.flags = flags;
1055 atc_desc_put(atchan, first);
1060 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1061 * @chan: DMA channel
1062 * @sgl: scatterlist to transfer to/from
1063 * @sg_len: number of entries in @sgl
1064 * @direction: DMA direction
1065 * @flags: tx descriptor status flags
1066 * @context: transaction context (ignored)
1068 static struct dma_async_tx_descriptor *
1069 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1070 unsigned int sg_len, enum dma_transfer_direction direction,
1071 unsigned long flags, void *context)
1073 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1074 struct at_dma_slave *atslave = chan->private;
1075 struct dma_slave_config *sconfig = &atchan->dma_sconfig;
1076 struct at_desc *first = NULL;
1077 struct at_desc *prev = NULL;
1081 unsigned int reg_width;
1082 unsigned int mem_width;
1084 struct scatterlist *sg;
1085 size_t total_len = 0;
1087 dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
1089 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
1092 if (unlikely(!atslave || !sg_len)) {
1093 dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
1097 ctrla = ATC_SCSIZE(sconfig->src_maxburst)
1098 | ATC_DCSIZE(sconfig->dst_maxburst);
1101 switch (direction) {
1102 case DMA_MEM_TO_DEV:
1103 reg_width = convert_buswidth(sconfig->dst_addr_width);
1104 ctrla |= ATC_DST_WIDTH(reg_width);
1105 ctrlb |= ATC_DST_ADDR_MODE_FIXED
1106 | ATC_SRC_ADDR_MODE_INCR
1108 | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
1109 reg = sconfig->dst_addr;
1110 for_each_sg(sgl, sg, sg_len, i) {
1111 struct at_desc *desc;
1115 desc = atc_desc_get(atchan);
1119 mem = sg_dma_address(sg);
1120 len = sg_dma_len(sg);
1121 if (unlikely(!len)) {
1122 dev_dbg(chan2dev(chan),
1123 "prep_slave_sg: sg(%d) data length is zero\n", i);
1127 if (unlikely(mem & 3 || len & 3))
1130 desc->lli.saddr = mem;
1131 desc->lli.daddr = reg;
1132 desc->lli.ctrla = ctrla
1133 | ATC_SRC_WIDTH(mem_width)
1135 desc->lli.ctrlb = ctrlb;
1138 atc_desc_chain(&first, &prev, desc);
1142 case DMA_DEV_TO_MEM:
1143 reg_width = convert_buswidth(sconfig->src_addr_width);
1144 ctrla |= ATC_SRC_WIDTH(reg_width);
1145 ctrlb |= ATC_DST_ADDR_MODE_INCR
1146 | ATC_SRC_ADDR_MODE_FIXED
1148 | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);
1150 reg = sconfig->src_addr;
1151 for_each_sg(sgl, sg, sg_len, i) {
1152 struct at_desc *desc;
1156 desc = atc_desc_get(atchan);
1160 mem = sg_dma_address(sg);
1161 len = sg_dma_len(sg);
1162 if (unlikely(!len)) {
1163 dev_dbg(chan2dev(chan),
1164 "prep_slave_sg: sg(%d) data length is zero\n", i);
1168 if (unlikely(mem & 3 || len & 3))
1171 desc->lli.saddr = reg;
1172 desc->lli.daddr = mem;
1173 desc->lli.ctrla = ctrla
1174 | ATC_DST_WIDTH(mem_width)
1176 desc->lli.ctrlb = ctrlb;
1179 atc_desc_chain(&first, &prev, desc);
1187 /* set end-of-link to the last link descriptor of list */
1190 /* First descriptor of the chain embeds additional information */
1191 first->txd.cookie = -EBUSY;
1192 first->total_len = total_len;
1194 /* first link descriptor of the list is responsible for the flags */
1195 first->txd.flags = flags; /* client is in control of this ack */
1200 dev_err(chan2dev(chan), "not enough descriptors available\n");
1202 atc_desc_put(atchan, first);
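/*
 * A minimal client-side sketch for a slave transfer (fifo_phys_addr, sgl and
 * sg_len are placeholders set up by the peripheral driver): the client first
 * describes its FIFO with dmaengine_slave_config(), then prepares and
 * submits the scatterlist through the generic dmaengine API:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 4,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 */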
1207 * atc_dma_cyclic_check_values
1208 * Check for too big/unaligned periods and unaligned DMA buffer
1211 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
1214 if (period_len > (ATC_BTSIZE_MAX << reg_width))
1216 if (unlikely(period_len & ((1 << reg_width) - 1)))
1218 if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1228 * atc_dma_cyclic_fill_desc - Fill one period descriptor
1231 atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
1232 unsigned int period_index, dma_addr_t buf_addr,
1233 unsigned int reg_width, size_t period_len,
1234 enum dma_transfer_direction direction)
1236 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1237 struct dma_slave_config *sconfig = &atchan->dma_sconfig;
1240 /* prepare common CTRLA value */
1241 ctrla = ATC_SCSIZE(sconfig->src_maxburst)
1242 | ATC_DCSIZE(sconfig->dst_maxburst)
1243 | ATC_DST_WIDTH(reg_width)
1244 | ATC_SRC_WIDTH(reg_width)
1245 | period_len >> reg_width;
1247 switch (direction) {
1248 case DMA_MEM_TO_DEV:
1249 desc->lli.saddr = buf_addr + (period_len * period_index);
1250 desc->lli.daddr = sconfig->dst_addr;
1251 desc->lli.ctrla = ctrla;
1252 desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
1253 | ATC_SRC_ADDR_MODE_INCR
1255 | ATC_SIF(atchan->mem_if)
1256 | ATC_DIF(atchan->per_if);
1257 desc->len = period_len;
1260 case DMA_DEV_TO_MEM:
1261 desc->lli.saddr = sconfig->src_addr;
1262 desc->lli.daddr = buf_addr + (period_len * period_index);
1263 desc->lli.ctrla = ctrla;
1264 desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
1265 | ATC_SRC_ADDR_MODE_FIXED
1267 | ATC_SIF(atchan->per_if)
1268 | ATC_DIF(atchan->mem_if);
1269 desc->len = period_len;
1280 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
1281 * @chan: the DMA channel to prepare
1282 * @buf_addr: physical DMA address where the buffer starts
1283 * @buf_len: total number of bytes for the entire buffer
1284 * @period_len: number of bytes for each period
1285 * @direction: transfer direction, to or from device
1286 * @flags: tx descriptor status flags
1288 static struct dma_async_tx_descriptor *
1289 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1290 size_t period_len, enum dma_transfer_direction direction,
1291 unsigned long flags)
1293 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1294 struct at_dma_slave *atslave = chan->private;
1295 struct dma_slave_config *sconfig = &atchan->dma_sconfig;
1296 struct at_desc *first = NULL;
1297 struct at_desc *prev = NULL;
1298 unsigned long was_cyclic;
1299 unsigned int reg_width;
1300 unsigned int periods = buf_len / period_len;
1303 dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
1304 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
1306 periods, buf_len, period_len);
1308 if (unlikely(!atslave || !buf_len || !period_len)) {
1309 dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
1313 was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
1315 dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
1319 if (unlikely(!is_slave_direction(direction)))
1322 if (direction == DMA_MEM_TO_DEV)
1323 reg_width = convert_buswidth(sconfig->dst_addr_width);
1325 reg_width = convert_buswidth(sconfig->src_addr_width);
1327 /* Check for too big/unaligned periods and unaligned DMA buffer */
1328 if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
1331 /* build cyclic linked list */
1332 for (i = 0; i < periods; i++) {
1333 struct at_desc *desc;
1335 desc = atc_desc_get(atchan);
1339 if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
1340 reg_width, period_len, direction))
1343 atc_desc_chain(&first, &prev, desc);
1346 /* let's make a cyclic list */
1347 prev->lli.dscr = first->txd.phys;
1349 /* First descriptor of the chain embeds additional information */
1350 first->txd.cookie = -EBUSY;
1351 first->total_len = buf_len;
1356 dev_err(chan2dev(chan), "not enough descriptors available\n");
1357 atc_desc_put(atchan, first);
1359 clear_bit(ATC_IS_CYCLIC, &atchan->status);
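/*
 * Cyclic transfers are typically set up by audio or UART drivers that need a
 * ring buffer serviced indefinitely. A minimal client-side sketch, where
 * buf_phys, buf_len, period_len and the period_done callback are hypothetical
 * names owned by the client driver:
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = period_done;
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */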
1363 static int atc_config(struct dma_chan *chan,
1364 struct dma_slave_config *sconfig)
1366 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1368 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1370 /* Check if the channel is configured for slave transfers */
1374 memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));
1376 convert_burst(&atchan->dma_sconfig.src_maxburst);
1377 convert_burst(&atchan->dma_sconfig.dst_maxburst);
1382 static int atc_pause(struct dma_chan *chan)
1384 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1385 struct at_dma *atdma = to_at_dma(chan->device);
1386 int chan_id = atchan->chan_common.chan_id;
1387 unsigned long flags;
1389 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1391 spin_lock_irqsave(&atchan->lock, flags);
1393 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
1394 set_bit(ATC_IS_PAUSED, &atchan->status);
1396 spin_unlock_irqrestore(&atchan->lock, flags);
1401 static int atc_resume(struct dma_chan *chan)
1403 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1404 struct at_dma *atdma = to_at_dma(chan->device);
1405 int chan_id = atchan->chan_common.chan_id;
1406 unsigned long flags;
1408 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1410 if (!atc_chan_is_paused(atchan))
1413 spin_lock_irqsave(&atchan->lock, flags);
1415 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
1416 clear_bit(ATC_IS_PAUSED, &atchan->status);
1418 spin_unlock_irqrestore(&atchan->lock, flags);
1423 static int atc_terminate_all(struct dma_chan *chan)
1425 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1426 struct at_dma *atdma = to_at_dma(chan->device);
1427 int chan_id = atchan->chan_common.chan_id;
1428 struct at_desc *desc, *_desc;
1429 unsigned long flags;
1433 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1436 * This is only called when something went wrong elsewhere, so
1437 * we don't really care about the data. Just disable the
1438 * channel. We still have to poll the channel enable bit due
1439 * to AHB/HSB limitations.
1441 spin_lock_irqsave(&atchan->lock, flags);
1443 /* disabling channel: must also remove suspend state */
1444 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
1446 /* confirm that this channel is disabled */
1447 while (dma_readl(atdma, CHSR) & atchan->mask)
1450 /* active_list entries will end up before queued entries */
1451 list_splice_init(&atchan->queue, &list);
1452 list_splice_init(&atchan->active_list, &list);
1454 /* Flush all pending and queued descriptors */
1455 list_for_each_entry_safe(desc, _desc, &list, desc_node)
1456 atc_chain_complete(atchan, desc);
1458 clear_bit(ATC_IS_PAUSED, &atchan->status);
1459 /* if channel dedicated to cyclic operations, free it */
1460 clear_bit(ATC_IS_CYCLIC, &atchan->status);
1462 spin_unlock_irqrestore(&atchan->lock, flags);
1468 * atc_tx_status - poll for transaction completion
1469 * @chan: DMA channel
1470 * @cookie: transaction identifier to check status of
1471 * @txstate: if not %NULL updated with transaction state
1473 * If @txstate is passed in, upon return it reflects the driver
1474 * internal state and can be used with dma_async_is_complete() to check
1475 * the status of multiple cookies without re-checking hardware state.
1477 static enum dma_status
1478 atc_tx_status(struct dma_chan *chan,
1479 dma_cookie_t cookie,
1480 struct dma_tx_state *txstate)
1482 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1483 unsigned long flags;
1484 enum dma_status ret;
1487 ret = dma_cookie_status(chan, cookie, txstate);
1488 if (ret == DMA_COMPLETE)
1491 * There's no point calculating the residue if there's
1492 * no txstate to store the value.
1497 spin_lock_irqsave(&atchan->lock, flags);
1499 /* Get number of bytes left in the active transactions */
1500 bytes = atc_get_bytes_left(chan, cookie);
1502 spin_unlock_irqrestore(&atchan->lock, flags);
1504 if (unlikely(bytes < 0)) {
1505 dev_vdbg(chan2dev(chan), "get residual bytes error\n");
1508 dma_set_residue(txstate, bytes);
1511 dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
1512 ret, cookie, bytes);
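/*
 * Clients usually poll completion and residue through the generic dmaengine
 * helpers, for instance (sketch):
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status != DMA_COMPLETE)
 *		pr_debug("%u bytes still pending\n", state.residue);
 */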
1518 * atc_issue_pending - try to finish work
1519 * @chan: target DMA channel
1521 static void atc_issue_pending(struct dma_chan *chan)
1523 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1524 unsigned long flags;
1526 dev_vdbg(chan2dev(chan), "issue_pending\n");
1528 /* Not needed for cyclic transfers */
1529 if (atc_chan_is_cyclic(atchan))
1532 spin_lock_irqsave(&atchan->lock, flags);
1533 atc_advance_work(atchan);
1534 spin_unlock_irqrestore(&atchan->lock, flags);
1538 * atc_alloc_chan_resources - allocate resources for DMA channel
1539 * @chan: allocate descriptor resources for this channel
1542 * return - the number of allocated descriptors
1544 static int atc_alloc_chan_resources(struct dma_chan *chan)
1546 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1547 struct at_dma *atdma = to_at_dma(chan->device);
1548 struct at_desc *desc;
1549 struct at_dma_slave *atslave;
1550 unsigned long flags;
1553 LIST_HEAD(tmp_list);
1555 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1557 /* ASSERT: channel is idle */
1558 if (atc_chan_is_enabled(atchan)) {
1559 dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
1563 cfg = ATC_DEFAULT_CFG;
1565 atslave = chan->private;
1568 * We need controller-specific data to set up slave
1571 BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
1573 /* if a cfg configuration is specified, take it instead of the default */
1578 /* have we already been set up?
1579 * reconfigure channel but no need to reallocate descriptors */
1580 if (!list_empty(&atchan->free_list))
1581 return atchan->descs_allocated;
1583 /* Allocate initial pool of descriptors */
1584 for (i = 0; i < init_nr_desc_per_channel; i++) {
1585 desc = atc_alloc_descriptor(chan, GFP_KERNEL);
1587 dev_err(atdma->dma_common.dev,
1588 "Only %d initial descriptors\n", i);
1591 list_add_tail(&desc->desc_node, &tmp_list);
1594 spin_lock_irqsave(&atchan->lock, flags);
1595 atchan->descs_allocated = i;
1596 list_splice(&tmp_list, &atchan->free_list);
1597 dma_cookie_init(chan);
1598 spin_unlock_irqrestore(&atchan->lock, flags);
1600 /* channel parameters */
1601 channel_writel(atchan, CFG, cfg);
1603 dev_dbg(chan2dev(chan),
1604 "alloc_chan_resources: allocated %d descriptors\n",
1605 atchan->descs_allocated);
1607 return atchan->descs_allocated;
1611 * atc_free_chan_resources - free all channel resources
1612 * @chan: DMA channel
1614 static void atc_free_chan_resources(struct dma_chan *chan)
1616 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1617 struct at_dma *atdma = to_at_dma(chan->device);
1618 struct at_desc *desc, *_desc;
1621 dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
1622 atchan->descs_allocated);
1624 /* ASSERT: channel is idle */
1625 BUG_ON(!list_empty(&atchan->active_list));
1626 BUG_ON(!list_empty(&atchan->queue));
1627 BUG_ON(atc_chan_is_enabled(atchan));
1629 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
1630 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
1631 list_del(&desc->desc_node);
1632 /* free link descriptor */
1633 dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
1635 list_splice_init(&atchan->free_list, &list);
1636 atchan->descs_allocated = 0;
1640 * Free atslave allocated in at_dma_xlate()
1642 kfree(chan->private);
1643 chan->private = NULL;
1645 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1649 static bool at_dma_filter(struct dma_chan *chan, void *slave)
1651 struct at_dma_slave *atslave = slave;
1653 if (atslave->dma_dev == chan->device->dev) {
1654 chan->private = atslave;
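/*
 * On non-DT platforms, board code requests a channel with this filter and an
 * at_dma_slave describing the peripheral, along the lines of the sketch
 * below (board_atslave is a placeholder):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, at_dma_filter, &board_atslave);
 */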
1661 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1662 struct of_dma *of_dma)
1664 struct dma_chan *chan;
1665 struct at_dma_chan *atchan;
1666 struct at_dma_slave *atslave;
1667 dma_cap_mask_t mask;
1668 unsigned int per_id;
1669 struct platform_device *dmac_pdev;
1671 if (dma_spec->args_count != 2)
1674 dmac_pdev = of_find_device_by_node(dma_spec->np);
1677 dma_cap_set(DMA_SLAVE, mask);
1679 atslave = kzalloc(sizeof(*atslave), GFP_KERNEL);
1683 atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
1685 * We can fill both SRC_PER and DST_PER; one of these fields will be
1686 * ignored depending on the DMA transfer direction.
1688 per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
1689 atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
1690 | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
1692 * We have to translate the value we get from the device tree since
1693 * the half FIFO configuration value had to be 0 to keep backward
1696 switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
1697 case AT91_DMA_CFG_FIFOCFG_ALAP:
1698 atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
1700 case AT91_DMA_CFG_FIFOCFG_ASAP:
1701 atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
1703 case AT91_DMA_CFG_FIFOCFG_HALF:
1705 atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
1707 atslave->dma_dev = &dmac_pdev->dev;
1709 chan = dma_request_channel(mask, at_dma_filter, atslave);
1713 atchan = to_at_dma_chan(chan);
1714 atchan->per_if = dma_spec->args[0] & 0xff;
1715 atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;
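/*
 * The two cells decoded above come from the consumer's "dmas" property: the
 * first cell packs the AHB interfaces (mem_if << 16 | per_if), the second
 * carries the peripheral ID and FIFO configuration. A hypothetical consumer
 * node could contain:
 *
 *	dmas = <&dma0 2 AT91_DMA_CFG_PER_ID(1)>,
 *	       <&dma0 2 AT91_DMA_CFG_PER_ID(2)>;
 *	dma-names = "tx", "rx";
 */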
1720 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1721 struct of_dma *of_dma)
1727 /*-- Module Management -----------------------------------------------*/
1729 /* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
1730 static struct at_dma_platform_data at91sam9rl_config = {
1733 static struct at_dma_platform_data at91sam9g45_config = {
1737 #if defined(CONFIG_OF)
1738 static const struct of_device_id atmel_dma_dt_ids[] = {
1740 .compatible = "atmel,at91sam9rl-dma",
1741 .data = &at91sam9rl_config,
1743 .compatible = "atmel,at91sam9g45-dma",
1744 .data = &at91sam9g45_config,
1750 MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
1753 static const struct platform_device_id atdma_devtypes[] = {
1755 .name = "at91sam9rl_dma",
1756 .driver_data = (unsigned long) &at91sam9rl_config,
1758 .name = "at91sam9g45_dma",
1759 .driver_data = (unsigned long) &at91sam9g45_config,
1765 static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
1766 struct platform_device *pdev)
1768 if (pdev->dev.of_node) {
1769 const struct of_device_id *match;
1770 match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
1775 return (struct at_dma_platform_data *)
1776 platform_get_device_id(pdev)->driver_data;
1780 * at_dma_off - disable DMA controller
1781 * @atdma: the Atmel HDMAC device
1783 static void at_dma_off(struct at_dma *atdma)
1785 dma_writel(atdma, EN, 0);
1787 /* disable all interrupts */
1788 dma_writel(atdma, EBCIDR, -1L);
1790 /* confirm that all channels are disabled */
1791 while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
1795 static int __init at_dma_probe(struct platform_device *pdev)
1797 struct resource *io;
1798 struct at_dma *atdma;
1803 const struct at_dma_platform_data *plat_dat;
1805 /* setup platform data for each SoC */
1806 dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
1807 dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
1808 dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
1809 dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
1810 dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
1811 dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
1812 dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
1814 /* get DMA parameters from controller type */
1815 plat_dat = at_dma_get_driver_data(pdev);
1819 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1823 irq = platform_get_irq(pdev, 0);
1827 size = sizeof(struct at_dma);
1828 size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
1829 atdma = kzalloc(size, GFP_KERNEL);
1833 /* discover transaction capabilities */
1834 atdma->dma_common.cap_mask = plat_dat->cap_mask;
1835 atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
1837 size = resource_size(io);
1838 if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
1843 atdma->regs = ioremap(io->start, size);
1849 atdma->clk = clk_get(&pdev->dev, "dma_clk");
1850 if (IS_ERR(atdma->clk)) {
1851 err = PTR_ERR(atdma->clk);
1854 err = clk_prepare_enable(atdma->clk);
1856 goto err_clk_prepare;
1858 /* force dma off, just in case */
1861 err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
1865 platform_set_drvdata(pdev, atdma);
1867 /* create a pool of consistent memory blocks for hardware descriptors */
1868 atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
1869 &pdev->dev, sizeof(struct at_desc),
1870 4 /* word alignment */, 0);
1871 if (!atdma->dma_desc_pool) {
1872 dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
1874 goto err_desc_pool_create;
1877 /* create a pool of consistent memory blocks for memset blocks */
1878 atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
1879 &pdev->dev, sizeof(int), 4, 0);
1880 if (!atdma->memset_pool) {
1881 dev_err(&pdev->dev, "No memory for memset dma pool\n");
1883 goto err_memset_pool_create;
1886 /* clear any pending interrupt */
1887 while (dma_readl(atdma, EBCISR))
1890 /* initialize channels related values */
1891 INIT_LIST_HEAD(&atdma->dma_common.channels);
1892 for (i = 0; i < plat_dat->nr_channels; i++) {
1893 struct at_dma_chan *atchan = &atdma->chan[i];
1895 atchan->mem_if = AT_DMA_MEM_IF;
1896 atchan->per_if = AT_DMA_PER_IF;
1897 atchan->chan_common.device = &atdma->dma_common;
1898 dma_cookie_init(&atchan->chan_common);
1899 list_add_tail(&atchan->chan_common.device_node,
1900 &atdma->dma_common.channels);
1902 atchan->ch_regs = atdma->regs + ch_regs(i);
1903 spin_lock_init(&atchan->lock);
1904 atchan->mask = 1 << i;
1906 INIT_LIST_HEAD(&atchan->active_list);
1907 INIT_LIST_HEAD(&atchan->queue);
1908 INIT_LIST_HEAD(&atchan->free_list);
1910 tasklet_init(&atchan->tasklet, atc_tasklet,
1911 (unsigned long)atchan);
1912 atc_enable_chan_irq(atdma, i);
1915 /* set base routines */
1916 atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
1917 atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
1918 atdma->dma_common.device_tx_status = atc_tx_status;
1919 atdma->dma_common.device_issue_pending = atc_issue_pending;
1920 atdma->dma_common.dev = &pdev->dev;
1922 /* set prep routines based on capability */
1923 if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask))
1924 atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved;
1926 if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
1927 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
1929 if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
1930 atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
1931 atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg;
1932 atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
1935 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
1936 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1937 /* controller can do slave DMA: can trigger cyclic transfers */
1938 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
1939 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
1940 atdma->dma_common.device_config = atc_config;
1941 atdma->dma_common.device_pause = atc_pause;
1942 atdma->dma_common.device_resume = atc_resume;
1943 atdma->dma_common.device_terminate_all = atc_terminate_all;
1944 atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
1945 atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
1946 atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1947 atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1950 dma_writel(atdma, EN, AT_DMA_ENABLE);
1952 dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
1953 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
1954 dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
1955 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
1956 plat_dat->nr_channels);
1958 dma_async_device_register(&atdma->dma_common);
1961 * Do not return an error if the dmac node is not present in order to
1962 * not break the existing way of requesting a channel with
1963 * dma_request_channel().
1965 if (pdev->dev.of_node) {
1966 err = of_dma_controller_register(pdev->dev.of_node,
1967 at_dma_xlate, atdma);
1969 dev_err(&pdev->dev, "could not register of_dma_controller\n");
1970 goto err_of_dma_controller_register;
1976 err_of_dma_controller_register:
1977 dma_async_device_unregister(&atdma->dma_common);
1978 dma_pool_destroy(atdma->memset_pool);
1979 err_memset_pool_create:
1980 dma_pool_destroy(atdma->dma_desc_pool);
1981 err_desc_pool_create:
1982 free_irq(platform_get_irq(pdev, 0), atdma);
1984 clk_disable_unprepare(atdma->clk);
1986 clk_put(atdma->clk);
1988 iounmap(atdma->regs);
1991 release_mem_region(io->start, size);
1997 static int at_dma_remove(struct platform_device *pdev)
1999 struct at_dma *atdma = platform_get_drvdata(pdev);
2000 struct dma_chan *chan, *_chan;
2001 struct resource *io;
2004 if (pdev->dev.of_node)
2005 of_dma_controller_free(pdev->dev.of_node);
2006 dma_async_device_unregister(&atdma->dma_common);
2008 dma_pool_destroy(atdma->memset_pool);
2009 dma_pool_destroy(atdma->dma_desc_pool);
2010 free_irq(platform_get_irq(pdev, 0), atdma);
2012 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2014 struct at_dma_chan *atchan = to_at_dma_chan(chan);
2016 /* Disable interrupts */
2017 atc_disable_chan_irq(atdma, chan->chan_id);
2019 tasklet_kill(&atchan->tasklet);
2020 list_del(&chan->device_node);
2023 clk_disable_unprepare(atdma->clk);
2024 clk_put(atdma->clk);
2026 iounmap(atdma->regs);
2029 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2030 release_mem_region(io->start, resource_size(io));
2037 static void at_dma_shutdown(struct platform_device *pdev)
2039 struct at_dma *atdma = platform_get_drvdata(pdev);
2041 at_dma_off(platform_get_drvdata(pdev));
2042 clk_disable_unprepare(atdma->clk);
2045 static int at_dma_prepare(struct device *dev)
2047 struct at_dma *atdma = dev_get_drvdata(dev);
2048 struct dma_chan *chan, *_chan;
2050 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2052 struct at_dma_chan *atchan = to_at_dma_chan(chan);
2053 /* wait for transaction completion (except in cyclic case) */
2054 if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
2060 static void atc_suspend_cyclic(struct at_dma_chan *atchan)
2062 struct dma_chan *chan = &atchan->chan_common;
2064 /* The channel should have been paused by its user;
2065 * do it anyway even if that has not been done already */
2066 if (!atc_chan_is_paused(atchan)) {
2067 dev_warn(chan2dev(chan),
2068 "cyclic channel not paused, should be done by channel user\n");
2072 /* now preserve additional data for cyclic operations */
2073 /* next descriptor address in the cyclic list */
2074 atchan->save_dscr = channel_readl(atchan, DSCR);
2076 vdbg_dump_regs(atchan);
2079 static int at_dma_suspend_noirq(struct device *dev)
2081 struct at_dma *atdma = dev_get_drvdata(dev);
2082 struct dma_chan *chan, *_chan;
2085 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2087 struct at_dma_chan *atchan = to_at_dma_chan(chan);
2089 if (atc_chan_is_cyclic(atchan))
2090 atc_suspend_cyclic(atchan);
2091 atchan->save_cfg = channel_readl(atchan, CFG);
2093 atdma->save_imr = dma_readl(atdma, EBCIMR);
2095 /* disable DMA controller */
2097 clk_disable_unprepare(atdma->clk);
2101 static void atc_resume_cyclic(struct at_dma_chan *atchan)
2103 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
2105 /* restore channel status for cyclic descriptors list:
2106 * next descriptor in the cyclic list at the time of suspend */
2107 channel_writel(atchan, SADDR, 0);
2108 channel_writel(atchan, DADDR, 0);
2109 channel_writel(atchan, CTRLA, 0);
2110 channel_writel(atchan, CTRLB, 0);
2111 channel_writel(atchan, DSCR, atchan->save_dscr);
2112 dma_writel(atdma, CHER, atchan->mask);
2114 /* the channel pause status should be removed by the channel user;
2115 * we cannot take the initiative to do it here */
2117 vdbg_dump_regs(atchan);
2120 static int at_dma_resume_noirq(struct device *dev)
2122 struct at_dma *atdma = dev_get_drvdata(dev);
2123 struct dma_chan *chan, *_chan;
2125 /* bring back DMA controller */
2126 clk_prepare_enable(atdma->clk);
2127 dma_writel(atdma, EN, AT_DMA_ENABLE);
2129 /* clear any pending interrupt */
2130 while (dma_readl(atdma, EBCISR))
2133 /* restore saved data */
2134 dma_writel(atdma, EBCIER, atdma->save_imr);
2135 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2137 struct at_dma_chan *atchan = to_at_dma_chan(chan);
2139 channel_writel(atchan, CFG, atchan->save_cfg);
2140 if (atc_chan_is_cyclic(atchan))
2141 atc_resume_cyclic(atchan);
2146 static const struct dev_pm_ops at_dma_dev_pm_ops = {
2147 .prepare = at_dma_prepare,
2148 .suspend_noirq = at_dma_suspend_noirq,
2149 .resume_noirq = at_dma_resume_noirq,
2152 static struct platform_driver at_dma_driver = {
2153 .remove = at_dma_remove,
2154 .shutdown = at_dma_shutdown,
2155 .id_table = atdma_devtypes,
2158 .pm = &at_dma_dev_pm_ops,
2159 .of_match_table = of_match_ptr(atmel_dma_dt_ids),
2163 static int __init at_dma_init(void)
2165 return platform_driver_probe(&at_dma_driver, at_dma_probe);
2167 subsys_initcall(at_dma_init);
2169 static void __exit at_dma_exit(void)
2171 platform_driver_unregister(&at_dma_driver);
2173 module_exit(at_dma_exit);
2175 MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
2176 MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
2177 MODULE_LICENSE("GPL");
2178 MODULE_ALIAS("platform:at_hdmac");