
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
[android-x86/kernel.git] / drivers / dma / shdma.c
/*
 * Renesas SuperH DMA Engine support
 *
 * based on drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC has no hardware DMA chain mode.
 * - The maximum DMA transfer size is 16 MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/platform_device.h>
#include <cpu/dma.h>
#include <asm/dma-sh.h>
#include "shdma.h"

/* DMA descriptor control */
#define DESC_LAST       (-1)
#define DESC_COMP       (1)
#define DESC_NCOMP      (0)

#define NR_DESCS_PER_CHANNEL 32
/*
 * Default configuration for dual-address memory-memory transfer.
 * The value 0x400 selects auto-request, external->external.
 *
 * The driver uses 4-byte burst mode by default. To change the mode,
 * adjust the value of RS_DEFAULT, e.g. use (RS_DUAL & ~TS_32) for
 * 1-byte burst mode.
 */
#define RS_DEFAULT  (RS_DUAL)

#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
        ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
        return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = RS_DEFAULT; /* default is DUAL mode */
        sh_dmae_writel(sh_chan, chcr, CHCR);
}

/*
 * Reset the DMA controller.
 *
 * SH7780 has two DMAOR registers.
 */
static void sh_dmae_ctl_stop(int id)
{
        unsigned short dmaor = dmaor_read_reg(id);

        dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
        dmaor_write_reg(id, dmaor);
}

static int sh_dmae_rst(int id)
{
        unsigned short dmaor;

        sh_dmae_ctl_stop(id);
        dmaor = dmaor_read_reg(id) | DMAOR_INIT;

        dmaor_write_reg(id, dmaor);
        if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
                pr_warning("dma-sh: Can't initialize DMAOR.\n");
                return -EINVAL;
        }
        return 0;
}

static int dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = sh_dmae_readl(sh_chan, CHCR);
        if (chcr & CHCR_DE) {
                if (!(chcr & CHCR_TE))
                        return -EBUSY; /* working */
        }
        return 0; /* waiting */
}

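/*
 * calc_xmit_shift() derives log2 of the channel's transfer unit size
 * from the TS bits of CHCR; byte counts are shifted right by this
 * value to obtain the transfer count programmed into TCR.
 */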
static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = sh_dmae_readl(sh_chan, CHCR);
        return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw)
{
        sh_dmae_writel(sh_chan, hw.sar, SAR);
        sh_dmae_writel(sh_chan, hw.dar, DAR);
        sh_dmae_writel(sh_chan, hw.tcr >> calc_xmit_shift(sh_chan), TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = sh_dmae_readl(sh_chan, CHCR);

        chcr |= CHCR_DE | CHCR_IE;
        sh_dmae_writel(sh_chan, chcr, CHCR);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = sh_dmae_readl(sh_chan, CHCR);

        chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
        sh_dmae_writel(sh_chan, chcr, CHCR);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
        int ret = dmae_is_busy(sh_chan);
        /* CHCR must not be written while the channel is transferring */
        if (ret)
                return ret;

        sh_dmae_writel(sh_chan, val, CHCR);
        return 0;
}

#define DMARS1_ADDR     0x04
#define DMARS2_ADDR     0x08
#define DMARS_SHIFT 8
#define DMARS_CHAN_MSK 0x01
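/*
 * Each 16-bit DMARS register holds the resource selectors of two
 * channels, eight bits each: even-numbered channels occupy the low
 * byte, odd-numbered channels the high byte (hence DMARS_SHIFT).
 */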
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
        u32 addr;
        int shift = 0;
        int ret = dmae_is_busy(sh_chan);
        if (ret)
                return ret;

        if (sh_chan->id & DMARS_CHAN_MSK)
                shift = DMARS_SHIFT;

        switch (sh_chan->id) {
        /* DMARS0 */
        case 0:
        case 1:
                addr = SH_DMARS_BASE;
                break;
        /* DMARS1 */
        case 2:
        case 3:
                addr = (SH_DMARS_BASE + DMARS1_ADDR);
                break;
        /* DMARS2 */
        case 4:
        case 5:
                addr = (SH_DMARS_BASE + DMARS2_ADDR);
                break;
        default:
                return -EINVAL;
        }

        /* keep the other channel's selector in the shared register intact */
        ctrl_outw((val << shift) |
                (ctrl_inw(addr) & (shift ? 0x00FF : 0xFF00)),
                addr);

        return 0;
}

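/*
 * tx_submit queues the descriptor chain onto ld_queue and assigns the
 * next cookie; the last descriptor of a chain carries the -EBUSY
 * marker set up by sh_dmae_prep_memcpy() and resolved in the tasklet.
 */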
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct sh_desc *desc = tx_to_sh_desc(tx);
        struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
        dma_cookie_t cookie;

        spin_lock_bh(&sh_chan->desc_lock);

        cookie = sh_chan->common.cookie;
        cookie++;
        if (cookie < 0)
                cookie = 1;

        /*
         * Only a transaction consisting of a single descriptor still
         * carries the -EBUSY marker here; keep it, so the completion
         * logic can recognize the last descriptor.
         */
        if (desc->async_tx.cookie != -EBUSY)
                desc->async_tx.cookie = cookie;
        sh_chan->common.cookie = desc->async_tx.cookie;

        list_splice_init(&desc->tx_list, sh_chan->ld_queue.prev);

        spin_unlock_bh(&sh_chan->desc_lock);

        return cookie;
}

static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
        struct sh_desc *desc, *_desc, *ret = NULL;

        spin_lock_bh(&sh_chan->desc_lock);
        list_for_each_entry_safe(desc, _desc, &sh_chan->ld_free, node) {
                if (async_tx_test_ack(&desc->async_tx)) {
                        list_del(&desc->node);
                        ret = desc;
                        break;
                }
        }
        spin_unlock_bh(&sh_chan->desc_lock);

        return ret;
}

static void sh_dmae_put_desc(struct sh_dmae_chan *sh_chan, struct sh_desc *desc)
{
        if (desc) {
                spin_lock_bh(&sh_chan->desc_lock);

                list_splice_init(&desc->tx_list, &sh_chan->ld_free);
                list_add(&desc->node, &sh_chan->ld_free);

                spin_unlock_bh(&sh_chan->desc_lock);
        }
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
        struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
        struct sh_desc *desc;

        spin_lock_bh(&sh_chan->desc_lock);
        while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
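                /*
                 * Drop the lock across the allocation: kzalloc(GFP_KERNEL)
                 * may sleep and must not be called under a spinlock.
                 */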
                spin_unlock_bh(&sh_chan->desc_lock);
                desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
                if (!desc) {
                        spin_lock_bh(&sh_chan->desc_lock);
                        break;
                }
                dma_async_tx_descriptor_init(&desc->async_tx,
                                        &sh_chan->common);
                desc->async_tx.tx_submit = sh_dmae_tx_submit;
                desc->async_tx.flags = DMA_CTRL_ACK;
                INIT_LIST_HEAD(&desc->tx_list);
                sh_dmae_put_desc(sh_chan, desc);

                spin_lock_bh(&sh_chan->desc_lock);
                sh_chan->descs_allocated++;
        }
        spin_unlock_bh(&sh_chan->desc_lock);

        return sh_chan->descs_allocated;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
        struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
        struct sh_desc *desc, *_desc;
        LIST_HEAD(list);

        BUG_ON(!list_empty(&sh_chan->ld_queue));
        spin_lock_bh(&sh_chan->desc_lock);

        list_splice_init(&sh_chan->ld_free, &list);
        sh_chan->descs_allocated = 0;

        spin_unlock_bh(&sh_chan->desc_lock);

        list_for_each_entry_safe(desc, _desc, &list, node)
                kfree(desc);
}

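/*
 * Transfers larger than SH_DMA_TCR_MAX bytes are split into a chain of
 * descriptors, each covering at most SH_DMA_TCR_MAX bytes; the chain
 * is linked through first->tx_list and submitted as one transaction.
 */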
static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
        struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
        size_t len, unsigned long flags)
{
        struct sh_dmae_chan *sh_chan;
        struct sh_desc *first = NULL, *prev = NULL, *new;
        size_t copy_size;

        if (!chan)
                return NULL;

        if (!len)
                return NULL;

        sh_chan = to_sh_chan(chan);

        do {
                /* Allocate the link descriptor from the DMA pool */
                new = sh_dmae_get_desc(sh_chan);
                if (!new) {
                        dev_err(sh_chan->dev,
                                "No free memory for link descriptor\n");
                        goto err_get_desc;
                }

                copy_size = min(len, (size_t)SH_DMA_TCR_MAX);

                new->hw.sar = dma_src;
                new->hw.dar = dma_dest;
                new->hw.tcr = copy_size;
                if (!first)
                        first = new;

                new->mark = DESC_NCOMP;
                async_tx_ack(&new->async_tx);

                prev = new;
                len -= copy_size;
                dma_src += copy_size;
                dma_dest += copy_size;
                /* Insert the link descriptor into the LD ring */
                list_add_tail(&new->node, &first->tx_list);
        } while (len);

        new->async_tx.flags = flags; /* client is in control of this ack */
        new->async_tx.cookie = -EBUSY; /* last desc */

        return &first->async_tx;

err_get_desc:
        sh_dmae_put_desc(sh_chan, first);
        return NULL;
}


/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * Clean up the ld_queue of the DMA channel and run completion callbacks.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan)
{
        struct sh_desc *desc, *_desc;

        spin_lock_bh(&sh_chan->desc_lock);
        list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
                dma_async_tx_callback callback;
                void *callback_param;

                /* stop at the first descriptor not yet transferred */
                if (desc->mark == DESC_NCOMP)
                        break;

                /* this descriptor's data has been transferred */
                callback = desc->async_tx.callback;
                callback_param = desc->async_tx.callback_param;

                /* Remove from the ld_queue list */
                list_splice_init(&desc->tx_list, &sh_chan->ld_free);

                dev_dbg(sh_chan->dev, "link descriptor %p will be recycled.\n",
                                desc);

                list_move(&desc->node, &sh_chan->ld_free);
                /* Run the link descriptor callback function */
                if (callback) {
                        spin_unlock_bh(&sh_chan->desc_lock);
                        dev_dbg(sh_chan->dev, "link descriptor %p callback\n",
                                        desc);
                        callback(callback_param);
                        spin_lock_bh(&sh_chan->desc_lock);
                }
        }
        spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
        struct list_head *ld_node;
        struct sh_dmae_regs hw;

        /* Is the channel still busy? */
        if (dmae_is_busy(sh_chan))
                return;

        /* Find the first untransferred descriptor */
        for (ld_node = sh_chan->ld_queue.next;
                (ld_node != &sh_chan->ld_queue)
                        && (to_sh_desc(ld_node)->mark == DESC_COMP);
                ld_node = ld_node->next)
                cpu_relax();

        if (ld_node != &sh_chan->ld_queue) {
                /* Get the ld start address from ld_queue */
                hw = to_sh_desc(ld_node)->hw;
                dmae_set_reg(sh_chan, hw);
                dmae_start(sh_chan);
        }
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
        struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
        sh_chan_xfer_ld_queue(sh_chan);
}

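/*
 * Note: completed_cookie may still hold the -EBUSY marker when the
 * last finished transaction consisted of a single descriptor (see
 * dmae_do_tasklet()); in that case everything issued so far is
 * treated as complete.
 */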
static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
                                        dma_cookie_t cookie,
                                        dma_cookie_t *done,
                                        dma_cookie_t *used)
{
        struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;

        sh_dmae_chan_ld_cleanup(sh_chan);

        last_used = chan->cookie;
        last_complete = sh_chan->completed_cookie;
        if (last_complete == -EBUSY)
                last_complete = last_used;

        if (done)
                *done = last_complete;

        if (used)
                *used = last_used;

        return dma_async_is_complete(cookie, last_complete, last_used);
}

static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
        irqreturn_t ret = IRQ_NONE;
        struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
        u32 chcr = sh_dmae_readl(sh_chan, CHCR);

        if (chcr & CHCR_TE) {
                /* DMA stop */
                dmae_halt(sh_chan);

                ret = IRQ_HANDLED;
                tasklet_schedule(&sh_chan->tasklet);
        }

        return ret;
}

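/*
 * Error interrupt handler: in mixed-IRQ mode the DMAOR of the
 * controller that raised the error is checked and the IRQ is masked;
 * otherwise the whole controller is reset via sh_dmae_rst().
 */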
#if defined(CONFIG_CPU_SH4)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
        int err = 0;
        struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;

        /* IRQ Multi */
        if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
                int cnt = 0;
                switch (irq) {
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
                case DMTE6_IRQ:
                        cnt++;
                        /* fall through */
#endif
                case DMTE0_IRQ:
                        if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
                                disable_irq(irq);
                                return IRQ_HANDLED;
                        }
                        /* fall through */
                default:
                        return IRQ_NONE;
                }
        } else {
                /* reset the dma controller */
                err = sh_dmae_rst(0);
                if (err)
                        return IRQ_NONE;
                if (shdev->pdata.mode & SHDMA_DMAOR1) {
                        err = sh_dmae_rst(1);
                        if (err)
                                return IRQ_NONE;
                }
                disable_irq(irq);
                return IRQ_HANDLED;
        }
}
#endif

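/*
 * The tasklet identifies the descriptor that has just completed by
 * matching the current SAR value against sar + tcr of each queued
 * descriptor, updates completed_cookie, and starts the next transfer.
 */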
static void dmae_do_tasklet(unsigned long data)
{
        struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
        struct sh_desc *desc, *_desc, *cur_desc = NULL;
        u32 sar_buf = sh_dmae_readl(sh_chan, SAR);

        list_for_each_entry_safe(desc, _desc,
                                 &sh_chan->ld_queue, node) {
                if ((desc->hw.sar + desc->hw.tcr) == sar_buf) {
                        cur_desc = desc;
                        break;
                }
        }

        if (cur_desc) {
                switch (cur_desc->async_tx.cookie) {
                case 0: /* other descriptor data */
                        break;
                case -EBUSY: /* last descriptor */
                        sh_chan->completed_cookie =
                                cur_desc->async_tx.cookie;
                        break;
                default: /* first descriptor (cookie > 0) */
                        sh_chan->completed_cookie =
                                cur_desc->async_tx.cookie - 1;
                        break;
                }
                cur_desc->mark = DESC_COMP;
        }
        /* Start the next descriptor */
        sh_chan_xfer_ld_queue(sh_chan);
        sh_dmae_chan_ld_cleanup(sh_chan);
}

static unsigned int get_dmae_irq(unsigned int id)
{
        unsigned int irq = 0;
        if (id < ARRAY_SIZE(dmte_irq_map))
                irq = dmte_irq_map[id];
        return irq;
}

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
{
        int err;
        unsigned int irq = get_dmae_irq(id);
        unsigned long irqflags = IRQF_DISABLED;
        struct sh_dmae_chan *new_sh_chan;

        /* alloc channel */
        new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
        if (!new_sh_chan) {
                dev_err(shdev->common.dev,
                        "No free memory for allocating dma channels!\n");
                return -ENOMEM;
        }

        new_sh_chan->dev = shdev->common.dev;
        new_sh_chan->id = id;

        /* Init DMA tasklet */
        tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
                        (unsigned long)new_sh_chan);

        /* Init the channel */
        dmae_init(new_sh_chan);

        spin_lock_init(&new_sh_chan->desc_lock);

        /* Init descriptor management lists */
        INIT_LIST_HEAD(&new_sh_chan->ld_queue);
        INIT_LIST_HEAD(&new_sh_chan->ld_free);

        /* copy struct dma_device */
        new_sh_chan->common.device = &shdev->common;

        /* Add the channel to the DMA device channel list */
        list_add_tail(&new_sh_chan->common.device_node,
                        &shdev->common.channels);
        shdev->common.chancnt++;

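        /*
         * In mixed-IRQ mode all channels share one interrupt line:
         * DMTE0_IRQ, or DMTE6_IRQ for the upper channels where that
         * vector exists; remap the per-channel IRQ accordingly.
         */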
        if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
                irqflags = IRQF_SHARED;
#if defined(DMTE6_IRQ)
                if (irq >= DMTE6_IRQ)
                        irq = DMTE6_IRQ;
                else
#endif
                        irq = DMTE0_IRQ;
        }

        snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
                        "sh-dmae%d", new_sh_chan->id);

        /* set up channel irq */
        err = request_irq(irq, &sh_dmae_interrupt, irqflags,
                          new_sh_chan->dev_id, new_sh_chan);
        if (err) {
                dev_err(shdev->common.dev, "DMA channel %d request_irq error "
                        "with return %d\n", id, err);
                goto err_no_irq;
        }

        /* CHCR register control function */
        new_sh_chan->set_chcr = dmae_set_chcr;
        /* DMARS register control function */
        new_sh_chan->set_dmars = dmae_set_dmars;

        shdev->chan[id] = new_sh_chan;
        return 0;

err_no_irq:
        /* remove from dmaengine device node */
        list_del(&new_sh_chan->common.device_node);
        kfree(new_sh_chan);
        return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
        int i;

        for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
                if (shdev->chan[i]) {
                        struct sh_dmae_chan *shchan = shdev->chan[i];
                        if (!(shdev->pdata.mode & SHDMA_MIX_IRQ))
                                free_irq(dmte_irq_map[i], shchan);

                        list_del(&shchan->common.device_node);
                        kfree(shchan);
                        shdev->chan[i] = NULL;
                }
        }
        shdev->common.chancnt = 0;
}

static int __init sh_dmae_probe(struct platform_device *pdev)
{
        int err = 0, cnt, ecnt;
        unsigned long irqflags = IRQF_DISABLED;
#if defined(CONFIG_CPU_SH4)
        int eirq[] = { DMAE0_IRQ,
#if defined(DMAE1_IRQ)
                        DMAE1_IRQ
#endif
                };
#endif
        struct sh_dmae_device *shdev;

        /* get platform data */
        if (!pdev->dev.platform_data)
                return -ENODEV;

        shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
        if (!shdev) {
                dev_err(&pdev->dev, "Not enough memory\n");
                return -ENOMEM;
        }

        /* platform data */
        memcpy(&shdev->pdata, pdev->dev.platform_data,
                        sizeof(struct sh_dmae_pdata));

        /* reset dma controller */
        err = sh_dmae_rst(0);
        if (err)
                goto rst_err;

        /* SH7780/85/23 has DMAOR1 */
        if (shdev->pdata.mode & SHDMA_DMAOR1) {
                err = sh_dmae_rst(1);
                if (err)
                        goto rst_err;
        }

        INIT_LIST_HEAD(&shdev->common.channels);

        dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
        shdev->common.device_alloc_chan_resources
                = sh_dmae_alloc_chan_resources;
        shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
        shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
        shdev->common.device_is_tx_complete = sh_dmae_is_complete;
        shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
        shdev->common.dev = &pdev->dev;
        /* Default transfer size of 32 bytes requires 32-byte alignment */
        shdev->common.copy_align = 5;

#if defined(CONFIG_CPU_SH4)
        /* Mixed IRQ mode (SH7722/SH7730 etc.): error IRQs are shared */
        if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
                irqflags = IRQF_SHARED;
                eirq[0] = DMTE0_IRQ;
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
                eirq[1] = DMTE6_IRQ;
#endif
        }

        for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
                err = request_irq(eirq[ecnt], sh_dmae_err, irqflags,
                                  "DMAC Address Error", shdev);
                if (err) {
                        dev_err(&pdev->dev, "DMA device request_irq "
                                "error (irq %d) with return %d\n",
                                eirq[ecnt], err);
                        goto eirq_err;
                }
        }
#endif /* CONFIG_CPU_SH4 */


        /* Create DMA channels */
        for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) {
                err = sh_dmae_chan_probe(shdev, cnt);
                if (err)
                        goto chan_probe_err;
        }

        platform_set_drvdata(pdev, shdev);
        dma_async_device_register(&shdev->common);

        return err;

chan_probe_err:
        sh_dmae_chan_remove(shdev);

eirq_err:
        for (ecnt-- ; ecnt >= 0; ecnt--)
                free_irq(eirq[ecnt], shdev);

rst_err:
        kfree(shdev);

        return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
        struct sh_dmae_device *shdev = platform_get_drvdata(pdev);

        dma_async_device_unregister(&shdev->common);

        if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
                free_irq(DMTE0_IRQ, shdev);
#if defined(DMTE6_IRQ)
                free_irq(DMTE6_IRQ, shdev);
#endif
        }

        /* channel data remove */
        sh_dmae_chan_remove(shdev);

        if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) {
                free_irq(DMAE0_IRQ, shdev);
#if defined(DMAE1_IRQ)
                free_irq(DMAE1_IRQ, shdev);
#endif
        }
        kfree(shdev);

        return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
        struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
        sh_dmae_ctl_stop(0);
        if (shdev->pdata.mode & SHDMA_DMAOR1)
                sh_dmae_ctl_stop(1);
}

static struct platform_driver sh_dmae_driver = {
        .remove         = __exit_p(sh_dmae_remove),
        .shutdown       = sh_dmae_shutdown,
        .driver = {
                .name   = "sh-dma-engine",
        },
};

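/*
 * No .probe method in sh_dmae_driver: registration goes through
 * platform_driver_probe(), which takes the __init probe function
 * directly so it can be discarded after boot.
 */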
static int __init sh_dmae_init(void)
{
        return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
        platform_driver_unregister(&sh_dmae_driver);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");