/*
 * drivers/dma/ste_dma40.c
 *
 * Copyright (C) ST-Ericsson 2007-2010
 * License terms: GNU General Public License (GPL) version 2
 * Author: Per Friden <per.friden@stericsson.com>
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
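
/*
 * Worked example (illustrative, not part of the driver): each physical
 * channel occupies a 2-bit field, and an even/odd channel pair shares the
 * same bit position across the even (ACTIVE) and odd (ACTIVO) registers.
 * For instance, channels 4 and 5 both yield position 4:
 *
 *   D40_CHAN_POS(4)      == 2 * (4 / 2) == 4
 *   D40_CHAN_POS(5)      == 2 * (5 / 2) == 4
 *   D40_CHAN_POS_MASK(5) == 0x3 << 4    == 0x30
 *
 * Channel 4 is then accessed via D40_DREG_ACTIVE and channel 5 via
 * D40_DREG_ACTIVO, as done in d40_channel_execute_command() below.
 */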

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000
/* Attempts before giving up on trying to get aligned pages */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE          (1 << 31)
#define D40_ALLOC_PHY           (1 << 30)
#define D40_ALLOC_LOG_FREE      0
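
/*
 * Encoding example (illustrative, not part of the driver): the
 * allocated_src/allocated_dst fields of struct d40_phy_res below use this
 * scheme. D40_ALLOC_FREE means the half channel is unused, D40_ALLOC_PHY
 * means it is taken by a physical channel, and D40_ALLOC_LOG_FREE (0) with
 * event-line bits set means logical use. E.g. a src half channel carrying
 * logical event lines 1 and 3 holds (1 << 1) | (1 << 3) == 0xa; see
 * d40_alloc_mask_set() below.
 */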

/* Hardware designer of the block */
#define D40_PERIPHID2_DESIGNER 0x8

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
        D40_DMA_STOP            = 0,
        D40_DMA_RUN             = 1,
        D40_DMA_SUSPEND_REQ     = 2,
        D40_DMA_SUSPENDED       = 3
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre-allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
        void    *base;
        int      size;
        /* Space for dst and src, plus an extra for padding */
        u8       pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_count: Number of transferred llis.
 * @lli_tx_len: Max number of LLIs per transfer; there can be
 * many transfers for one descriptor.
 * @txd: DMA engine struct. Used among other things for communication
 * during a transfer.
 * @node: List entry.
 * @dir: The transfer direction of this job.
 * @is_in_client_list: true if the client owns this descriptor.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
        /* LLI physical */
        struct d40_phy_lli_bidir         lli_phy;
        /* LLI logical */
        struct d40_log_lli_bidir         lli_log;

        struct d40_lli_pool              lli_pool;
        int                              lli_len;
        int                              lli_count;
        u32                              lli_tx_len;

        struct dma_async_tx_descriptor   txd;
        struct list_head                 node;

        enum dma_data_direction          dir;
        bool                             is_in_client_list;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18-bit aligned.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Bitmap mapping between physical channel and LCLA entries.
 * @num_blocks: The number of entries of alloc_map. Equals the
 * number of physical channels.
 */
struct d40_lcla_pool {
        void            *base;
        void            *base_unaligned;
        int              pages;
        spinlock_t       lock;
        u32             *alloc_map;
        int              num_blocks;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line numbers. allocated_src and allocated_dst cannot both be
 * physically allocated, since the interrupt handler would then have no
 * way of figuring out which one the interrupt belongs to.
 */
struct d40_phy_res {
        spinlock_t lock;
        int        num;
        u32        allocated_src;
        u32        allocated_dst;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @completed: Starts with 1; after the first interrupt it is set to the dma
 * engine's current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client-owned descriptor list.
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
        spinlock_t                       lock;
        int                              log_num;
        /* ID of the most recent completed transfer */
        int                              completed;
        int                              pending_tx;
        bool                             busy;
        struct d40_phy_res              *phy_chan;
        struct dma_chan                  chan;
        struct tasklet_struct            tasklet;
        struct list_head                 client;
        struct list_head                 active;
        struct list_head                 queue;
        struct stedma40_chan_cfg         dma_cfg;
        struct d40_base                 *base;
        /* Default register configurations */
        u32                              src_def_cfg;
        u32                              dst_def_cfg;
        struct d40_def_lcsp              log_def;
        struct d40_lcla_elem             lcla;
        struct d40_log_lli_full         *lcpa;
};

/**
 * struct d40_base - The big global struct, one for each probed instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's registers.
 * @rev: Silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
struct d40_base {
        spinlock_t                       interrupt_lock;
        spinlock_t                       execmd_lock;
        struct device                    *dev;
        void __iomem                     *virtbase;
        u8                                rev:4;
        struct clk                       *clk;
        phys_addr_t                       phy_start;
        resource_size_t                   phy_size;
        int                               irq;
        int                               num_phy_chans;
        int                               num_log_chans;
        struct dma_device                 dma_both;
        struct dma_device                 dma_slave;
        struct dma_device                 dma_memcpy;
        struct d40_chan                  *phy_chans;
        struct d40_chan                  *log_chans;
        struct d40_chan                 **lookup_log_chans;
        struct d40_chan                 **lookup_phy_chans;
        struct stedma40_platform_data    *plat_data;
        /* Physical half channels */
        struct d40_phy_res               *phy_res;
        struct d40_lcla_pool              lcla_pool;
        void                             *lcpa_base;
        dma_addr_t                        phy_lcpa;
        resource_size_t                   lcpa_size;
        struct kmem_cache                *desc_slab;
};

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
        u32 src;
        u32 clr;
        bool is_error;
        int offset;
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
        unsigned int reg;
        unsigned int val;
};

static int d40_pool_lli_alloc(struct d40_desc *d40d,
                              int lli_len, bool is_log)
{
        u32 align;
        void *base;

        if (is_log)
                align = sizeof(struct d40_log_lli);
        else
                align = sizeof(struct d40_phy_lli);

        if (lli_len == 1) {
                base = d40d->lli_pool.pre_alloc_lli;
                d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
                d40d->lli_pool.base = NULL;
        } else {
                d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);

                base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
                d40d->lli_pool.base = base;

                if (d40d->lli_pool.base == NULL)
                        return -ENOMEM;
        }

        if (is_log) {
                d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
                                              align);
                d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
                                              align);
        } else {
                d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
                                              align);
                d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
                                              align);

                d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
                d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
        }

        return 0;
}
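
/*
 * Sizing sketch (illustrative, not part of the driver): for a physical
 * transfer of lli_len == 4 the pool holds 4 src + 4 dst LLIs, so
 * lli_pool.size == ALIGN(4 * 2 * sizeof(struct d40_phy_lli), align), and
 * one extra "align" worth of bytes is kmalloc'ed so that PTR_ALIGN() can
 * round the base up without running past the end of the buffer. The
 * single-LLI case skips kmalloc entirely and reuses pre_alloc_lli.
 */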

static void d40_pool_lli_free(struct d40_desc *d40d)
{
        kfree(d40d->lli_pool.base);
        d40d->lli_pool.base = NULL;
        d40d->lli_pool.size = 0;
        d40d->lli_log.src = NULL;
        d40d->lli_log.dst = NULL;
        d40d->lli_phy.src = NULL;
        d40d->lli_phy.dst = NULL;
        d40d->lli_phy.src_addr = 0;
        d40d->lli_phy.dst_addr = 0;
}

static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
                                      struct d40_desc *desc)
{
        dma_cookie_t cookie = d40c->chan.cookie;

        if (++cookie < 0)
                cookie = 1;

        d40c->chan.cookie = cookie;
        desc->txd.cookie = cookie;

        return cookie;
}
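
/*
 * Cookie wraparound example (illustrative, not part of the driver):
 * dma_cookie_t is a signed int, so when chan.cookie has reached INT_MAX
 * the pre-increment overflows to a negative value and the cookie is reset
 * to 1; zero and negative cookies are reserved by the dmaengine framework
 * for error signalling. E.g. cookie == INT_MAX -> ++cookie < 0 -> cookie = 1.
 */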

static void d40_desc_remove(struct d40_desc *d40d)
{
        list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
        struct d40_desc *d;
        struct d40_desc *_d;

        if (!list_empty(&d40c->client)) {
                list_for_each_entry_safe(d, _d, &d40c->client, node)
                        if (async_tx_test_ack(&d->txd)) {
                                d40_pool_lli_free(d);
                                d40_desc_remove(d);
                                break;
                        }
        } else {
                d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
                if (d != NULL) {
                        memset(d, 0, sizeof(struct d40_desc));
                        INIT_LIST_HEAD(&d->node);
                }
        }
        return d;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
        kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
        list_add_tail(&desc->node, &d40c->active);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
        struct d40_desc *d;

        if (list_empty(&d40c->active))
                return NULL;

        d = list_first_entry(&d40c->active,
                             struct d40_desc,
                             node);
        return d;
}

static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
        list_add_tail(&desc->node, &d40c->queue);
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
        struct d40_desc *d;

        if (list_empty(&d40c->queue))
                return NULL;

        d = list_first_entry(&d40c->queue,
                             struct d40_desc,
                             node);
        return d;
}

/* Support functions for logical channels */

static int d40_lcla_id_get(struct d40_chan *d40c)
{
        int src_id = 0;
        int dst_id = 0;
        struct d40_log_lli *lcla_lidx_base =
                d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024;
        int i;
        int lli_per_log = d40c->base->plat_data->llis_per_log;
        unsigned long flags;

        if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
                return 0;

        if (d40c->base->lcla_pool.num_blocks > 32)
                return -EINVAL;

        spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

        for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) {
                if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
                      (0x1 << i))) {
                        d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
                                (0x1 << i);
                        break;
                }
        }
        src_id = i;
        if (src_id >= d40c->base->lcla_pool.num_blocks)
                goto err;

        for (; i < d40c->base->lcla_pool.num_blocks; i++) {
                if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
                      (0x1 << i))) {
                        d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
                                (0x1 << i);
                        break;
                }
        }

        dst_id = i;
        /* No second free block left, or dst collided with src */
        if (dst_id >= d40c->base->lcla_pool.num_blocks || dst_id == src_id)
                goto err;

        d40c->lcla.src_id = src_id;
        d40c->lcla.dst_id = dst_id;
        d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
        d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;

        spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
        return 0;
err:
        spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
        return -EINVAL;
}
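
/*
 * Allocation walk-through (illustrative, not part of the driver): with an
 * empty alloc_map word, the first loop claims bit 0 for src_id; the second
 * loop continues past the freshly set bit and claims bit 1 for dst_id,
 * leaving alloc_map[phy] == 0x3. If only one free block was left, the
 * second loop runs off the end (i == num_blocks) and the function backs
 * out with -EINVAL.
 */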

static int d40_channel_execute_command(struct d40_chan *d40c,
                                       enum d40_command command)
{
        int status, i;
        void __iomem *active_reg;
        int ret = 0;
        unsigned long flags;
        u32 wmask;

        spin_lock_irqsave(&d40c->base->execmd_lock, flags);

        if (d40c->phy_chan->num % 2 == 0)
                active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
        else
                active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

        if (command == D40_DMA_SUSPEND_REQ) {
                status = (readl(active_reg) &
                          D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
                        D40_CHAN_POS(d40c->phy_chan->num);

                if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
                        goto done;
        }

        wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
        writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
               active_reg);

        if (command == D40_DMA_SUSPEND_REQ) {

                for (i = 0; i < D40_SUSPEND_MAX_IT; i++) {
                        status = (readl(active_reg) &
                                  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
                                D40_CHAN_POS(d40c->phy_chan->num);

                        cpu_relax();
                        /*
                         * Reduce the number of bus accesses while
                         * waiting for the DMA to suspend.
                         */
                        udelay(3);

                        if (status == D40_DMA_STOP ||
                            status == D40_DMA_SUSPENDED)
                                break;
                }

                if (i == D40_SUSPEND_MAX_IT) {
                        dev_err(&d40c->chan.dev->device,
                                "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
                                __func__, d40c->phy_chan->num, d40c->log_num,
                                status);
                        dump_stack();
                        ret = -EBUSY;
                }

        }
done:
        spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
        return ret;
}
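
/*
 * Usage sketch (illustrative, not part of the driver): pausing physical
 * channel 5 writes the 2-bit D40_DMA_SUSPEND_REQ command into bits 5:4 of
 * the odd-channel register D40_DREG_ACTIVO; wmask fills every other
 * channel's field with ones, which the hardware presumably treats as
 * leaving those channels untouched. The same field is then polled up to
 * D40_SUSPEND_MAX_IT times until it reads back SUSPENDED or STOP:
 *
 *   int ret = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
 */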

static void d40_term_all(struct d40_chan *d40c)
{
        struct d40_desc *d40d;
        unsigned long flags;

        /* Release active descriptors */
        while ((d40d = d40_first_active_get(d40c))) {
                d40_desc_remove(d40d);

                /* Return desc to free-list */
                d40_desc_free(d40c, d40d);
        }

        /* Release queued descriptors waiting for transfer */
        while ((d40d = d40_first_queued(d40c))) {
                d40_desc_remove(d40d);

                /* Return desc to free-list */
                d40_desc_free(d40c, d40d);
        }

        spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

        d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
                (~(0x1 << d40c->lcla.dst_id));
        d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
                (~(0x1 << d40c->lcla.src_id));

        d40c->lcla.src_id = -1;
        d40c->lcla.dst_id = -1;

        spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

        d40c->pending_tx = 0;
        d40c->busy = false;
}

static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
        u32 val;
        unsigned long flags;

        /* Note that disable requires the physical channel to be stopped */
        if (do_enable)
                val = D40_ACTIVATE_EVENTLINE;
        else
                val = D40_DEACTIVATE_EVENTLINE;

        spin_lock_irqsave(&d40c->phy_chan->lock, flags);

        /* Enable event line connected to device (or memcpy) */
        if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
            (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
                u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

                writel((val << D40_EVENTLINE_POS(event)) |
                       ~D40_EVENTLINE_MASK(event),
                       d40c->base->virtbase + D40_DREG_PCBASE +
                       d40c->phy_chan->num * D40_DREG_PCDELTA +
                       D40_CHAN_REG_SSLNK);
        }
        if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
                u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

                writel((val << D40_EVENTLINE_POS(event)) |
                       ~D40_EVENTLINE_MASK(event),
                       d40c->base->virtbase + D40_DREG_PCBASE +
                       d40c->phy_chan->num * D40_DREG_PCDELTA +
                       D40_CHAN_REG_SDLNK);
        }

        spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
        u32 val = 0;

        /* If SSLNK or SDLNK is zero all events are disabled */
        if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
            (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
                val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
                            d40c->phy_chan->num * D40_DREG_PCDELTA +
                            D40_CHAN_REG_SSLNK);

        if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
                val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
                            d40c->phy_chan->num * D40_DREG_PCDELTA +
                            D40_CHAN_REG_SDLNK);
        return val;
}

static void d40_config_enable_lidx(struct d40_chan *d40c)
{
        /* Set LIDX for lcla */
        writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
               D40_SREG_ELEM_LOG_LIDX_MASK,
               d40c->base->virtbase + D40_DREG_PCBASE +
               d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);

        writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
               D40_SREG_ELEM_LOG_LIDX_MASK,
               d40c->base->virtbase + D40_DREG_PCBASE +
               d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
}

static int d40_config_write(struct d40_chan *d40c)
{
        u32 addr_base;
        u32 var;
        int res;

        res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
        if (res)
                return res;

        /* Odd addresses are even addresses + 4 */
        addr_base = (d40c->phy_chan->num % 2) * 4;
        /* Setup channel mode to logical or physical */
        var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
                D40_CHAN_POS(d40c->phy_chan->num);
        writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

        /* Setup operational mode option register */
        var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
               0x3) << D40_CHAN_POS(d40c->phy_chan->num);

        writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

        if (d40c->log_num != D40_PHY_CHAN) {
                /* Set default config for CFG reg */
                writel(d40c->src_def_cfg,
                       d40c->base->virtbase + D40_DREG_PCBASE +
                       d40c->phy_chan->num * D40_DREG_PCDELTA +
                       D40_CHAN_REG_SSCFG);
                writel(d40c->dst_def_cfg,
                       d40c->base->virtbase + D40_DREG_PCBASE +
                       d40c->phy_chan->num * D40_DREG_PCDELTA +
                       D40_CHAN_REG_SDCFG);

                d40_config_enable_lidx(d40c);
        }
        return res;
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
        if (d40d->lli_phy.dst && d40d->lli_phy.src) {
                d40_phy_lli_write(d40c->base->virtbase,
                                  d40c->phy_chan->num,
                                  d40d->lli_phy.dst,
                                  d40d->lli_phy.src);
        } else if (d40d->lli_log.dst && d40d->lli_log.src) {
                struct d40_log_lli *src = d40d->lli_log.src;
                struct d40_log_lli *dst = d40d->lli_log.dst;
                int s;

                src += d40d->lli_count;
                dst += d40d->lli_count;
                s = d40_log_lli_write(d40c->lcpa,
                                      d40c->lcla.src, d40c->lcla.dst,
                                      dst, src,
                                      d40c->base->plat_data->llis_per_log);

                /* If s equals zero, the job is not linked */
                if (s > 0) {
                        (void) dma_map_single(d40c->base->dev, d40c->lcla.src,
                                              s * sizeof(struct d40_log_lli),
                                              DMA_TO_DEVICE);
                        (void) dma_map_single(d40c->base->dev, d40c->lcla.dst,
                                              s * sizeof(struct d40_log_lli),
                                              DMA_TO_DEVICE);
                }
        }
        d40d->lli_count += d40d->lli_tx_len;
}
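
/*
 * Chunking example (illustrative, not part of the driver): a logical-channel
 * descriptor with lli_len == 8 and lli_tx_len == 4 is loaded in two rounds.
 * The first d40_desc_load() programs LLIs 0..3 and bumps lli_count to 4;
 * when the terminal-count interrupt fires, dma_tc_handle() sees
 * lli_count < lli_len and loads LLIs 4..7 before restarting the channel.
 */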

static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct d40_chan *d40c = container_of(tx->chan,
                                             struct d40_chan,
                                             chan);
        struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
        unsigned long flags;

        spin_lock_irqsave(&d40c->lock, flags);

        tx->cookie = d40_assign_cookie(d40c, d40d);

        d40_desc_queue(d40c, d40d);

        spin_unlock_irqrestore(&d40c->lock, flags);

        return tx->cookie;
}

static int d40_start(struct d40_chan *d40c)
{
        if (d40c->base->rev == 0) {
                int err;

                if (d40c->log_num != D40_PHY_CHAN) {
                        err = d40_channel_execute_command(d40c,
                                                          D40_DMA_SUSPEND_REQ);
                        if (err)
                                return err;
                }
        }

        if (d40c->log_num != D40_PHY_CHAN)
                d40_config_set_event(d40c, true);

        return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
        struct d40_desc *d40d;
        int err;

        /* Start queued jobs, if any */
        d40d = d40_first_queued(d40c);

        if (d40d != NULL) {
                d40c->busy = true;

                /* Remove from queue */
                d40_desc_remove(d40d);

                /* Add to active queue */
                d40_desc_submit(d40c, d40d);

                /* Initiate DMA job */
                d40_desc_load(d40c, d40d);

                /* Start dma job */
                err = d40_start(d40c);

                if (err)
                        return NULL;
        }

        return d40d;
}

/* Called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
        struct d40_desc *d40d;

        if (!d40c->phy_chan)
                return;

        /* Get first active entry from list */
        d40d = d40_first_active_get(d40c);

        if (d40d == NULL)
                return;

        if (d40d->lli_count < d40d->lli_len) {

                d40_desc_load(d40c, d40d);
                /* Start dma job */
                (void) d40_start(d40c);
                return;
        }

        if (d40_queue_start(d40c) == NULL)
                d40c->busy = false;

        d40c->pending_tx++;
        tasklet_schedule(&d40c->tasklet);
}

static void dma_tasklet(unsigned long data)
{
        struct d40_chan *d40c = (struct d40_chan *) data;
        struct d40_desc *d40d_fin;
        unsigned long flags;
        dma_async_tx_callback callback;
        void *callback_param;

        spin_lock_irqsave(&d40c->lock, flags);

        /* Get first active entry from list */
        d40d_fin = d40_first_active_get(d40c);

        if (d40d_fin == NULL)
                goto err;

        d40c->completed = d40d_fin->txd.cookie;

        /*
         * If terminating a channel pending_tx is set to zero.
         * This prevents any finished active jobs from returning to the
         * client.
         */
        if (d40c->pending_tx == 0) {
                spin_unlock_irqrestore(&d40c->lock, flags);
                return;
        }

        /* Callback to client */
        callback = d40d_fin->txd.callback;
        callback_param = d40d_fin->txd.callback_param;

        if (async_tx_test_ack(&d40d_fin->txd)) {
                d40_pool_lli_free(d40d_fin);
                d40_desc_remove(d40d_fin);
                /* Return desc to free-list */
                d40_desc_free(d40c, d40d_fin);
        } else {
                if (!d40d_fin->is_in_client_list) {
                        d40_desc_remove(d40d_fin);
                        list_add_tail(&d40d_fin->node, &d40c->client);
                        d40d_fin->is_in_client_list = true;
                }
        }

        d40c->pending_tx--;

        if (d40c->pending_tx)
                tasklet_schedule(&d40c->tasklet);

        spin_unlock_irqrestore(&d40c->lock, flags);

        if (callback)
                callback(callback_param);

        return;

 err:
        /* Rescue maneuver if receiving double interrupts */
        if (d40c->pending_tx > 0)
                d40c->pending_tx--;
        spin_unlock_irqrestore(&d40c->lock, flags);
}

static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
        static const struct d40_interrupt_lookup il[] = {
                {D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
                {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
                {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
                {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
                {D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
                {D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
                {D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
                {D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
                {D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
                {D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
        };

        int i;
        u32 regs[ARRAY_SIZE(il)];
        u32 tmp;
        u32 idx;
        u32 row;
        long chan = -1;
        struct d40_chan *d40c;
        unsigned long flags;
        struct d40_base *base = data;

        spin_lock_irqsave(&base->interrupt_lock, flags);

        /* Read interrupt status of both logical and physical channels */
        for (i = 0; i < ARRAY_SIZE(il); i++)
                regs[i] = readl(base->virtbase + il[i].src);

        for (;;) {

                chan = find_next_bit((unsigned long *)regs,
                                     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

                /* No more set bits found? */
                if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
                        break;

                row = chan / BITS_PER_LONG;
                idx = chan & (BITS_PER_LONG - 1);

                /* ACK interrupt */
                tmp = readl(base->virtbase + il[row].clr);
                tmp |= 1 << idx;
                writel(tmp, base->virtbase + il[row].clr);

                if (il[row].offset == D40_PHY_CHAN)
                        d40c = base->lookup_phy_chans[idx];
                else
                        d40c = base->lookup_log_chans[il[row].offset + idx];
                spin_lock(&d40c->lock);

                if (!il[row].is_error)
                        dma_tc_handle(d40c);
                else
                        dev_err(base->dev,
                                "[%s] IRQ chan: %ld offset %d idx %d\n",
                                __func__, chan, il[row].offset, idx);

                spin_unlock(&d40c->lock);
        }

        spin_unlock_irqrestore(&base->interrupt_lock, flags);

        return IRQ_HANDLED;
}
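
/*
 * Bit-walk example (illustrative, not part of the driver; assumes 32-bit
 * BITS_PER_LONG): the ten status words in regs[] are scanned as one long
 * bitmap, so a set bit at overall position chan == 70 decomposes into
 * row == 70 / 32 == 2 and idx == 70 % 32 == 6, i.e. logical terminal-count
 * status register LCTIS2, which maps to
 * lookup_log_chans[il[2].offset + 6] == lookup_log_chans[70].
 */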

static int d40_validate_conf(struct d40_chan *d40c,
                             struct stedma40_chan_cfg *conf)
{
        int res = 0;
        u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
        u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
        bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
                == STEDMA40_CHANNEL_IN_LOG_MODE;

        if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
            dst_event_group == STEDMA40_DEV_DST_MEMORY) {
                dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
                        __func__);
                res = -EINVAL;
        }

        if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
            src_event_group == STEDMA40_DEV_SRC_MEMORY) {
                dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
                        __func__);
                res = -EINVAL;
        }

        if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
            dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
                dev_err(&d40c->chan.dev->device,
                        "[%s] No event line\n", __func__);
                res = -EINVAL;
        }

        if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
            (src_event_group != dst_event_group)) {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Invalid event group\n", __func__);
                res = -EINVAL;
        }

        if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
                /*
                 * DMAC HW supports it. Will be added to this driver,
                 * in case any dma client requires it.
                 */
                dev_err(&d40c->chan.dev->device,
                        "[%s] periph to periph not supported\n",
                        __func__);
                res = -EINVAL;
        }

        return res;
}

static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
                               int log_event_line, bool is_log)
{
        unsigned long flags;

        spin_lock_irqsave(&phy->lock, flags);
        if (!is_log) {
                /* Physical interrupts are masked per physical full channel */
                if (phy->allocated_src == D40_ALLOC_FREE &&
                    phy->allocated_dst == D40_ALLOC_FREE) {
                        phy->allocated_dst = D40_ALLOC_PHY;
                        phy->allocated_src = D40_ALLOC_PHY;
                        goto found;
                } else
                        goto not_found;
        }

        /* Logical channel */
        if (is_src) {
                if (phy->allocated_src == D40_ALLOC_PHY)
                        goto not_found;

                if (phy->allocated_src == D40_ALLOC_FREE)
                        phy->allocated_src = D40_ALLOC_LOG_FREE;

                if (!(phy->allocated_src & (1 << log_event_line))) {
                        phy->allocated_src |= 1 << log_event_line;
                        goto found;
                } else
                        goto not_found;
        } else {
                if (phy->allocated_dst == D40_ALLOC_PHY)
                        goto not_found;

                if (phy->allocated_dst == D40_ALLOC_FREE)
                        phy->allocated_dst = D40_ALLOC_LOG_FREE;

                if (!(phy->allocated_dst & (1 << log_event_line))) {
                        phy->allocated_dst |= 1 << log_event_line;
                        goto found;
                } else
                        goto not_found;
        }

not_found:
        spin_unlock_irqrestore(&phy->lock, flags);
        return false;
found:
        spin_unlock_irqrestore(&phy->lock, flags);
        return true;
}
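
/*
 * State-transition example (illustrative, not part of the driver): claiming
 * event line 3 as src on a free half channel first moves allocated_src from
 * D40_ALLOC_FREE to D40_ALLOC_LOG_FREE (0) and then sets bit 3, giving 0x8.
 * A later physical claim of the same channel fails, because that requires
 * both halves to read exactly D40_ALLOC_FREE.
 */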

static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
                                int log_event_line)
{
        unsigned long flags;
        bool is_free = false;

        spin_lock_irqsave(&phy->lock, flags);
        if (!log_event_line) {
                /* Physical interrupts are masked per physical full channel */
                phy->allocated_dst = D40_ALLOC_FREE;
                phy->allocated_src = D40_ALLOC_FREE;
                is_free = true;
                goto out;
        }

        /* Logical channel */
        if (is_src) {
                phy->allocated_src &= ~(1 << log_event_line);
                if (phy->allocated_src == D40_ALLOC_LOG_FREE)
                        phy->allocated_src = D40_ALLOC_FREE;
        } else {
                phy->allocated_dst &= ~(1 << log_event_line);
                if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
                        phy->allocated_dst = D40_ALLOC_FREE;
        }

        is_free = ((phy->allocated_src | phy->allocated_dst) ==
                   D40_ALLOC_FREE);

out:
        spin_unlock_irqrestore(&phy->lock, flags);

        return is_free;
}

static int d40_allocate_channel(struct d40_chan *d40c)
{
        int dev_type;
        int event_group;
        int event_line;
        struct d40_phy_res *phys;
        int i;
        int j;
        int log_num;
        bool is_src;
        bool is_log = (d40c->dma_cfg.channel_type &
                       STEDMA40_CHANNEL_IN_OPER_MODE)
                == STEDMA40_CHANNEL_IN_LOG_MODE;

        phys = d40c->base->phy_res;

        if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
                dev_type = d40c->dma_cfg.src_dev_type;
                log_num = 2 * dev_type;
                is_src = true;
        } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
                   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
                /* dst event lines are used for logical memcpy */
                dev_type = d40c->dma_cfg.dst_dev_type;
                log_num = 2 * dev_type + 1;
                is_src = false;
        } else
                return -EINVAL;

        event_group = D40_TYPE_TO_GROUP(dev_type);
        event_line = D40_TYPE_TO_EVENT(dev_type);

        if (!is_log) {
                if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
                        /* Find physical half channel */
                        for (i = 0; i < d40c->base->num_phy_chans; i++) {

                                if (d40_alloc_mask_set(&phys[i], is_src,
                                                       0, is_log))
                                        goto found_phy;
                        }
                } else
                        for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
                                int phy_num = j + event_group * 2;
                                for (i = phy_num; i < phy_num + 2; i++) {
                                        if (d40_alloc_mask_set(&phys[i],
                                                               is_src,
                                                               0,
                                                               is_log))
                                                goto found_phy;
                                }
                        }
                return -EINVAL;
found_phy:
                d40c->phy_chan = &phys[i];
                d40c->log_num = D40_PHY_CHAN;
                goto out;
        }
        if (dev_type == -1)
                return -EINVAL;

        /* Find logical channel */
        for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
                int phy_num = j + event_group * 2;
                /*
                 * Spread logical channels across all available physical
                 * channels rather than packing every logical channel onto
                 * the first available phy channel.
                 */
                if (is_src) {
                        for (i = phy_num; i < phy_num + 2; i++) {
                                if (d40_alloc_mask_set(&phys[i], is_src,
                                                       event_line, is_log))
                                        goto found_log;
                        }
                } else {
                        for (i = phy_num + 1; i >= phy_num; i--) {
                                if (d40_alloc_mask_set(&phys[i], is_src,
                                                       event_line, is_log))
                                        goto found_log;
                        }
                }
        }
        return -EINVAL;

found_log:
        d40c->phy_chan = &phys[i];
        d40c->log_num = log_num;
out:

        if (is_log)
                d40c->base->lookup_log_chans[d40c->log_num] = d40c;
        else
                d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

        return 0;
}
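
/*
 * Candidate-selection example (illustrative, not part of the driver): for a
 * device in event group 1 on a controller with 8 physical channels, each
 * pass of the j-loop considers phy_num = j + 1 * 2, i.e. channels 2 and 3.
 * src allocations probe channel 2 then 3 while dst allocations probe 3
 * then 2, so src and dst event lines naturally land on different physical
 * channels while both are free.
 */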

static int d40_config_memcpy(struct d40_chan *d40c)
{
        dma_cap_mask_t cap = d40c->chan.device->cap_mask;

        if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
                d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
                d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
                d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
                        memcpy[d40c->chan.chan_id];

        } else if (dma_has_cap(DMA_MEMCPY, cap) &&
                   dma_has_cap(DMA_SLAVE, cap)) {
                d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
        } else {
                dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
                        __func__);
                return -EINVAL;
        }

        return 0;
}

static int d40_free_dma(struct d40_chan *d40c)
{
        int res = 0;
        u32 event;
        struct d40_phy_res *phy = d40c->phy_chan;
        bool is_src;
        struct d40_desc *d;
        struct d40_desc *_d;

        /* Terminate all queued and active transfers */
        d40_term_all(d40c);

        /* Release client owned descriptors */
        if (!list_empty(&d40c->client))
                list_for_each_entry_safe(d, _d, &d40c->client, node) {
                        d40_pool_lli_free(d);
                        d40_desc_remove(d);
                        /* Return desc to free-list */
                        d40_desc_free(d40c, d);
                }

        if (phy == NULL) {
                dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
                        __func__);
                return -EINVAL;
        }

        if (phy->allocated_src == D40_ALLOC_FREE &&
            phy->allocated_dst == D40_ALLOC_FREE) {
                dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
                        __func__);
                return -EINVAL;
        }

        if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
            d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
                event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
                is_src = false;
        } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
                event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
                is_src = true;
        } else {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Unknown direction\n", __func__);
                return -EINVAL;
        }

        res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
        if (res) {
                dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
                        __func__);
                return res;
        }

        if (d40c->log_num != D40_PHY_CHAN) {
                /* Release logical channel, deactivate the event line */
                d40_config_set_event(d40c, false);
                d40c->base->lookup_log_chans[d40c->log_num] = NULL;

                /*
                 * Check if there are more logical allocations
                 * on this phy channel.
                 */
                if (!d40_alloc_mask_free(phy, is_src, event)) {
                        /* Resume the other logical channels if any */
                        if (d40_chan_has_events(d40c)) {
                                res = d40_channel_execute_command(d40c,
                                                                  D40_DMA_RUN);
                                if (res) {
                                        dev_err(&d40c->chan.dev->device,
                                                "[%s] Executing RUN command\n",
                                                __func__);
                                        return res;
                                }
                        }
                        return 0;
                }
        } else {
                (void) d40_alloc_mask_free(phy, is_src, 0);
        }

        /* Release physical channel */
        res = d40_channel_execute_command(d40c, D40_DMA_STOP);
        if (res) {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Failed to stop channel\n", __func__);
                return res;
        }
        d40c->phy_chan = NULL;
        /* Invalidate channel type */
        d40c->dma_cfg.channel_type = 0;
        d40c->base->lookup_phy_chans[phy->num] = NULL;

        return 0;
}

static int d40_pause(struct dma_chan *chan)
{
        struct d40_chan *d40c =
                container_of(chan, struct d40_chan, chan);
        int res;
        unsigned long flags;

        spin_lock_irqsave(&d40c->lock, flags);

        res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
        if (res == 0) {
                if (d40c->log_num != D40_PHY_CHAN) {
                        d40_config_set_event(d40c, false);
                        /* Resume the other logical channels if any */
                        if (d40_chan_has_events(d40c))
                                res = d40_channel_execute_command(d40c,
                                                                  D40_DMA_RUN);
                }
        }

        spin_unlock_irqrestore(&d40c->lock, flags);
        return res;
}

static bool d40_is_paused(struct d40_chan *d40c)
{
        bool is_paused = false;
        unsigned long flags;
        void __iomem *active_reg;
        u32 status;
        u32 event;

        spin_lock_irqsave(&d40c->lock, flags);

        if (d40c->log_num == D40_PHY_CHAN) {
                if (d40c->phy_chan->num % 2 == 0)
                        active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
                else
                        active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

                status = (readl(active_reg) &
                          D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
                        D40_CHAN_POS(d40c->phy_chan->num);
                if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
                        is_paused = true;

                goto _exit;
        }

        if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
            d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
                event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
        else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
                event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
        else {
                dev_err(&d40c->chan.dev->device,
                        "[%s] Unknown direction\n", __func__);
                goto _exit;
        }
        status = d40_chan_has_events(d40c);
        status = (status & D40_EVENTLINE_MASK(event)) >>
                D40_EVENTLINE_POS(event);

        if (status != D40_DMA_RUN)
                is_paused = true;
_exit:
        spin_unlock_irqrestore(&d40c->lock, flags);
        return is_paused;
}

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
        bool is_link;

        if (d40c->log_num != D40_PHY_CHAN)
                is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
        else
                is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
                                d40c->phy_chan->num * D40_DREG_PCDELTA +
                                D40_CHAN_REG_SDLNK) &
                        D40_SREG_LNK_PHYS_LNK_MASK;
        return is_link;
}

static u32 d40_residue(struct d40_chan *d40c)
{
        u32 num_elt;

        if (d40c->log_num != D40_PHY_CHAN)
                num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
                        >> D40_MEM_LCSP2_ECNT_POS;
        else
                num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
                                 d40c->phy_chan->num * D40_DREG_PCDELTA +
                                 D40_CHAN_REG_SDELT) &
                           D40_SREG_ELEM_PHY_ECNT_MASK) >>
                        D40_SREG_ELEM_PHY_ECNT_POS;
        return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}
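
/*
 * Residue arithmetic example (illustrative, not part of the driver): the
 * hardware reports the element count (ECNT) still outstanding, and each
 * element is 2^data_width bytes wide. Assuming the 32-bit element encoding
 * (data_width == 2), 37 elements left on a channel gives a residue of
 * 37 * (1 << 2) == 148 bytes.
 */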

static int d40_resume(struct dma_chan *chan)
{
        struct d40_chan *d40c =
                container_of(chan, struct d40_chan, chan);
        int res = 0;
        unsigned long flags;

        spin_lock_irqsave(&d40c->lock, flags);

        if (d40c->base->rev == 0)
                if (d40c->log_num != D40_PHY_CHAN) {
                        res = d40_channel_execute_command(d40c,
                                                          D40_DMA_SUSPEND_REQ);
                        goto no_suspend;
                }

        /* If there are bytes left to transfer or a linked tx, resume the job */
        if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
                if (d40c->log_num != D40_PHY_CHAN)
                        d40_config_set_event(d40c, true);
                res = d40_channel_execute_command(d40c, D40_DMA_RUN);
        }

no_suspend:
        spin_unlock_irqrestore(&d40c->lock, flags);
        return res;
}
1460
1461 static u32 stedma40_residue(struct dma_chan *chan)
1462 {
1463         struct d40_chan *d40c =
1464                 container_of(chan, struct d40_chan, chan);
1465         u32 bytes_left;
1466         unsigned long flags;
1467
1468         spin_lock_irqsave(&d40c->lock, flags);
1469         bytes_left = d40_residue(d40c);
1470         spin_unlock_irqrestore(&d40c->lock, flags);
1471
1472         return bytes_left;
1473 }
1474
1475 /* Public DMA functions in addition to the DMA engine framework */
1476
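/*
 * Set the packet size (burst size) for src and dst. For logical
 * channels the PSIZE fields of lcsp1/lcsp3 are updated; the LCSP1
 * mask and position are reused for lcsp3 below, which assumes the src
 * and dst field layouts match. For physical channels the default
 * src/dst configuration registers are updated instead.
 */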
1477 int stedma40_set_psize(struct dma_chan *chan,
1478                        int src_psize,
1479                        int dst_psize)
1480 {
1481         struct d40_chan *d40c =
1482                 container_of(chan, struct d40_chan, chan);
1483         unsigned long flags;
1484
1485         spin_lock_irqsave(&d40c->lock, flags);
1486
1487         if (d40c->log_num != D40_PHY_CHAN) {
1488                 d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1489                 d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1490                 d40c->log_def.lcsp1 |= src_psize <<
1491                         D40_MEM_LCSP1_SCFG_PSIZE_POS;
1492                 d40c->log_def.lcsp3 |= dst_psize <<
1493                         D40_MEM_LCSP1_SCFG_PSIZE_POS;
1494                 goto out;
1495         }
1496
1497         if (src_psize == STEDMA40_PSIZE_PHY_1)
1498                 d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1499         else {
1500                 d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1501                 d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1502                                        D40_SREG_CFG_PSIZE_POS);
1503                 d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
1504         }
1505
1506         if (dst_psize == STEDMA40_PSIZE_PHY_1)
1507                 d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1508         else {
1509                 d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1510                 d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1511                                        D40_SREG_CFG_PSIZE_POS);
1512                 d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
1513         }
1514 out:
1515         spin_unlock_irqrestore(&d40c->lock, flags);
1516         return 0;
1517 }
1518 EXPORT_SYMBOL(stedma40_set_psize);
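
/*
 * Illustrative (hypothetical) call, e.g. for 4-element bursts in both
 * directions on a logical channel:
 *
 *	stedma40_set_psize(chan, STEDMA40_PSIZE_LOG_4,
 *			   STEDMA40_PSIZE_LOG_4);
 */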
1519
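/*
 * Prepare a scatter-gather memcpy job on an already allocated channel.
 * For logical channels the LLIs are built in LCPA/LCLA space; for
 * physical channels they are built in the descriptor's LLI pool and
 * mapped for the device. Returns the tx descriptor, or NULL/ERR_PTR
 * on failure.
 */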
1520 struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1521                                                    struct scatterlist *sgl_dst,
1522                                                    struct scatterlist *sgl_src,
1523                                                    unsigned int sgl_len,
1524                                                    unsigned long dma_flags)
1525 {
1526         int res;
1527         struct d40_desc *d40d;
1528         struct d40_chan *d40c = container_of(chan, struct d40_chan,
1529                                              chan);
1530         unsigned long flags;
1531
1532         if (d40c->phy_chan == NULL) {
1533                 dev_err(&d40c->chan.dev->device,
1534                         "[%s] Unallocated channel.\n", __func__);
1535                 return ERR_PTR(-EINVAL);
1536         }
1537
1538         spin_lock_irqsave(&d40c->lock, flags);
1539         d40d = d40_desc_get(d40c);
1540
1541         if (d40d == NULL)
1542                 goto err;
1543
1544         d40d->lli_len = sgl_len;
1545         d40d->lli_tx_len = d40d->lli_len;
1546         d40d->txd.flags = dma_flags;
1547
1548         if (d40c->log_num != D40_PHY_CHAN) {
1549                 if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
1550                         d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1551
1552                 if (sgl_len > 1)
1553                         /*
1554                          * Check if there is space available in lcla. If not,
1555                          * split list into 1-length and run only in lcpa
1556                          * space.
1557                          */
1558                         if (d40_lcla_id_get(d40c) != 0)
1559                                 d40d->lli_tx_len = 1;
1560
1561                 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1562                         dev_err(&d40c->chan.dev->device,
1563                                 "[%s] Out of memory\n", __func__);
1564                         goto err;
1565                 }
1566
1567                 (void) d40_log_sg_to_lli(d40c->lcla.src_id,
1568                                          sgl_src,
1569                                          sgl_len,
1570                                          d40d->lli_log.src,
1571                                          d40c->log_def.lcsp1,
1572                                          d40c->dma_cfg.src_info.data_width,
1573                                          dma_flags & DMA_PREP_INTERRUPT,
1574                                          d40d->lli_tx_len,
1575                                          d40c->base->plat_data->llis_per_log);
1576
1577                 (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
1578                                          sgl_dst,
1579                                          sgl_len,
1580                                          d40d->lli_log.dst,
1581                                          d40c->log_def.lcsp3,
1582                                          d40c->dma_cfg.dst_info.data_width,
1583                                          dma_flags & DMA_PREP_INTERRUPT,
1584                                          d40d->lli_tx_len,
1585                                          d40c->base->plat_data->llis_per_log);
1586
1588         } else {
1589                 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1590                         dev_err(&d40c->chan.dev->device,
1591                                 "[%s] Out of memory\n", __func__);
1592                         goto err;
1593                 }
1594
1595                 res = d40_phy_sg_to_lli(sgl_src,
1596                                         sgl_len,
1597                                         0,
1598                                         d40d->lli_phy.src,
1599                                         d40d->lli_phy.src_addr,
1600                                         d40c->src_def_cfg,
1601                                         d40c->dma_cfg.src_info.data_width,
1602                                         d40c->dma_cfg.src_info.psize,
1603                                         true);
1604
1605                 if (res < 0)
1606                         goto err;
1607
1608                 res = d40_phy_sg_to_lli(sgl_dst,
1609                                         sgl_len,
1610                                         0,
1611                                         d40d->lli_phy.dst,
1612                                         d40d->lli_phy.dst_addr,
1613                                         d40c->dst_def_cfg,
1614                                         d40c->dma_cfg.dst_info.data_width,
1615                                         d40c->dma_cfg.dst_info.psize,
1616                                         true);
1617
1618                 if (res < 0)
1619                         goto err;
1620
1621                 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1622                                       d40d->lli_pool.size, DMA_TO_DEVICE);
1623         }
1624
1625         dma_async_tx_descriptor_init(&d40d->txd, chan);
1626
1627         d40d->txd.tx_submit = d40_tx_submit;
1628
1629         spin_unlock_irqrestore(&d40c->lock, flags);
1630
1631         return &d40d->txd;
1632 err:
1633         spin_unlock_irqrestore(&d40c->lock, flags);
1634         return NULL;
1635 }
1636 EXPORT_SYMBOL(stedma40_memcpy_sg);
1637
1638 bool stedma40_filter(struct dma_chan *chan, void *data)
1639 {
1640         struct stedma40_chan_cfg *info = data;
1641         struct d40_chan *d40c =
1642                 container_of(chan, struct d40_chan, chan);
1643         int err;
1644
1645         if (data) {
1646                 err = d40_validate_conf(d40c, info);
1647                 if (!err)
1648                         d40c->dma_cfg = *info;
1649         } else
1650                 err = d40_config_memcpy(d40c);
1651
1652         return err == 0;
1653 }
1654 EXPORT_SYMBOL(stedma40_filter);
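
/*
 * A minimal usage sketch (illustrative only, not part of the driver):
 * requesting a channel with stedma40_filter() through the dmaengine
 * framework. The cfg contents are hypothetical and board specific.
 */
#if 0
static struct dma_chan *d40_example_request(struct stedma40_chan_cfg *cfg)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* dma_request_channel() invokes stedma40_filter(chan, cfg) */
	return dma_request_channel(mask, stedma40_filter, cfg);
}
#endif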
1655
1656 /* DMA ENGINE functions */
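
/*
 * Allocate channel resources: fall back to a memcpy configuration if
 * the client has set none, grab a physical/logical channel, compute
 * the default register values and, if this channel is the first user
 * of the physical resource, write the configuration to the hardware.
 */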
1657 static int d40_alloc_chan_resources(struct dma_chan *chan)
1658 {
1659         int err;
1660         unsigned long flags;
1661         struct d40_chan *d40c =
1662                 container_of(chan, struct d40_chan, chan);
1663         bool is_free_phy;
1664         spin_lock_irqsave(&d40c->lock, flags);
1665
1666         d40c->completed = chan->cookie = 1;
1667
1668         /*
1669          * If no dma configuration is set (channel_type == 0)
1670          * use default configuration (memcpy)
1671          */
1672         if (d40c->dma_cfg.channel_type == 0) {
1673                 err = d40_config_memcpy(d40c);
1674                 if (err) {
1675                         dev_err(&d40c->chan.dev->device,
1676                                 "[%s] Failed to configure memcpy channel\n",
1677                                 __func__);
1678                         goto fail;
1679                 }
1680         }
1681         is_free_phy = (d40c->phy_chan == NULL);
1682
1683         err = d40_allocate_channel(d40c);
1684         if (err) {
1685                 dev_err(&d40c->chan.dev->device,
1686                         "[%s] Failed to allocate channel\n", __func__);
1687                 goto fail;
1688         }
1689
1690         /* Fill in basic CFG register values */
1691         d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1692                     &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1693
1694         if (d40c->log_num != D40_PHY_CHAN) {
1695                 d40_log_cfg(&d40c->dma_cfg,
1696                             &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1697
1698                 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1699                         d40c->lcpa = d40c->base->lcpa_base +
1700                           d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1701                 else
1702                         d40c->lcpa = d40c->base->lcpa_base +
1703                           d40c->dma_cfg.dst_dev_type *
1704                           D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
1705         }
1706
1707         /*
1708          * Only write channel configuration to the DMA if the physical
1709          * resource is free. In case of multiple logical channels
1710          * on the same physical resource, only the first write is necessary.
1711          */
1712         if (is_free_phy) {
1713                 err = d40_config_write(d40c);
1714                 if (err) {
1715                         dev_err(&d40c->chan.dev->device,
1716                                 "[%s] Failed to configure channel\n",
1717                                 __func__);
1718                 }
1719         }
1720 fail:
1721         spin_unlock_irqrestore(&d40c->lock, flags);
1722         return err;
1723 }
1724
1725 static void d40_free_chan_resources(struct dma_chan *chan)
1726 {
1727         struct d40_chan *d40c =
1728                 container_of(chan, struct d40_chan, chan);
1729         int err;
1730         unsigned long flags;
1731
1732         if (d40c->phy_chan == NULL) {
1733                 dev_err(&d40c->chan.dev->device,
1734                         "[%s] Cannot free unallocated channel\n", __func__);
1735                 return;
1736         }
1737
1739         spin_lock_irqsave(&d40c->lock, flags);
1740
1741         err = d40_free_dma(d40c);
1742
1743         if (err)
1744                 dev_err(&d40c->chan.dev->device,
1745                         "[%s] Failed to free channel\n", __func__);
1746         spin_unlock_irqrestore(&d40c->lock, flags);
1747 }
1748
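/*
 * Prepare a single mem-to-mem transfer. One LLI pair suffices here,
 * built in LCPA space for logical channels or in the descriptor pool
 * (and mapped for the device) for physical channels.
 */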
1749 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1750                                                        dma_addr_t dst,
1751                                                        dma_addr_t src,
1752                                                        size_t size,
1753                                                        unsigned long dma_flags)
1754 {
1755         struct d40_desc *d40d;
1756         struct d40_chan *d40c = container_of(chan, struct d40_chan,
1757                                              chan);
1758         unsigned long flags;
1759         int err = 0;
1760
1761         if (d40c->phy_chan == NULL) {
1762                 dev_err(&d40c->chan.dev->device,
1763                         "[%s] Channel is not allocated.\n", __func__);
1764                 return ERR_PTR(-EINVAL);
1765         }
1766
1767         spin_lock_irqsave(&d40c->lock, flags);
1768         d40d = d40_desc_get(d40c);
1769
1770         if (d40d == NULL) {
1771                 dev_err(&d40c->chan.dev->device,
1772                         "[%s] Descriptor is NULL\n", __func__);
1773                 goto err;
1774         }
1775
1776         d40d->txd.flags = dma_flags;
1777
1778         dma_async_tx_descriptor_init(&d40d->txd, chan);
1779
1780         d40d->txd.tx_submit = d40_tx_submit;
1781
1782         if (d40c->log_num != D40_PHY_CHAN) {
1783
1784                 if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
1785                         dev_err(&d40c->chan.dev->device,
1786                                 "[%s] Out of memory\n", __func__);
1787                         goto err;
1788                 }
1789                 d40d->lli_len = 1;
1790                 d40d->lli_tx_len = 1;
1791
1792                 d40_log_fill_lli(d40d->lli_log.src,
1793                                  src,
1794                                  size,
1795                                  0,
1796                                  d40c->log_def.lcsp1,
1797                                  d40c->dma_cfg.src_info.data_width,
1798                                  false, true);
1799
1800                 d40_log_fill_lli(d40d->lli_log.dst,
1801                                  dst,
1802                                  size,
1803                                  0,
1804                                  d40c->log_def.lcsp3,
1805                                  d40c->dma_cfg.dst_info.data_width,
1806                                  true, true);
1807
1808         } else {
1809
1810                 if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
1811                         dev_err(&d40c->chan.dev->device,
1812                                 "[%s] Out of memory\n", __func__);
1813                         goto err;
1814                 }
1815
1816                 err = d40_phy_fill_lli(d40d->lli_phy.src,
1817                                        src,
1818                                        size,
1819                                        d40c->dma_cfg.src_info.psize,
1820                                        0,
1821                                        d40c->src_def_cfg,
1822                                        true,
1823                                        d40c->dma_cfg.src_info.data_width,
1824                                        false);
1825                 if (err)
1826                         goto err_fill_lli;
1827
1828                 err = d40_phy_fill_lli(d40d->lli_phy.dst,
1829                                        dst,
1830                                        size,
1831                                        d40c->dma_cfg.dst_info.psize,
1832                                        0,
1833                                        d40c->dst_def_cfg,
1834                                        true,
1835                                        d40c->dma_cfg.dst_info.data_width,
1836                                        false);
1837
1838                 if (err)
1839                         goto err_fill_lli;
1840
1841                 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1842                                       d40d->lli_pool.size, DMA_TO_DEVICE);
1843         }
1844
1845         spin_unlock_irqrestore(&d40c->lock, flags);
1846         return &d40d->txd;
1847
1848 err_fill_lli:
1849         dev_err(&d40c->chan.dev->device,
1850                 "[%s] Failed filling in PHY LLI\n", __func__);
1851         d40_pool_lli_free(d40d);
1852 err:
1853         spin_unlock_irqrestore(&d40c->lock, flags);
1854         return NULL;
1855 }
1856
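/*
 * The two helpers below build slave scatter-gather jobs: one for
 * logical channels (LLIs in LCPA/LCLA space, split into 1-length
 * lists when no LCLA entry is free) and one for physical channels
 * (LLIs in the descriptor pool, mapped for the device).
 */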
1857 static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1858                                  struct d40_chan *d40c,
1859                                  struct scatterlist *sgl,
1860                                  unsigned int sg_len,
1861                                  enum dma_data_direction direction,
1862                                  unsigned long dma_flags)
1863 {
1864         dma_addr_t dev_addr = 0;
1865         int total_size;
1866
1867         if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1868                 dev_err(&d40c->chan.dev->device,
1869                         "[%s] Out of memory\n", __func__);
1870                 return -ENOMEM;
1871         }
1872
1873         d40d->lli_len = sg_len;
1874         if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
1875                 d40d->lli_tx_len = d40d->lli_len;
1876         else
1877                 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1878
1879         if (sg_len > 1)
1880                 /*
1881                  * Check if there is space available in lcla.
1882                  * If not, split list into 1-length and run only
1883                  * in lcpa space.
1884                  */
1885                 if (d40_lcla_id_get(d40c) != 0)
1886                         d40d->lli_tx_len = 1;
1887
1888         if (direction == DMA_FROM_DEVICE)
1889                 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1890         else if (direction == DMA_TO_DEVICE)
1891                 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1892         else
1893                 return -EINVAL;
1894
1895         total_size = d40_log_sg_to_dev(&d40c->lcla,
1896                                        sgl, sg_len,
1897                                        &d40d->lli_log,
1898                                        &d40c->log_def,
1899                                        d40c->dma_cfg.src_info.data_width,
1900                                        d40c->dma_cfg.dst_info.data_width,
1901                                        direction,
1902                                        dma_flags & DMA_PREP_INTERRUPT,
1903                                        dev_addr, d40d->lli_tx_len,
1904                                        d40c->base->plat_data->llis_per_log);
1905
1906         if (total_size < 0)
1907                 return -EINVAL;
1908
1909         return 0;
1910 }
1911
1912 static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1913                                  struct d40_chan *d40c,
1914                                  struct scatterlist *sgl,
1915                                  unsigned int sgl_len,
1916                                  enum dma_data_direction direction,
1917                                  unsigned long dma_flags)
1918 {
1919         dma_addr_t src_dev_addr;
1920         dma_addr_t dst_dev_addr;
1921         int res;
1922
1923         if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1924                 dev_err(&d40c->chan.dev->device,
1925                         "[%s] Out of memory\n", __func__);
1926                 return -ENOMEM;
1927         }
1928
1929         d40d->lli_len = sgl_len;
1930         d40d->lli_tx_len = sgl_len;
1931
1932         if (direction == DMA_FROM_DEVICE) {
1933                 dst_dev_addr = 0;
1934                 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1935         } else if (direction == DMA_TO_DEVICE) {
1936                 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1937                 src_dev_addr = 0;
1938         } else
1939                 return -EINVAL;
1940
1941         res = d40_phy_sg_to_lli(sgl,
1942                                 sgl_len,
1943                                 src_dev_addr,
1944                                 d40d->lli_phy.src,
1945                                 d40d->lli_phy.src_addr,
1946                                 d40c->src_def_cfg,
1947                                 d40c->dma_cfg.src_info.data_width,
1948                                 d40c->dma_cfg.src_info.psize,
1949                                 true);
1950         if (res < 0)
1951                 return res;
1952
1953         res = d40_phy_sg_to_lli(sgl,
1954                                 sgl_len,
1955                                 dst_dev_addr,
1956                                 d40d->lli_phy.dst,
1957                                 d40d->lli_phy.dst_addr,
1958                                 d40c->dst_def_cfg,
1959                                 d40c->dma_cfg.dst_info.data_width,
1960                                 d40c->dma_cfg.dst_info.psize,
1961                                 true);
1962         if (res < 0)
1963                 return res;
1964
1965         (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1966                               d40d->lli_pool.size, DMA_TO_DEVICE);
1967         return 0;
1968 }
1969
1970 static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
1971                                                          struct scatterlist *sgl,
1972                                                          unsigned int sg_len,
1973                                                          enum dma_data_direction direction,
1974                                                          unsigned long dma_flags)
1975 {
1976         struct d40_desc *d40d;
1977         struct d40_chan *d40c = container_of(chan, struct d40_chan,
1978                                              chan);
1979         unsigned long flags;
1980         int err;
1981
1982         if (d40c->phy_chan == NULL) {
1983                 dev_err(&d40c->chan.dev->device,
1984                         "[%s] Cannot prepare unallocated channel\n", __func__);
1985                 return ERR_PTR(-EINVAL);
1986         }
1987
1988         if (d40c->dma_cfg.pre_transfer)
1989                 d40c->dma_cfg.pre_transfer(chan,
1990                                            d40c->dma_cfg.pre_transfer_data,
1991                                            sg_dma_len(sgl));
1992
1993         spin_lock_irqsave(&d40c->lock, flags);
1994         d40d = d40_desc_get(d40c);
1995         spin_unlock_irqrestore(&d40c->lock, flags);
1996
1997         if (d40d == NULL)
1998                 return NULL;
1999
2000         if (d40c->log_num != D40_PHY_CHAN)
2001                 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
2002                                             direction, dma_flags);
2003         else
2004                 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
2005                                             direction, dma_flags);
2006         if (err) {
2007                 dev_err(&d40c->chan.dev->device,
2008                         "[%s] Failed to prepare %s slave sg job: %d\n",
2009                         __func__,
2010                         d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
2011                 return NULL;
2012         }
2013
2014         d40d->txd.flags = dma_flags;
2015
2016         dma_async_tx_descriptor_init(&d40d->txd, chan);
2017
2018         d40d->txd.tx_submit = d40_tx_submit;
2019
2020         return &d40d->txd;
2021 }
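
/*
 * A minimal submit sketch (illustrative only, names hypothetical):
 * how a dmaengine client of this era would drive the prep/submit/
 * issue sequence implemented above.
 */
#if 0
static int d40_example_submit(struct dma_chan *chan,
			      struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_TO_DEVICE,
						  DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	cookie = desc->tx_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	chan->device->device_issue_pending(chan);
	return 0;
}
#endif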
2022
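/*
 * Report transfer status: DMA_PAUSED if the channel is paused,
 * otherwise the usual cookie comparison, with the residue read back
 * from the hardware element counters.
 */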
2023 static enum dma_status d40_tx_status(struct dma_chan *chan,
2024                                      dma_cookie_t cookie,
2025                                      struct dma_tx_state *txstate)
2026 {
2027         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2028         dma_cookie_t last_used;
2029         dma_cookie_t last_complete;
2030         int ret;
2031
2032         if (d40c->phy_chan == NULL) {
2033                 dev_err(&d40c->chan.dev->device,
2034                         "[%s] Cannot read status of unallocated channel\n",
2035                         __func__);
2036                 return -EINVAL;
2037         }
2038
2039         last_complete = d40c->completed;
2040         last_used = chan->cookie;
2041
2042         if (d40_is_paused(d40c))
2043                 ret = DMA_PAUSED;
2044         else
2045                 ret = dma_async_is_complete(cookie, last_complete, last_used);
2046
2047         dma_set_tx_state(txstate, last_complete, last_used,
2048                          stedma40_residue(chan));
2049
2050         return ret;
2051 }
2052
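/*
 * Start the first queued job, unless the channel is already busy
 * processing jobs.
 */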
2053 static void d40_issue_pending(struct dma_chan *chan)
2054 {
2055         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2056         unsigned long flags;
2057
2058         if (d40c->phy_chan == NULL) {
2059                 dev_err(&d40c->chan.dev->device,
2060                         "[%s] Channel is not allocated!\n", __func__);
2061                 return;
2062         }
2063
2064         spin_lock_irqsave(&d40c->lock, flags);
2065
2066         /* Busy means that pending jobs are already being processed */
2067         if (!d40c->busy)
2068                 (void) d40_queue_start(d40c);
2069
2070         spin_unlock_irqrestore(&d40c->lock, flags);
2071 }
2072
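/*
 * Channel control: only DMA_TERMINATE_ALL, DMA_PAUSE and DMA_RESUME
 * are implemented; all other commands return -ENXIO.
 */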
2073 static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2074                        unsigned long arg)
2075 {
2076         unsigned long flags;
2077         struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2078
2079         if (d40c->phy_chan == NULL) {
2080                 dev_err(&d40c->chan.dev->device,
2081                         "[%s] Channel is not allocated!\n", __func__);
2082                 return -EINVAL;
2083         }
2084
2085         switch (cmd) {
2086         case DMA_TERMINATE_ALL:
2087                 spin_lock_irqsave(&d40c->lock, flags);
2088                 d40_term_all(d40c);
2089                 spin_unlock_irqrestore(&d40c->lock, flags);
2090                 return 0;
2091         case DMA_PAUSE:
2092                 return d40_pause(chan);
2093         case DMA_RESUME:
2094                 return d40_resume(chan);
2095         }
2096
2097         /* Other commands are unimplemented */
2098         return -ENXIO;
2099 }
2100
2101 /* Initialization functions */
2102
2103 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2104                                  struct d40_chan *chans, int offset,
2105                                  int num_chans)
2106 {
2107         int i = 0;
2108         struct d40_chan *d40c;
2109
2110         INIT_LIST_HEAD(&dma->channels);
2111
2112         for (i = offset; i < offset + num_chans; i++) {
2113                 d40c = &chans[i];
2114                 d40c->base = base;
2115                 d40c->chan.device = dma;
2116
2117                 /* Invalidate lcla element */
2118                 d40c->lcla.src_id = -1;
2119                 d40c->lcla.dst_id = -1;
2120
2121                 spin_lock_init(&d40c->lock);
2122
2123                 d40c->log_num = D40_PHY_CHAN;
2124
2125                 INIT_LIST_HEAD(&d40c->active);
2126                 INIT_LIST_HEAD(&d40c->queue);
2127                 INIT_LIST_HEAD(&d40c->client);
2128
2129                 tasklet_init(&d40c->tasklet, dma_tasklet,
2130                              (unsigned long) d40c);
2131
2132                 list_add_tail(&d40c->chan.device_node,
2133                               &dma->channels);
2134         }
2135 }
2136
2137 static int __init d40_dmaengine_init(struct d40_base *base,
2138                                      int num_reserved_chans)
2139 {
2140         int err;
2141
2142         d40_chan_init(base, &base->dma_slave, base->log_chans,
2143                       0, base->num_log_chans);
2144
2145         dma_cap_zero(base->dma_slave.cap_mask);
2146         dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2147
2148         base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2149         base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2150         base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2151         base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2152         base->dma_slave.device_tx_status = d40_tx_status;
2153         base->dma_slave.device_issue_pending = d40_issue_pending;
2154         base->dma_slave.device_control = d40_control;
2155         base->dma_slave.dev = base->dev;
2156
2157         err = dma_async_device_register(&base->dma_slave);
2158
2159         if (err) {
2160                 dev_err(base->dev,
2161                         "[%s] Failed to register slave channels\n",
2162                         __func__);
2163                 goto failure1;
2164         }
2165
2166         d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2167                       base->num_log_chans, base->plat_data->memcpy_len);
2168
2169         dma_cap_zero(base->dma_memcpy.cap_mask);
2170         dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2171
2172         base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2173         base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2174         base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2175         base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2176         base->dma_memcpy.device_tx_status = d40_tx_status;
2177         base->dma_memcpy.device_issue_pending = d40_issue_pending;
2178         base->dma_memcpy.device_control = d40_control;
2179         base->dma_memcpy.dev = base->dev;
2180         /*
2181          * This controller can only access addresses at even
2182          * 32-bit boundaries, i.e. aligned to 2^2 bytes.
2183          */
2184         base->dma_memcpy.copy_align = 2;
2185
2186         err = dma_async_device_register(&base->dma_memcpy);
2187
2188         if (err) {
2189                 dev_err(base->dev,
2190                         "[%s] Failed to register memcpy-only channels\n",
2191                         __func__);
2192                 goto failure2;
2193         }
2194
2195         d40_chan_init(base, &base->dma_both, base->phy_chans,
2196                       0, num_reserved_chans);
2197
2198         dma_cap_zero(base->dma_both.cap_mask);
2199         dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2200         dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2201
2202         base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2203         base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2204         base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2205         base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2206         base->dma_both.device_tx_status = d40_tx_status;
2207         base->dma_both.device_issue_pending = d40_issue_pending;
2208         base->dma_both.device_control = d40_control;
2209         base->dma_both.dev = base->dev;
2210         base->dma_both.copy_align = 2;
2211         err = dma_async_device_register(&base->dma_both);
2212
2213         if (err) {
2214                 dev_err(base->dev,
2215                         "[%s] Failed to register logical and physical capable channels\n",
2216                         __func__);
2217                 goto failure3;
2218         }
2219         return 0;
2220 failure3:
2221         dma_async_device_unregister(&base->dma_memcpy);
2222 failure2:
2223         dma_async_device_unregister(&base->dma_slave);
2224 failure1:
2225         return err;
2226 }
2227
2228 /* Hardware detection and initialization */
2229
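/*
 * Read the PRSME/PRSMO security registers, mark secure-mode-only and
 * platform-disabled channels as occupied, and return the number of
 * physical channels available to this driver.
 */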
2230 static int __init d40_phy_res_init(struct d40_base *base)
2231 {
2232         int i;
2233         int num_phy_chans_avail = 0;
2234         u32 val[2];
2235         int odd_even_bit = -2;
2236
2237         val[0] = readl(base->virtbase + D40_DREG_PRSME);
2238         val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2239
2240         for (i = 0; i < base->num_phy_chans; i++) {
2241                 base->phy_res[i].num = i;
2242                 odd_even_bit += 2 * ((i % 2) == 0);
2243                 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2244                         /* Mark security only channels as occupied */
2245                         base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2246                         base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2247                 } else {
2248                         base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2249                         base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2250                         num_phy_chans_avail++;
2251                 }
2252                 spin_lock_init(&base->phy_res[i].lock);
2253         }
2254
2255         /* Mark disabled channels as occupied */
2256         for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
2257                 int chan = base->plat_data->disabled_channels[i];
2258                 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
2259                 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
2260                 num_phy_chans_avail--;
2261         }
2261
2262         dev_info(base->dev, "%d of %d physical DMA channels available\n",
2263                  num_phy_chans_avail, base->num_phy_chans);
2264
2265         /* Verify settings extended vs standard */
2266         val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2267
2268         for (i = 0; i < base->num_phy_chans; i++) {
2269
2270                 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2271                     (val[0] & 0x3) != 1)
2272                         dev_info(base->dev,
2273                                  "[%s] INFO: channel %d is misconfigured (%d)\n",
2274                                  __func__, i, val[0] & 0x3);
2275
2276                 val[0] = val[0] >> 2;
2277         }
2278
2279         return num_phy_chans_avail;
2280 }
2281
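/*
 * Detect the hardware: verify the PrimeCell peripheral/cell IDs, read
 * the silicon revision from PERIPHID2, derive the number of physical
 * channels from ICFG, and allocate and fill in the base structure.
 */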
2282 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2283 {
2284         static const struct d40_reg_val dma_id_regs[] = {
2285                 /* Peripheral Id */
2286                 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2287                 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2288                 /*
2289                  * D40_DREG_PERIPHID2 depends on HW revision:
2290                  *  MOP500/HREF ED has 0x0008,
2291                  *  ? has 0x0018,
2292                  *  HREF V1 has 0x0028
2293                  */
2294                 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2295
2296                 /* PCell Id */
2297                 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2298                 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2299                 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2300                 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2301         };
2302         struct stedma40_platform_data *plat_data;
2303         struct clk *clk = NULL;
2304         void __iomem *virtbase = NULL;
2305         struct resource *res = NULL;
2306         struct d40_base *base = NULL;
2307         int num_log_chans = 0;
2308         int num_phy_chans;
2309         int i;
2310         u32 val;
2311
2312         clk = clk_get(&pdev->dev, NULL);
2313
2314         if (IS_ERR(clk)) {
2315                 dev_err(&pdev->dev, "[%s] No matching clock found\n",
2316                         __func__);
2317                 goto failure;
2318         }
2319
2320         clk_enable(clk);
2321
2322         /* Get IO for DMAC base address */
2323         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2324         if (!res)
2325                 goto failure;
2326
2327         if (request_mem_region(res->start, resource_size(res),
2328                                D40_NAME " I/O base") == NULL)
2329                 goto failure;
2330
2331         virtbase = ioremap(res->start, resource_size(res));
2332         if (!virtbase)
2333                 goto failure;
2334
2335         /* HW version check */
2336         for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2337                 if (dma_id_regs[i].val !=
2338                     readl(virtbase + dma_id_regs[i].reg)) {
2339                         dev_err(&pdev->dev,
2340                                 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2341                                 __func__,
2342                                 dma_id_regs[i].val,
2343                                 dma_id_regs[i].reg,
2344                                 readl(virtbase + dma_id_regs[i].reg));
2345                         goto failure;
2346                 }
2347         }
2348
2349         /* Get silicon revision */
2350         val = readl(virtbase + D40_DREG_PERIPHID2);
2351
2352         if ((val & 0xf) != D40_PERIPHID2_DESIGNER) {
2353                 dev_err(&pdev->dev,
2354                         "[%s] Unknown designer! Got %x wanted %x\n",
2355                         __func__, val & 0xf, D40_PERIPHID2_DESIGNER);
2356                 goto failure;
2357         }
2358
2359         /* The number of physical channels on this HW */
2360         num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2361
2362         dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2363                  (val >> 4) & 0xf, res->start);
2364
2365         plat_data = pdev->dev.platform_data;
2366
2367         /* Count the number of logical channels in use */
2368         for (i = 0; i < plat_data->dev_len; i++)
2369                 if (plat_data->dev_rx[i] != 0)
2370                         num_log_chans++;
2371
2372         for (i = 0; i < plat_data->dev_len; i++)
2373                 if (plat_data->dev_tx[i] != 0)
2374                         num_log_chans++;
2375
2376         base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2377                        (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2378                        sizeof(struct d40_chan), GFP_KERNEL);
2379
2380         if (base == NULL) {
2381                 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
2382                 goto failure;
2383         }
2384
2385         base->rev = (val >> 4) & 0xf;
2386         base->clk = clk;
2387         base->num_phy_chans = num_phy_chans;
2388         base->num_log_chans = num_log_chans;
2389         base->phy_start = res->start;
2390         base->phy_size = resource_size(res);
2391         base->virtbase = virtbase;
2392         base->plat_data = plat_data;
2393         base->dev = &pdev->dev;
2394         base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2395         base->log_chans = &base->phy_chans[num_phy_chans];
2396
2397         base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2398                                 GFP_KERNEL);
2399         if (!base->phy_res)
2400                 goto failure;
2401
2402         base->lookup_phy_chans = kzalloc(num_phy_chans *
2403                                          sizeof(struct d40_chan *),
2404                                          GFP_KERNEL);
2405         if (!base->lookup_phy_chans)
2406                 goto failure;
2407
2408         if (num_log_chans + plat_data->memcpy_len) {
2409                 /*
2410                  * The max number of logical channels equals the number of
2411                  * event lines for all src and dst devices.
2412                  */
2413                 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2414                                                  sizeof(struct d40_chan *),
2415                                                  GFP_KERNEL);
2416                 if (!base->lookup_log_chans)
2417                         goto failure;
2418         }
2419         base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
2420                                             GFP_KERNEL);
2421         if (!base->lcla_pool.alloc_map)
2422                 goto failure;
2423
2424         base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2425                                             0, SLAB_HWCACHE_ALIGN,
2426                                             NULL);
2427         if (base->desc_slab == NULL)
2428                 goto failure;
2429
2430         return base;
2431
2432 failure:
2433         if (clk) {
2434                 clk_disable(clk);
2435                 clk_put(clk);
2436         }
2437         if (virtbase)
2438                 iounmap(virtbase);
2439         if (res)
2440                 release_mem_region(res->start,
2441                                    resource_size(res));
2444
2445         if (base) {
2446                 kfree(base->lcla_pool.alloc_map);
2447                 kfree(base->lookup_log_chans);
2448                 kfree(base->lookup_phy_chans);
2449                 kfree(base->phy_res);
2450                 kfree(base);
2451         }
2452
2453         return NULL;
2454 }
2455
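/*
 * Write the initial hardware state: enable the block clocks, unmask
 * and clear interrupts for all logical channels and for the usable
 * physical channels, and put the usable channels in physical mode.
 */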
2456 static void __init d40_hw_init(struct d40_base *base)
2457 {
2458
2459         static const struct d40_reg_val dma_init_reg[] = {
2460                 /* Clock every part of the DMA block from start */
2461                 { .reg = D40_DREG_GCC,    .val = 0x0000ff01},
2462
2463                 /* Interrupts on all logical channels */
2464                 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2465                 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2466                 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2467                 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2468                 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2469                 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2470                 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2471                 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2472                 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2473                 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2474                 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2475                 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2476         };
2477         int i;
2478         u32 prmseo[2] = {0, 0};
2479         u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2480         u32 pcmis = 0;
2481         u32 pcicr = 0;
2482
2483         for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2484                 writel(dma_init_reg[i].val,
2485                        base->virtbase + dma_init_reg[i].reg);
2486
2487         /* Configure all our dma channels to default settings */
2488         for (i = 0; i < base->num_phy_chans; i++) {
2489
2490                 activeo[i % 2] = activeo[i % 2] << 2;
2491
2492                 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2493                     == D40_ALLOC_PHY) {
2494                         activeo[i % 2] |= 3;
2495                         continue;
2496                 }
2497
2498                 /* Enable interrupt # */
2499                 pcmis = (pcmis << 1) | 1;
2500
2501                 /* Clear interrupt # */
2502                 pcicr = (pcicr << 1) | 1;
2503
2504                 /* Set channel to physical mode */
2505                 prmseo[i % 2] = prmseo[i % 2] << 2;
2506                 prmseo[i % 2] |= 1;
2507
2508         }
2509
2510         writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2511         writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2512         writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2513         writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2514
2515         /* Write which interrupt to enable */
2516         writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2517
2518         /* Write which interrupt to clear */
2519         writel(pcicr, base->virtbase + D40_DREG_PCICR);
2520
2521 }
2522
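/*
 * Allocate the LCLA area: 1 KiB per physical channel, aligned on
 * LCLA_ALIGNMENT (256 KiB). See the allocation strategy in the
 * comment below.
 */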
2523 static int __init d40_lcla_allocate(struct d40_base *base)
2524 {
2525         unsigned long *page_list;
2526         int i, j;
2527         int ret = 0;
2528
2529         /*
2530          * This is somewhat ugly. We need 8192 bytes that are 18-bit
2531          * (256 KiB) aligned. To fulfill this hardware requirement without
2532          * wasting 256 KiB we allocate pages until we get an aligned one.
2533          */
2534         page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
2535                             GFP_KERNEL);
2536
2537         if (!page_list) {
2538                 ret = -ENOMEM;
2539                 goto failure;
2540         }
2541
2542         /* Calculate the allocation order (__get_free_pages() takes an order) */
2543         base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
2544
2545         for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
2546                 page_list[i] = __get_free_pages(GFP_KERNEL,
2547                                                 base->lcla_pool.pages);
2548                 if (!page_list[i]) {
2549
2550                         dev_err(base->dev,
2551                                 "[%s] Failed to allocate %d pages.\n",
2552                                 __func__, base->lcla_pool.pages);
2553
2554                         for (j = 0; j < i; j++)
2555                                 free_pages(page_list[j], base->lcla_pool.pages);
2556                         goto failure;
2557                 }
2558
2559                 if ((virt_to_phys((void *)page_list[i]) &
2560                      (LCLA_ALIGNMENT - 1)) == 0)
2561                         break;
2562         }
2563
2564         for (j = 0; j < i; j++)
2565                 free_pages(page_list[j], base->lcla_pool.pages);
2566
2567         if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
2568                 base->lcla_pool.base = (void *)page_list[i];
2569         } else {
2570                 /* After many attempts, no success finding the correct
2571                  * alignment; try allocating a big buffer instead. */
2572                 dev_warn(base->dev,
2573                          "[%s] Failed to get %d pages @ 18 bit align.\n",
2574                          __func__, base->lcla_pool.pages);
2575                 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
2576                                                          base->num_phy_chans +
2577                                                          LCLA_ALIGNMENT,
2578                                                          GFP_KERNEL);
2579                 if (!base->lcla_pool.base_unaligned) {
2580                         ret = -ENOMEM;
2581                         goto failure;
2582                 }
2583
2584                 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
2585                                                  LCLA_ALIGNMENT);
2586         }
2587
2588         writel(virt_to_phys(base->lcla_pool.base),
2589                base->virtbase + D40_DREG_LCLA);
2590 failure:
2591         kfree(page_list);
2592         return ret;
2593 }
2594
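/*
 * Probe: detect the hardware, set up the physical channel resources,
 * map the LCPA region (located in ESRAM), allocate the LCLA area,
 * request the interrupt, register the three dmaengine devices and
 * finally initialize the hardware.
 */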
2595 static int __init d40_probe(struct platform_device *pdev)
2596 {
2597         int err;
2598         int ret = -ENOENT;
2599         struct d40_base *base;
2600         struct resource *res = NULL;
2601         int num_reserved_chans;
2602         u32 val;
2603
2604         base = d40_hw_detect_init(pdev);
2605
2606         if (!base)
2607                 goto failure;
2608
2609         num_reserved_chans = d40_phy_res_init(base);
2610
2611         platform_set_drvdata(pdev, base);
2612
2613         spin_lock_init(&base->interrupt_lock);
2614         spin_lock_init(&base->execmd_lock);
2615
2616         /* Get IO for logical channel parameter address */
2617         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2618         if (!res) {
2619                 ret = -ENOENT;
2620                 dev_err(&pdev->dev,
2621                         "[%s] No \"lcpa\" memory resource\n",
2622                         __func__);
2623                 goto failure;
2624         }
2625         base->lcpa_size = resource_size(res);
2626         base->phy_lcpa = res->start;
2627
2628         if (request_mem_region(res->start, resource_size(res),
2629                                D40_NAME " I/O lcpa") == NULL) {
2630                 ret = -EBUSY;
2631                 dev_err(&pdev->dev,
2632                         "[%s] Failed to request LCPA region 0x%x-0x%x\n",
2633                         __func__, res->start, res->end);
2634                 goto failure;
2635         }
2636
2637         /* We make use of ESRAM memory for this. */
2638         val = readl(base->virtbase + D40_DREG_LCPA);
2639         if (res->start != val && val != 0) {
2640                 dev_warn(&pdev->dev,
2641                          "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2642                          __func__, val, res->start);
2643         } else
2644                 writel(res->start, base->virtbase + D40_DREG_LCPA);
2645
2646         base->lcpa_base = ioremap(res->start, resource_size(res));
2647         if (!base->lcpa_base) {
2648                 ret = -ENOMEM;
2649                 dev_err(&pdev->dev,
2650                         "[%s] Failed to ioremap LCPA region\n",
2651                         __func__);
2652                 goto failure;
2653         }
2654
2655         ret = d40_lcla_allocate(base);
2656         if (ret) {
2657                 dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
2658                         __func__);
2659                 goto failure;
2660         }
2661
2662         spin_lock_init(&base->lcla_pool.lock);
2663
2664         base->lcla_pool.num_blocks = base->num_phy_chans;
2665
2666         base->irq = platform_get_irq(pdev, 0);
2667
2668         ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2669
2670         if (ret) {
2671                 dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
2672                 goto failure;
2673         }
2674
2675         err = d40_dmaengine_init(base, num_reserved_chans);
2676         if (err)
2677                 goto failure;
2678
2679         d40_hw_init(base);
2680
2681         dev_info(base->dev, "initialized\n");
2682         return 0;
2683
2684 failure:
2685         if (base) {
2686                 if (base->desc_slab)
2687                         kmem_cache_destroy(base->desc_slab);
2688                 if (base->virtbase)
2689                         iounmap(base->virtbase);
2690                 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
2691                         free_pages((unsigned long)base->lcla_pool.base,
2692                                    base->lcla_pool.pages);
2693                 if (base->lcla_pool.base_unaligned)
2694                         kfree(base->lcla_pool.base_unaligned);
2695                 if (base->phy_lcpa)
2696                         release_mem_region(base->phy_lcpa,
2697                                            base->lcpa_size);
2698                 if (base->phy_start)
2699                         release_mem_region(base->phy_start,
2700                                            base->phy_size);
2701                 if (base->clk) {
2702                         clk_disable(base->clk);
2703                         clk_put(base->clk);
2704                 }
2705
2706                 kfree(base->lcla_pool.alloc_map);
2707                 kfree(base->lookup_log_chans);
2708                 kfree(base->lookup_phy_chans);
2709                 kfree(base->phy_res);
2710                 kfree(base);
2711         }
2712
2713         dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
2714         return ret;
2715 }
2716
2717 static struct platform_driver d40_driver = {
2718         .driver = {
2719                 .owner = THIS_MODULE,
2720                 .name  = D40_NAME,
2721         },
2722 };
2723
2724 int __init stedma40_init(void)
2725 {
2726         return platform_driver_probe(&d40_driver, d40_probe);
2727 }
2728 arch_initcall(stedma40_init);