drivers/usb/gadget/udc/atmel_usba_udc.c (android-x86/kernel.git)
1 /*
2  * Driver for the Atmel USBA high speed USB device controller
3  *
4  * Copyright (C) 2005-2007 Atmel Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/clk.h>
11 #include <linux/clk/at91_pmc.h>
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/slab.h>
17 #include <linux/device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/list.h>
20 #include <linux/platform_device.h>
21 #include <linux/usb/ch9.h>
22 #include <linux/usb/gadget.h>
23 #include <linux/usb/atmel_usba_udc.h>
24 #include <linux/delay.h>
25 #include <linux/of.h>
26 #include <linux/of_gpio.h>
27
28 #include "atmel_usba_udc.h"
29
30 #ifdef CONFIG_USB_GADGET_DEBUG_FS
31 #include <linux/debugfs.h>
32 #include <linux/uaccess.h>
33
34 static int queue_dbg_open(struct inode *inode, struct file *file)
35 {
36         struct usba_ep *ep = inode->i_private;
37         struct usba_request *req, *req_copy;
38         struct list_head *queue_data;
39
40         queue_data = kmalloc(sizeof(*queue_data), GFP_KERNEL);
41         if (!queue_data)
42                 return -ENOMEM;
43         INIT_LIST_HEAD(queue_data);
44
45         spin_lock_irq(&ep->udc->lock);
46         list_for_each_entry(req, &ep->queue, queue) {
47                 req_copy = kmemdup(req, sizeof(*req_copy), GFP_ATOMIC);
48                 if (!req_copy)
49                         goto fail;
50                 list_add_tail(&req_copy->queue, queue_data);
51         }
52         spin_unlock_irq(&ep->udc->lock);
53
54         file->private_data = queue_data;
55         return 0;
56
57 fail:
58         spin_unlock_irq(&ep->udc->lock);
59         list_for_each_entry_safe(req, req_copy, queue_data, queue) {
60                 list_del(&req->queue);
61                 kfree(req);
62         }
63         kfree(queue_data);
64         return -ENOMEM;
65 }
66
67 /*
68  * bbbbbbbb llllllll IZS sssss nnnn FDL\n\0
69  *
70  * b: buffer address
71  * l: buffer length
72  * I/i: interrupt/no interrupt
73  * Z/z: zero/no zero
74  * S/s: short ok/short not ok
75  * s: status
76  * n: nr_packets
77  * F/f: submitted/not submitted to FIFO
78  * D/d: using/not using DMA
79  * L/l: last transaction/not last transaction
80  */
81 static ssize_t queue_dbg_read(struct file *file, char __user *buf,
82                 size_t nbytes, loff_t *ppos)
83 {
84         struct list_head *queue = file->private_data;
85         struct usba_request *req, *tmp_req;
86         size_t len, remaining, actual = 0;
87         char tmpbuf[38];
88
89         if (!access_ok(VERIFY_WRITE, buf, nbytes))
90                 return -EFAULT;
91
92         inode_lock(file_inode(file));
93         list_for_each_entry_safe(req, tmp_req, queue, queue) {
94                 len = snprintf(tmpbuf, sizeof(tmpbuf),
95                                 "%8p %08x %c%c%c %5d %c%c%c\n",
96                                 req->req.buf, req->req.length,
97                                 req->req.no_interrupt ? 'i' : 'I',
98                                 req->req.zero ? 'Z' : 'z',
99                                 req->req.short_not_ok ? 's' : 'S',
100                                 req->req.status,
101                                 req->submitted ? 'F' : 'f',
102                                 req->using_dma ? 'D' : 'd',
103                                 req->last_transaction ? 'L' : 'l');
104                 len = min(len, sizeof(tmpbuf));
105                 if (len > nbytes)
106                         break;
107
108                 list_del(&req->queue);
109                 kfree(req);
110
111                 remaining = __copy_to_user(buf, tmpbuf, len);
112                 actual += len - remaining;
113                 if (remaining)
114                         break;
115
116                 nbytes -= len;
117                 buf += len;
118         }
119         inode_unlock(file_inode(file));
120
121         return actual;
122 }
123
124 static int queue_dbg_release(struct inode *inode, struct file *file)
125 {
126         struct list_head *queue_data = file->private_data;
127         struct usba_request *req, *tmp_req;
128
129         list_for_each_entry_safe(req, tmp_req, queue_data, queue) {
130                 list_del(&req->queue);
131                 kfree(req);
132         }
133         kfree(queue_data);
134         return 0;
135 }
136
137 static int regs_dbg_open(struct inode *inode, struct file *file)
138 {
139         struct usba_udc *udc;
140         unsigned int i;
141         u32 *data;
142         int ret = -ENOMEM;
143
144         inode_lock(inode);
145         udc = inode->i_private;
146         data = kmalloc(inode->i_size, GFP_KERNEL);
147         if (!data)
148                 goto out;
149
150         spin_lock_irq(&udc->lock);
151         for (i = 0; i < inode->i_size / 4; i++)
152                 data[i] = usba_io_readl(udc->regs + i * 4);
153         spin_unlock_irq(&udc->lock);
154
155         file->private_data = data;
156         ret = 0;
157
158 out:
159         inode_unlock(inode);
160
161         return ret;
162 }
163
164 static ssize_t regs_dbg_read(struct file *file, char __user *buf,
165                 size_t nbytes, loff_t *ppos)
166 {
167         struct inode *inode = file_inode(file);
168         int ret;
169
170         inode_lock(inode);
171         ret = simple_read_from_buffer(buf, nbytes, ppos,
172                         file->private_data,
173                         file_inode(file)->i_size);
174         inode_unlock(inode);
175
176         return ret;
177 }
178
179 static int regs_dbg_release(struct inode *inode, struct file *file)
180 {
181         kfree(file->private_data);
182         return 0;
183 }
184
185 const struct file_operations queue_dbg_fops = {
186         .owner          = THIS_MODULE,
187         .open           = queue_dbg_open,
188         .llseek         = no_llseek,
189         .read           = queue_dbg_read,
190         .release        = queue_dbg_release,
191 };
192
193 const struct file_operations regs_dbg_fops = {
194         .owner          = THIS_MODULE,
195         .open           = regs_dbg_open,
196         .llseek         = generic_file_llseek,
197         .read           = regs_dbg_read,
198         .release        = regs_dbg_release,
199 };
200
201 static void usba_ep_init_debugfs(struct usba_udc *udc,
202                 struct usba_ep *ep)
203 {
204         struct dentry *ep_root;
205
206         ep_root = debugfs_create_dir(ep->ep.name, udc->debugfs_root);
207         if (!ep_root)
208                 goto err_root;
209         ep->debugfs_dir = ep_root;
210
211         ep->debugfs_queue = debugfs_create_file("queue", 0400, ep_root,
212                                                 ep, &queue_dbg_fops);
213         if (!ep->debugfs_queue)
214                 goto err_queue;
215
216         if (ep->can_dma) {
217                 ep->debugfs_dma_status
218                         = debugfs_create_u32("dma_status", 0400, ep_root,
219                                         &ep->last_dma_status);
220                 if (!ep->debugfs_dma_status)
221                         goto err_dma_status;
222         }
223         if (ep_is_control(ep)) {
224                 ep->debugfs_state
225                         = debugfs_create_u32("state", 0400, ep_root,
226                                         &ep->state);
227                 if (!ep->debugfs_state)
228                         goto err_state;
229         }
230
231         return;
232
233 err_state:
234         if (ep->can_dma)
235                 debugfs_remove(ep->debugfs_dma_status);
236 err_dma_status:
237         debugfs_remove(ep->debugfs_queue);
238 err_queue:
239         debugfs_remove(ep_root);
240 err_root:
241         dev_err(&ep->udc->pdev->dev,
242                 "failed to create debugfs directory for %s\n", ep->ep.name);
243 }
244
245 static void usba_ep_cleanup_debugfs(struct usba_ep *ep)
246 {
247         debugfs_remove(ep->debugfs_queue);
248         debugfs_remove(ep->debugfs_dma_status);
249         debugfs_remove(ep->debugfs_state);
250         debugfs_remove(ep->debugfs_dir);
251         ep->debugfs_dma_status = NULL;
252         ep->debugfs_dir = NULL;
253 }
254
255 static void usba_init_debugfs(struct usba_udc *udc)
256 {
257         struct dentry *root, *regs;
258         struct resource *regs_resource;
259
260         root = debugfs_create_dir(udc->gadget.name, NULL);
261         if (IS_ERR(root) || !root)
262                 goto err_root;
263         udc->debugfs_root = root;
264
265         regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM,
266                                 CTRL_IOMEM_ID);
267
268         if (regs_resource) {
269                 regs = debugfs_create_file_size("regs", 0400, root, udc,
270                                                 &regs_dbg_fops,
271                                                 resource_size(regs_resource));
272                 if (!regs)
273                         goto err_regs;
274                 udc->debugfs_regs = regs;
275         }
276
277         usba_ep_init_debugfs(udc, to_usba_ep(udc->gadget.ep0));
278
279         return;
280
281 err_regs:
282         debugfs_remove(root);
283 err_root:
284         udc->debugfs_root = NULL;
285         dev_err(&udc->pdev->dev, "debugfs is not available\n");
286 }
287
288 static void usba_cleanup_debugfs(struct usba_udc *udc)
289 {
290         usba_ep_cleanup_debugfs(to_usba_ep(udc->gadget.ep0));
291         debugfs_remove(udc->debugfs_regs);
292         debugfs_remove(udc->debugfs_root);
293         udc->debugfs_regs = NULL;
294         udc->debugfs_root = NULL;
295 }
296 #else
297 static inline void usba_ep_init_debugfs(struct usba_udc *udc,
298                                          struct usba_ep *ep)
299 {
300
301 }
302
303 static inline void usba_ep_cleanup_debugfs(struct usba_ep *ep)
304 {
305
306 }
307
308 static inline void usba_init_debugfs(struct usba_udc *udc)
309 {
310
311 }
312
313 static inline void usba_cleanup_debugfs(struct usba_udc *udc)
314 {
315
316 }
317 #endif
318
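/*
 * The last value written to INT_ENB is shadowed in udc->int_enb_cache so
 * the current interrupt mask can be read back without touching the
 * hardware register.
 */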
319 static inline u32 usba_int_enb_get(struct usba_udc *udc)
320 {
321         return udc->int_enb_cache;
322 }
323
324 static inline void usba_int_enb_set(struct usba_udc *udc, u32 val)
325 {
326         usba_writel(udc, INT_ENB, val);
327         udc->int_enb_cache = val;
328 }
329
330 static int vbus_is_present(struct usba_udc *udc)
331 {
332         if (gpio_is_valid(udc->vbus_pin))
333                 return gpio_get_value(udc->vbus_pin) ^ udc->vbus_pin_inverted;
334
335         /* No Vbus detection: Assume always present */
336         return 1;
337 }
338
339 static void toggle_bias(struct usba_udc *udc, int is_on)
340 {
341         if (udc->errata && udc->errata->toggle_bias)
342                 udc->errata->toggle_bias(udc, is_on);
343 }
344
345 static void generate_bias_pulse(struct usba_udc *udc)
346 {
347         if (!udc->bias_pulse_needed)
348                 return;
349
350         if (udc->errata && udc->errata->pulse_bias)
351                 udc->errata->pulse_bias(udc);
352
353         udc->bias_pulse_needed = false;
354 }
355
356 static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req)
357 {
358         unsigned int transaction_len;
359
360         transaction_len = req->req.length - req->req.actual;
361         req->last_transaction = 1;
362         if (transaction_len > ep->ep.maxpacket) {
363                 transaction_len = ep->ep.maxpacket;
364                 req->last_transaction = 0;
365         } else if (transaction_len == ep->ep.maxpacket && req->req.zero)
366                 req->last_transaction = 0;
367
368         DBG(DBG_QUEUE, "%s: submit_transaction, req %p (length %d)%s\n",
369                 ep->ep.name, req, transaction_len,
370                 req->last_transaction ? ", done" : "");
371
372         memcpy_toio(ep->fifo, req->req.buf + req->req.actual, transaction_len);
373         usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
374         req->req.actual += transaction_len;
375 }
376
377 static void submit_request(struct usba_ep *ep, struct usba_request *req)
378 {
379         DBG(DBG_QUEUE, "%s: submit_request: req %p (length %d)\n",
380                 ep->ep.name, req, req->req.length);
381
382         req->req.actual = 0;
383         req->submitted = 1;
384
385         if (req->using_dma) {
386                 if (req->req.length == 0) {
387                         usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
388                         return;
389                 }
390
391                 if (req->req.zero)
392                         usba_ep_writel(ep, CTL_ENB, USBA_SHORT_PACKET);
393                 else
394                         usba_ep_writel(ep, CTL_DIS, USBA_SHORT_PACKET);
395
396                 usba_dma_writel(ep, ADDRESS, req->req.dma);
397                 usba_dma_writel(ep, CONTROL, req->ctrl);
398         } else {
399                 next_fifo_transaction(ep, req);
400                 if (req->last_transaction) {
401                         usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
402                         usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
403                 } else {
404                         usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
405                         usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
406                 }
407         }
408 }
409
410 static void submit_next_request(struct usba_ep *ep)
411 {
412         struct usba_request *req;
413
414         if (list_empty(&ep->queue)) {
415                 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY | USBA_RX_BK_RDY);
416                 return;
417         }
418
419         req = list_entry(ep->queue.next, struct usba_request, queue);
420         if (!req->submitted)
421                 submit_request(ep, req);
422 }
423
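/* Arm a zero-length IN packet to finish the status stage of a control transfer. */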
424 static void send_status(struct usba_udc *udc, struct usba_ep *ep)
425 {
426         ep->state = STATUS_STAGE_IN;
427         usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
428         usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
429 }
430
431 static void receive_data(struct usba_ep *ep)
432 {
433         struct usba_udc *udc = ep->udc;
434         struct usba_request *req;
435         unsigned long status;
436         unsigned int bytecount, nr_busy;
437         int is_complete = 0;
438
439         status = usba_ep_readl(ep, STA);
440         nr_busy = USBA_BFEXT(BUSY_BANKS, status);
441
442         DBG(DBG_QUEUE, "receive data: nr_busy=%u\n", nr_busy);
443
444         while (nr_busy > 0) {
445                 if (list_empty(&ep->queue)) {
446                         usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
447                         break;
448                 }
449                 req = list_entry(ep->queue.next,
450                                  struct usba_request, queue);
451
452                 bytecount = USBA_BFEXT(BYTE_COUNT, status);
453
454                 if (status & (1 << 31))
455                         is_complete = 1;
456                 if (req->req.actual + bytecount >= req->req.length) {
457                         is_complete = 1;
458                         bytecount = req->req.length - req->req.actual;
459                 }
460
461                 memcpy_fromio(req->req.buf + req->req.actual,
462                                 ep->fifo, bytecount);
463                 req->req.actual += bytecount;
464
465                 usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
466
467                 if (is_complete) {
468                         DBG(DBG_QUEUE, "%s: request done\n", ep->ep.name);
469                         req->req.status = 0;
470                         list_del_init(&req->queue);
471                         usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
472                         spin_unlock(&udc->lock);
473                         usb_gadget_giveback_request(&ep->ep, &req->req);
474                         spin_lock(&udc->lock);
475                 }
476
477                 status = usba_ep_readl(ep, STA);
478                 nr_busy = USBA_BFEXT(BUSY_BANKS, status);
479
480                 if (is_complete && ep_is_control(ep)) {
481                         send_status(udc, ep);
482                         break;
483                 }
484         }
485 }
486
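/*
 * Complete a request that has already been removed from the endpoint queue:
 * unmap any DMA buffer and invoke the gadget's completion callback with
 * udc->lock temporarily released.
 */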
487 static void
488 request_complete(struct usba_ep *ep, struct usba_request *req, int status)
489 {
490         struct usba_udc *udc = ep->udc;
491
492         WARN_ON(!list_empty(&req->queue));
493
494         if (req->req.status == -EINPROGRESS)
495                 req->req.status = status;
496
497         if (req->using_dma)
498                 usb_gadget_unmap_request(&udc->gadget, &req->req, ep->is_in);
499
500         DBG(DBG_GADGET | DBG_REQ,
501                 "%s: req %p complete: status %d, actual %u\n",
502                 ep->ep.name, req, req->req.status, req->req.actual);
503
504         spin_unlock(&udc->lock);
505         usb_gadget_giveback_request(&ep->ep, &req->req);
506         spin_lock(&udc->lock);
507 }
508
509 static void
510 request_complete_list(struct usba_ep *ep, struct list_head *list, int status)
511 {
512         struct usba_request *req, *tmp_req;
513
514         list_for_each_entry_safe(req, tmp_req, list, queue) {
515                 list_del_init(&req->queue);
516                 request_complete(ep, req, status);
517         }
518 }
519
520 static int
521 usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
522 {
523         struct usba_ep *ep = to_usba_ep(_ep);
524         struct usba_udc *udc = ep->udc;
525         unsigned long flags, ept_cfg, maxpacket;
526         unsigned int nr_trans;
527
528         DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);
529
530         maxpacket = usb_endpoint_maxp(desc) & 0x7ff;
531
532         if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index)
533                         || ep->index == 0
534                         || desc->bDescriptorType != USB_DT_ENDPOINT
535                         || maxpacket == 0
536                         || maxpacket > ep->fifo_size) {
537                 DBG(DBG_ERR, "ep_enable: Invalid argument");
538                 return -EINVAL;
539         }
540
541         ep->is_isoc = 0;
542         ep->is_in = 0;
543
544         if (maxpacket <= 8)
545                 ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8);
546         else
547                 /* LSB is bit 1, not 0 */
548                 ept_cfg = USBA_BF(EPT_SIZE, fls(maxpacket - 1) - 3);
549
550         DBG(DBG_HW, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n",
551                         ep->ep.name, ept_cfg, maxpacket);
552
553         if (usb_endpoint_dir_in(desc)) {
554                 ep->is_in = 1;
555                 ept_cfg |= USBA_EPT_DIR_IN;
556         }
557
558         switch (usb_endpoint_type(desc)) {
559         case USB_ENDPOINT_XFER_CONTROL:
560                 ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL);
561                 ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE);
562                 break;
563         case USB_ENDPOINT_XFER_ISOC:
564                 if (!ep->can_isoc) {
565                         DBG(DBG_ERR, "ep_enable: %s is not isoc capable\n",
566                                         ep->ep.name);
567                         return -EINVAL;
568                 }
569
570                 /*
571                  * Bits 11:12 specify number of _additional_
572                  * transactions per microframe.
573                  */
574                 nr_trans = ((usb_endpoint_maxp(desc) >> 11) & 3) + 1;
575                 if (nr_trans > 3)
576                         return -EINVAL;
577
578                 ep->is_isoc = 1;
579                 ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_ISO);
580
581                 /*
582                  * Do triple-buffering on high-bandwidth iso endpoints.
583                  */
584                 if (nr_trans > 1 && ep->nr_banks == 3)
585                         ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_TRIPLE);
586                 else
587                         ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
588                 ept_cfg |= USBA_BF(NB_TRANS, nr_trans);
589                 break;
590         case USB_ENDPOINT_XFER_BULK:
591                 ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK);
592                 ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
593                 break;
594         case USB_ENDPOINT_XFER_INT:
595                 ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_INT);
596                 ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
597                 break;
598         }
599
600         spin_lock_irqsave(&ep->udc->lock, flags);
601
602         ep->ep.desc = desc;
603         ep->ep.maxpacket = maxpacket;
604
605         usba_ep_writel(ep, CFG, ept_cfg);
606         usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
607
608         if (ep->can_dma) {
609                 u32 ctrl;
610
611                 usba_int_enb_set(udc, usba_int_enb_get(udc) |
612                                       USBA_BF(EPT_INT, 1 << ep->index) |
613                                       USBA_BF(DMA_INT, 1 << ep->index));
614                 ctrl = USBA_AUTO_VALID | USBA_INTDIS_DMA;
615                 usba_ep_writel(ep, CTL_ENB, ctrl);
616         } else {
617                 usba_int_enb_set(udc, usba_int_enb_get(udc) |
618                                       USBA_BF(EPT_INT, 1 << ep->index));
619         }
620
621         spin_unlock_irqrestore(&udc->lock, flags);
622
623         DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index,
624                         (unsigned long)usba_ep_readl(ep, CFG));
625         DBG(DBG_HW, "INT_ENB after init: %#08lx\n",
626                         (unsigned long)usba_int_enb_get(udc));
627
628         return 0;
629 }
630
631 static int usba_ep_disable(struct usb_ep *_ep)
632 {
633         struct usba_ep *ep = to_usba_ep(_ep);
634         struct usba_udc *udc = ep->udc;
635         LIST_HEAD(req_list);
636         unsigned long flags;
637
638         DBG(DBG_GADGET, "ep_disable: %s\n", ep->ep.name);
639
640         spin_lock_irqsave(&udc->lock, flags);
641
642         if (!ep->ep.desc) {
643                 spin_unlock_irqrestore(&udc->lock, flags);
644                 /* REVISIT because this driver disables endpoints in
645                  * reset_all_endpoints() before calling disconnect(),
646                  * most gadget drivers would trigger this non-error ...
647                  */
648                 if (udc->gadget.speed != USB_SPEED_UNKNOWN)
649                         DBG(DBG_ERR, "ep_disable: %s not enabled\n",
650                                         ep->ep.name);
651                 return -EINVAL;
652         }
653         ep->ep.desc = NULL;
654
655         list_splice_init(&ep->queue, &req_list);
656         if (ep->can_dma) {
657                 usba_dma_writel(ep, CONTROL, 0);
658                 usba_dma_writel(ep, ADDRESS, 0);
659                 usba_dma_readl(ep, STATUS);
660         }
661         usba_ep_writel(ep, CTL_DIS, USBA_EPT_ENABLE);
662         usba_int_enb_set(udc, usba_int_enb_get(udc) &
663                               ~USBA_BF(EPT_INT, 1 << ep->index));
664
665         request_complete_list(ep, &req_list, -ESHUTDOWN);
666
667         spin_unlock_irqrestore(&udc->lock, flags);
668
669         return 0;
670 }
671
672 static struct usb_request *
673 usba_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
674 {
675         struct usba_request *req;
676
677         DBG(DBG_GADGET, "ep_alloc_request: %p, 0x%x\n", _ep, gfp_flags);
678
679         req = kzalloc(sizeof(*req), gfp_flags);
680         if (!req)
681                 return NULL;
682
683         INIT_LIST_HEAD(&req->queue);
684
685         return &req->req;
686 }
687
688 static void
689 usba_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
690 {
691         struct usba_request *req = to_usba_req(_req);
692
693         DBG(DBG_GADGET, "ep_free_request: %p, %p\n", _ep, _req);
694
695         kfree(req);
696 }
697
698 static int queue_dma(struct usba_udc *udc, struct usba_ep *ep,
699                 struct usba_request *req, gfp_t gfp_flags)
700 {
701         unsigned long flags;
702         int ret;
703
704         DBG(DBG_DMA, "%s: req l/%u d/%pad %c%c%c\n",
705                 ep->ep.name, req->req.length, &req->req.dma,
706                 req->req.zero ? 'Z' : 'z',
707                 req->req.short_not_ok ? 'S' : 's',
708                 req->req.no_interrupt ? 'I' : 'i');
709
710         if (req->req.length > 0x10000) {
711                 /* Lengths from 0 to 65536 (inclusive) are supported */
712                 DBG(DBG_ERR, "invalid request length %u\n", req->req.length);
713                 return -EINVAL;
714         }
715
716         ret = usb_gadget_map_request(&udc->gadget, &req->req, ep->is_in);
717         if (ret)
718                 return ret;
719
720         req->using_dma = 1;
721         req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
722                         | USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE
723                         | USBA_DMA_END_BUF_EN;
724
725         if (!ep->is_in)
726                 req->ctrl |= USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
727
728         /*
729          * Add this request to the queue and submit for DMA if
730          * possible. Check if we're still alive first -- we may have
731          * received a reset since last time we checked.
732          */
733         ret = -ESHUTDOWN;
734         spin_lock_irqsave(&udc->lock, flags);
735         if (ep->ep.desc) {
736                 if (list_empty(&ep->queue))
737                         submit_request(ep, req);
738
739                 list_add_tail(&req->queue, &ep->queue);
740                 ret = 0;
741         }
742         spin_unlock_irqrestore(&udc->lock, flags);
743
744         return ret;
745 }
746
747 static int
748 usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
749 {
750         struct usba_request *req = to_usba_req(_req);
751         struct usba_ep *ep = to_usba_ep(_ep);
752         struct usba_udc *udc = ep->udc;
753         unsigned long flags;
754         int ret;
755
756         DBG(DBG_GADGET | DBG_QUEUE | DBG_REQ, "%s: queue req %p, len %u\n",
757                         ep->ep.name, req, _req->length);
758
759         if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN ||
760             !ep->ep.desc)
761                 return -ESHUTDOWN;
762
763         req->submitted = 0;
764         req->using_dma = 0;
765         req->last_transaction = 0;
766
767         _req->status = -EINPROGRESS;
768         _req->actual = 0;
769
770         if (ep->can_dma)
771                 return queue_dma(udc, ep, req, gfp_flags);
772
773         /* May have received a reset since last time we checked */
774         ret = -ESHUTDOWN;
775         spin_lock_irqsave(&udc->lock, flags);
776         if (ep->ep.desc) {
777                 list_add_tail(&req->queue, &ep->queue);
778
779                 if ((!ep_is_control(ep) && ep->is_in) ||
780                         (ep_is_control(ep)
781                                 && (ep->state == DATA_STAGE_IN
782                                         || ep->state == STATUS_STAGE_IN)))
783                         usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
784                 else
785                         usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
786                 ret = 0;
787         }
788         spin_unlock_irqrestore(&udc->lock, flags);
789
790         return ret;
791 }
792
793 static void
794 usba_update_req(struct usba_ep *ep, struct usba_request *req, u32 status)
795 {
796         req->req.actual = req->req.length - USBA_BFEXT(DMA_BUF_LEN, status);
797 }
798
799 static int stop_dma(struct usba_ep *ep, u32 *pstatus)
800 {
801         unsigned int timeout;
802         u32 status;
803
804         /*
805          * Stop the DMA controller. When writing both CH_EN
806          * and LINK to 0, the other bits are not affected.
807          */
808         usba_dma_writel(ep, CONTROL, 0);
809
810         /* Wait for the FIFO to empty */
811         for (timeout = 40; timeout; --timeout) {
812                 status = usba_dma_readl(ep, STATUS);
813                 if (!(status & USBA_DMA_CH_EN))
814                         break;
815                 udelay(1);
816         }
817
818         if (pstatus)
819                 *pstatus = status;
820
821         if (timeout == 0) {
822                 dev_err(&ep->udc->pdev->dev,
823                         "%s: timed out waiting for DMA FIFO to empty\n",
824                         ep->ep.name);
825                 return -ETIMEDOUT;
826         }
827
828         return 0;
829 }
830
831 static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
832 {
833         struct usba_ep *ep = to_usba_ep(_ep);
834         struct usba_udc *udc = ep->udc;
835         struct usba_request *req;
836         unsigned long flags;
837         u32 status;
838
839         DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n",
840                         ep->ep.name, _req);
841
842         spin_lock_irqsave(&udc->lock, flags);
843
844         list_for_each_entry(req, &ep->queue, queue) {
845                 if (&req->req == _req)
846                         break;
847         }
848
849         if (&req->req != _req) {
850                 spin_unlock_irqrestore(&udc->lock, flags);
851                 return -EINVAL;
852         }
853
854         if (req->using_dma) {
855                 /*
856                  * If this request is currently being transferred,
857                  * stop the DMA controller and reset the FIFO.
858                  */
859                 if (ep->queue.next == &req->queue) {
860                         status = usba_dma_readl(ep, STATUS);
861                         if (status & USBA_DMA_CH_EN)
862                                 stop_dma(ep, &status);
863
864 #ifdef CONFIG_USB_GADGET_DEBUG_FS
865                         ep->last_dma_status = status;
866 #endif
867
868                         usba_writel(udc, EPT_RST, 1 << ep->index);
869
870                         usba_update_req(ep, req, status);
871                 }
872         }
873
874         /*
875          * Errors should stop the queue from advancing until the
876          * completion function returns.
877          */
878         list_del_init(&req->queue);
879
880         request_complete(ep, req, -ECONNRESET);
881
882         /* Process the next request if any */
883         submit_next_request(ep);
884         spin_unlock_irqrestore(&udc->lock, flags);
885
886         return 0;
887 }
888
889 static int usba_ep_set_halt(struct usb_ep *_ep, int value)
890 {
891         struct usba_ep *ep = to_usba_ep(_ep);
892         struct usba_udc *udc = ep->udc;
893         unsigned long flags;
894         int ret = 0;
895
896         DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep->ep.name,
897                         value ? "set" : "clear");
898
899         if (!ep->ep.desc) {
900                 DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n",
901                                 ep->ep.name);
902                 return -ENODEV;
903         }
904         if (ep->is_isoc) {
905                 DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n",
906                                 ep->ep.name);
907                 return -ENOTTY;
908         }
909
910         spin_lock_irqsave(&udc->lock, flags);
911
912         /*
913          * We can't halt IN endpoints while there is still data to be
914          * transferred.
915          */
916         if (!list_empty(&ep->queue)
917                         || ((value && ep->is_in && (usba_ep_readl(ep, STA)
918                                         & USBA_BF(BUSY_BANKS, -1L))))) {
919                 ret = -EAGAIN;
920         } else {
921                 if (value)
922                         usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
923                 else
924                         usba_ep_writel(ep, CLR_STA,
925                                         USBA_FORCE_STALL | USBA_TOGGLE_CLR);
926                 usba_ep_readl(ep, STA);
927         }
928
929         spin_unlock_irqrestore(&udc->lock, flags);
930
931         return ret;
932 }
933
934 static int usba_ep_fifo_status(struct usb_ep *_ep)
935 {
936         struct usba_ep *ep = to_usba_ep(_ep);
937
938         return USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
939 }
940
941 static void usba_ep_fifo_flush(struct usb_ep *_ep)
942 {
943         struct usba_ep *ep = to_usba_ep(_ep);
944         struct usba_udc *udc = ep->udc;
945
946         usba_writel(udc, EPT_RST, 1 << ep->index);
947 }
948
949 static const struct usb_ep_ops usba_ep_ops = {
950         .enable         = usba_ep_enable,
951         .disable        = usba_ep_disable,
952         .alloc_request  = usba_ep_alloc_request,
953         .free_request   = usba_ep_free_request,
954         .queue          = usba_ep_queue,
955         .dequeue        = usba_ep_dequeue,
956         .set_halt       = usba_ep_set_halt,
957         .fifo_status    = usba_ep_fifo_status,
958         .fifo_flush     = usba_ep_fifo_flush,
959 };
960
961 static int usba_udc_get_frame(struct usb_gadget *gadget)
962 {
963         struct usba_udc *udc = to_usba_udc(gadget);
964
965         return USBA_BFEXT(FRAME_NUMBER, usba_readl(udc, FNUM));
966 }
967
968 static int usba_udc_wakeup(struct usb_gadget *gadget)
969 {
970         struct usba_udc *udc = to_usba_udc(gadget);
971         unsigned long flags;
972         u32 ctrl;
973         int ret = -EINVAL;
974
975         spin_lock_irqsave(&udc->lock, flags);
976         if (udc->devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
977                 ctrl = usba_readl(udc, CTRL);
978                 usba_writel(udc, CTRL, ctrl | USBA_REMOTE_WAKE_UP);
979                 ret = 0;
980         }
981         spin_unlock_irqrestore(&udc->lock, flags);
982
983         return ret;
984 }
985
986 static int
987 usba_udc_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
988 {
989         struct usba_udc *udc = to_usba_udc(gadget);
990         unsigned long flags;
991
992         gadget->is_selfpowered = (is_selfpowered != 0);
993         spin_lock_irqsave(&udc->lock, flags);
994         if (is_selfpowered)
995                 udc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
996         else
997                 udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
998         spin_unlock_irqrestore(&udc->lock, flags);
999
1000         return 0;
1001 }
1002
1003 static int atmel_usba_start(struct usb_gadget *gadget,
1004                 struct usb_gadget_driver *driver);
1005 static int atmel_usba_stop(struct usb_gadget *gadget);
1006
1007 static const struct usb_gadget_ops usba_udc_ops = {
1008         .get_frame              = usba_udc_get_frame,
1009         .wakeup                 = usba_udc_wakeup,
1010         .set_selfpowered        = usba_udc_set_selfpowered,
1011         .udc_start              = atmel_usba_start,
1012         .udc_stop               = atmel_usba_stop,
1013 };
1014
1015 static struct usb_endpoint_descriptor usba_ep0_desc = {
1016         .bLength = USB_DT_ENDPOINT_SIZE,
1017         .bDescriptorType = USB_DT_ENDPOINT,
1018         .bEndpointAddress = 0,
1019         .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1020         .wMaxPacketSize = cpu_to_le16(64),
1021         /* FIXME: I have no idea what to put here */
1022         .bInterval = 1,
1023 };
1024
1025 static struct usb_gadget usba_gadget_template = {
1026         .ops            = &usba_udc_ops,
1027         .max_speed      = USB_SPEED_HIGH,
1028         .name           = "atmel_usba_udc",
1029 };
1030
1031 /*
1032  * Called with interrupts disabled and udc->lock held.
1033  */
1034 static void reset_all_endpoints(struct usba_udc *udc)
1035 {
1036         struct usba_ep *ep;
1037         struct usba_request *req, *tmp_req;
1038
1039         usba_writel(udc, EPT_RST, ~0UL);
1040
1041         ep = to_usba_ep(udc->gadget.ep0);
1042         list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) {
1043                 list_del_init(&req->queue);
1044                 request_complete(ep, req, -ECONNRESET);
1045         }
1046
1047         /* NOTE:  normally, the next call to the gadget driver is in
1048          * charge of disabling endpoints... usually disconnect().
1049          * The exception would be entering a high speed test mode.
1050          *
1051          * FIXME remove this code ... and retest thoroughly.
1052          */
1053         list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1054                 if (ep->ep.desc) {
1055                         spin_unlock(&udc->lock);
1056                         usba_ep_disable(&ep->ep);
1057                         spin_lock(&udc->lock);
1058                 }
1059         }
1060 }
1061
1062 static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex)
1063 {
1064         struct usba_ep *ep;
1065
1066         if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1067                 return to_usba_ep(udc->gadget.ep0);
1068
1069         list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) {
1070                 u8 bEndpointAddress;
1071
1072                 if (!ep->ep.desc)
1073                         continue;
1074                 bEndpointAddress = ep->ep.desc->bEndpointAddress;
1075                 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1076                         continue;
1077                 if ((bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
1078                                 == (wIndex & USB_ENDPOINT_NUMBER_MASK))
1079                         return ep;
1080         }
1081
1082         return NULL;
1083 }
1084
1085 /* Called with interrupts disabled and udc->lock held */
1086 static inline void set_protocol_stall(struct usba_udc *udc, struct usba_ep *ep)
1087 {
1088         usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
1089         ep->state = WAIT_FOR_SETUP;
1090 }
1091
1092 static inline int is_stalled(struct usba_udc *udc, struct usba_ep *ep)
1093 {
1094         if (usba_ep_readl(ep, STA) & USBA_FORCE_STALL)
1095                 return 1;
1096         return 0;
1097 }
1098
1099 static inline void set_address(struct usba_udc *udc, unsigned int addr)
1100 {
1101         u32 regval;
1102
1103         DBG(DBG_BUS, "setting address %u...\n", addr);
1104         regval = usba_readl(udc, CTRL);
1105         regval = USBA_BFINS(DEV_ADDR, addr, regval);
1106         usba_writel(udc, CTRL, regval);
1107 }
1108
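/*
 * Enter one of the USB 2.0 electrical test modes (Test_J, Test_K,
 * Test_SE0_NAK or Test_Packet) requested via SET_FEATURE(TEST_MODE).
 */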
1109 static int do_test_mode(struct usba_udc *udc)
1110 {
1111         static const char test_packet_buffer[] = {
1112                 /* JKJKJKJK * 9 */
1113                 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1114                 /* JJKKJJKK * 8 */
1115                 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1116                 /* JJJJKKKK * 8 */
1117                 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1118                 /* JJJJJJJKKKKKKK * 8 */
1119                 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1120                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1121                 /* JJJJJJJK * 8 */
1122                 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1123                 /* {JKKKKKKK * 10}, JK */
1124                 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 0x7E
1125         };
1126         struct usba_ep *ep;
1127         struct device *dev = &udc->pdev->dev;
1128         int test_mode;
1129
1130         test_mode = udc->test_mode;
1131
1132         /* Start from a clean slate */
1133         reset_all_endpoints(udc);
1134
1135         switch (test_mode) {
1136         case 0x0100:
1137                 /* Test_J */
1138                 usba_writel(udc, TST, USBA_TST_J_MODE);
1139                 dev_info(dev, "Entering Test_J mode...\n");
1140                 break;
1141         case 0x0200:
1142                 /* Test_K */
1143                 usba_writel(udc, TST, USBA_TST_K_MODE);
1144                 dev_info(dev, "Entering Test_K mode...\n");
1145                 break;
1146         case 0x0300:
1147                 /*
1148                  * Test_SE0_NAK: Force high-speed mode and set up ep0
1149                  * for Bulk IN transfers
1150                  */
1151                 ep = &udc->usba_ep[0];
1152                 usba_writel(udc, TST,
1153                                 USBA_BF(SPEED_CFG, USBA_SPEED_CFG_FORCE_HIGH));
1154                 usba_ep_writel(ep, CFG,
1155                                 USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
1156                                 | USBA_EPT_DIR_IN
1157                                 | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
1158                                 | USBA_BF(BK_NUMBER, 1));
1159                 if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
1160                         set_protocol_stall(udc, ep);
1161                         dev_err(dev, "Test_SE0_NAK: ep0 not mapped\n");
1162                 } else {
1163                         usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
1164                         dev_info(dev, "Entering Test_SE0_NAK mode...\n");
1165                 }
1166                 break;
1167         case 0x0400:
1168                 /* Test_Packet */
1169                 ep = &udc->usba_ep[0];
1170                 usba_ep_writel(ep, CFG,
1171                                 USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
1172                                 | USBA_EPT_DIR_IN
1173                                 | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
1174                                 | USBA_BF(BK_NUMBER, 1));
1175                 if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
1176                         set_protocol_stall(udc, ep);
1177                         dev_err(dev, "Test_Packet: ep0 not mapped\n");
1178                 } else {
1179                         usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
1180                         usba_writel(udc, TST, USBA_TST_PKT_MODE);
1181                         memcpy_toio(ep->fifo, test_packet_buffer,
1182                                         sizeof(test_packet_buffer));
1183                         usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
1184                         dev_info(dev, "Entering Test_Packet mode...\n");
1185                 }
1186                 break;
1187         default:
1188                 dev_err(dev, "Invalid test mode: 0x%04x\n", test_mode);
1189                 return -EINVAL;
1190         }
1191
1192         return 0;
1193 }
1194
1195 /* Avoid overly long expressions */
1196 static inline bool feature_is_dev_remote_wakeup(struct usb_ctrlrequest *crq)
1197 {
1198         if (crq->wValue == cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP))
1199                 return true;
1200         return false;
1201 }
1202
1203 static inline bool feature_is_dev_test_mode(struct usb_ctrlrequest *crq)
1204 {
1205         if (crq->wValue == cpu_to_le16(USB_DEVICE_TEST_MODE))
1206                 return true;
1207         return false;
1208 }
1209
1210 static inline bool feature_is_ep_halt(struct usb_ctrlrequest *crq)
1211 {
1212         if (crq->wValue == cpu_to_le16(USB_ENDPOINT_HALT))
1213                 return true;
1214         return false;
1215 }
1216
1217 static int handle_ep0_setup(struct usba_udc *udc, struct usba_ep *ep,
1218                 struct usb_ctrlrequest *crq)
1219 {
1220         int retval = 0;
1221
1222         switch (crq->bRequest) {
1223         case USB_REQ_GET_STATUS: {
1224                 u16 status;
1225
1226                 if (crq->bRequestType == (USB_DIR_IN | USB_RECIP_DEVICE)) {
1227                         status = cpu_to_le16(udc->devstatus);
1228                 } else if (crq->bRequestType
1229                                 == (USB_DIR_IN | USB_RECIP_INTERFACE)) {
1230                         status = cpu_to_le16(0);
1231                 } else if (crq->bRequestType
1232                                 == (USB_DIR_IN | USB_RECIP_ENDPOINT)) {
1233                         struct usba_ep *target;
1234
1235                         target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
1236                         if (!target)
1237                                 goto stall;
1238
1239                         status = 0;
1240                         if (is_stalled(udc, target))
1241                                 status |= cpu_to_le16(1);
1242                 } else
1243                         goto delegate;
1244
1245                 /* Write directly to the FIFO. No queueing is done. */
1246                 if (crq->wLength != cpu_to_le16(sizeof(status)))
1247                         goto stall;
1248                 ep->state = DATA_STAGE_IN;
1249                 usba_io_writew(status, ep->fifo);
1250                 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
1251                 break;
1252         }
1253
1254         case USB_REQ_CLEAR_FEATURE: {
1255                 if (crq->bRequestType == USB_RECIP_DEVICE) {
1256                         if (feature_is_dev_remote_wakeup(crq))
1257                                 udc->devstatus
1258                                         &= ~(1 << USB_DEVICE_REMOTE_WAKEUP);
1259                         else
1260                                 /* Can't CLEAR_FEATURE TEST_MODE */
1261                                 goto stall;
1262                 } else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
1263                         struct usba_ep *target;
1264
1265                         if (crq->wLength != cpu_to_le16(0)
1266                                         || !feature_is_ep_halt(crq))
1267                                 goto stall;
1268                         target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
1269                         if (!target)
1270                                 goto stall;
1271
1272                         usba_ep_writel(target, CLR_STA, USBA_FORCE_STALL);
1273                         if (target->index != 0)
1274                                 usba_ep_writel(target, CLR_STA,
1275                                                 USBA_TOGGLE_CLR);
1276                 } else {
1277                         goto delegate;
1278                 }
1279
1280                 send_status(udc, ep);
1281                 break;
1282         }
1283
1284         case USB_REQ_SET_FEATURE: {
1285                 if (crq->bRequestType == USB_RECIP_DEVICE) {
1286                         if (feature_is_dev_test_mode(crq)) {
1287                                 send_status(udc, ep);
1288                                 ep->state = STATUS_STAGE_TEST;
1289                                 udc->test_mode = le16_to_cpu(crq->wIndex);
1290                                 return 0;
1291                         } else if (feature_is_dev_remote_wakeup(crq)) {
1292                                 udc->devstatus |= 1 << USB_DEVICE_REMOTE_WAKEUP;
1293                         } else {
1294                                 goto stall;
1295                         }
1296                 } else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
1297                         struct usba_ep *target;
1298
1299                         if (crq->wLength != cpu_to_le16(0)
1300                                         || !feature_is_ep_halt(crq))
1301                                 goto stall;
1302
1303                         target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
1304                         if (!target)
1305                                 goto stall;
1306
1307                         usba_ep_writel(target, SET_STA, USBA_FORCE_STALL);
1308                 } else
1309                         goto delegate;
1310
1311                 send_status(udc, ep);
1312                 break;
1313         }
1314
1315         case USB_REQ_SET_ADDRESS:
1316                 if (crq->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
1317                         goto delegate;
1318
1319                 set_address(udc, le16_to_cpu(crq->wValue));
1320                 send_status(udc, ep);
1321                 ep->state = STATUS_STAGE_ADDR;
1322                 break;
1323
1324         default:
1325 delegate:
1326                 spin_unlock(&udc->lock);
1327                 retval = udc->driver->setup(&udc->gadget, crq);
1328                 spin_lock(&udc->lock);
1329         }
1330
1331         return retval;
1332
1333 stall:
1334         pr_err("udc: %s: Invalid setup request: %02x.%02x v%04x i%04x l%d, "
1335                 "halting endpoint...\n",
1336                 ep->ep.name, crq->bRequestType, crq->bRequest,
1337                 le16_to_cpu(crq->wValue), le16_to_cpu(crq->wIndex),
1338                 le16_to_cpu(crq->wLength));
1339         set_protocol_stall(udc, ep);
1340         return -1;
1341 }
1342
1343 static void usba_control_irq(struct usba_udc *udc, struct usba_ep *ep)
1344 {
1345         struct usba_request *req;
1346         u32 epstatus;
1347         u32 epctrl;
1348
1349 restart:
1350         epstatus = usba_ep_readl(ep, STA);
1351         epctrl = usba_ep_readl(ep, CTL);
1352
1353         DBG(DBG_INT, "%s [%d]: s/%08x c/%08x\n",
1354                         ep->ep.name, ep->state, epstatus, epctrl);
1355
1356         req = NULL;
1357         if (!list_empty(&ep->queue))
1358                 req = list_entry(ep->queue.next,
1359                                  struct usba_request, queue);
1360
1361         if ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
1362                 if (req->submitted)
1363                         next_fifo_transaction(ep, req);
1364                 else
1365                         submit_request(ep, req);
1366
1367                 if (req->last_transaction) {
1368                         usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
1369                         usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
1370                 }
1371                 goto restart;
1372         }
1373         if ((epstatus & epctrl) & USBA_TX_COMPLETE) {
1374                 usba_ep_writel(ep, CLR_STA, USBA_TX_COMPLETE);
1375
1376                 switch (ep->state) {
1377                 case DATA_STAGE_IN:
1378                         usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
1379                         usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
1380                         ep->state = STATUS_STAGE_OUT;
1381                         break;
1382                 case STATUS_STAGE_ADDR:
1383                         /* Activate our new address */
1384                         usba_writel(udc, CTRL, (usba_readl(udc, CTRL)
1385                                                 | USBA_FADDR_EN));
1386                         usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
1387                         ep->state = WAIT_FOR_SETUP;
1388                         break;
1389                 case STATUS_STAGE_IN:
1390                         if (req) {
1391                                 list_del_init(&req->queue);
1392                                 request_complete(ep, req, 0);
1393                                 submit_next_request(ep);
1394                         }
1395                         usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
1396                         ep->state = WAIT_FOR_SETUP;
1397                         break;
1398                 case STATUS_STAGE_TEST:
1399                         usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
1400                         ep->state = WAIT_FOR_SETUP;
1401                         if (do_test_mode(udc))
1402                                 set_protocol_stall(udc, ep);
1403                         break;
1404                 default:
1405                         pr_err("udc: %s: TXCOMP: Invalid endpoint state %d, "
1406                                 "halting endpoint...\n",
1407                                 ep->ep.name, ep->state);
1408                         set_protocol_stall(udc, ep);
1409                         break;
1410                 }
1411
1412                 goto restart;
1413         }
1414         if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
1415                 switch (ep->state) {
1416                 case STATUS_STAGE_OUT:
1417                         usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
1418                         usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
1419
1420                         if (req) {
1421                                 list_del_init(&req->queue);
1422                                 request_complete(ep, req, 0);
1423                         }
1424                         ep->state = WAIT_FOR_SETUP;
1425                         break;
1426
1427                 case DATA_STAGE_OUT:
1428                         receive_data(ep);
1429                         break;
1430
1431                 default:
1432                         usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
1433                         usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
1434                         pr_err("udc: %s: RXRDY: Invalid endpoint state %d, "
1435                                 "halting endpoint...\n",
1436                                 ep->ep.name, ep->state);
1437                         set_protocol_stall(udc, ep);
1438                         break;
1439                 }
1440
1441                 goto restart;
1442         }
1443         if (epstatus & USBA_RX_SETUP) {
1444                 union {
1445                         struct usb_ctrlrequest crq;
1446                         unsigned long data[2];
1447                 } crq;
1448                 unsigned int pkt_len;
1449                 int ret;
1450
1451                 if (ep->state != WAIT_FOR_SETUP) {
1452                         /*
1453                          * Didn't expect a SETUP packet at this
1454                          * point. Clean up any pending requests (which
1455                          * may be successful).
1456                          */
1457                         int status = -EPROTO;
1458
1459                         /*
1460                          * RXRDY and TXCOMP are dropped when SETUP
1461                          * packets arrive.  Just pretend we received
1462                          * the status packet.
1463                          */
1464                         if (ep->state == STATUS_STAGE_OUT
1465                                         || ep->state == STATUS_STAGE_IN) {
1466                                 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
1467                                 status = 0;
1468                         }
1469
1470                         if (req) {
1471                                 list_del_init(&req->queue);
1472                                 request_complete(ep, req, status);
1473                         }
1474                 }
1475
1476                 pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
1477                 DBG(DBG_HW, "Packet length: %u\n", pkt_len);
1478                 if (pkt_len != sizeof(crq)) {
1479                         pr_warning("udc: Invalid packet length %u "
1480                                 "(expected %zu)\n", pkt_len, sizeof(crq));
1481                         set_protocol_stall(udc, ep);
1482                         return;
1483                 }
1484
1485                 DBG(DBG_FIFO, "Copying ctrl request from 0x%p:\n", ep->fifo);
1486                 memcpy_fromio(crq.data, ep->fifo, sizeof(crq));
1487
1488                 /* Free up one bank in the FIFO so that we can
1489                  * generate or receive a reply right away. */
1490                 usba_ep_writel(ep, CLR_STA, USBA_RX_SETUP);
1491
1492                 /* printk(KERN_DEBUG "setup: %d: %02x.%02x\n",
1493                         ep->state, crq.crq.bRequestType,
1494                         crq.crq.bRequest); */
1495
1496                 if (crq.crq.bRequestType & USB_DIR_IN) {
1497                         /*
1498                          * The USB 2.0 spec states that "if wLength is
1499                          * zero, there is no data transfer phase."
1500                          * However, testusb #14 seems to actually
1501                          * expect a data phase even if wLength = 0...
1502                          */
1503                         ep->state = DATA_STAGE_IN;
1504                 } else {
1505                         if (crq.crq.wLength != cpu_to_le16(0))
1506                                 ep->state = DATA_STAGE_OUT;
1507                         else
1508                                 ep->state = STATUS_STAGE_IN;
1509                 }
1510
1511                 if (ep->index == 0) {
1512                         ret = handle_ep0_setup(udc, ep, &crq.crq);
1513                 } else {
1515                         spin_unlock(&udc->lock);
1516                         ret = udc->driver->setup(&udc->gadget, &crq.crq);
1517                         spin_lock(&udc->lock);
1518                 }
1519
1520                 DBG(DBG_BUS, "req %02x.%02x, length %d, state %d, ret %d\n",
1521                         crq.crq.bRequestType, crq.crq.bRequest,
1522                         le16_to_cpu(crq.crq.wLength), ep->state, ret);
1523
1524                 if (ret < 0) {
1525                         /* Let the host know that we failed */
1526                         set_protocol_stall(udc, ep);
1527                 }
1528         }
1529 }
1530
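/*
 * Interrupt handler for non-control endpoints.  While the TX_PK_RDY
 * interrupt is enabled and the FIFO bank is free, keep submitting FIFO
 * transactions (or a zero-length packet for DMA requests) and complete
 * requests as their last transaction goes out; when an OUT bank is
 * ready, hand the data to receive_data().
 */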
1531 static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
1532 {
1533         struct usba_request *req;
1534         u32 epstatus;
1535         u32 epctrl;
1536
1537         epstatus = usba_ep_readl(ep, STA);
1538         epctrl = usba_ep_readl(ep, CTL);
1539
1540         DBG(DBG_INT, "%s: interrupt, status: 0x%08x\n", ep->ep.name, epstatus);
1541
1542         while ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
1543                 DBG(DBG_BUS, "%s: TX PK ready\n", ep->ep.name);
1544
1545                 if (list_empty(&ep->queue)) {
1546                         dev_warn(&udc->pdev->dev, "ep_irq: queue empty\n");
1547                         usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
1548                         return;
1549                 }
1550
1551                 req = list_entry(ep->queue.next, struct usba_request, queue);
1552
1553                 if (req->using_dma) {
1554                         /* Send a zero-length packet */
1555                         usba_ep_writel(ep, SET_STA,
1556                                         USBA_TX_PK_RDY);
1557                         usba_ep_writel(ep, CTL_DIS,
1558                                         USBA_TX_PK_RDY);
1559                         list_del_init(&req->queue);
1560                         submit_next_request(ep);
1561                         request_complete(ep, req, 0);
1562                 } else {
1563                         if (req->submitted)
1564                                 next_fifo_transaction(ep, req);
1565                         else
1566                                 submit_request(ep, req);
1567
1568                         if (req->last_transaction) {
1569                                 list_del_init(&req->queue);
1570                                 submit_next_request(ep);
1571                                 request_complete(ep, req, 0);
1572                         }
1573                 }
1574
1575                 epstatus = usba_ep_readl(ep, STA);
1576                 epctrl = usba_ep_readl(ep, CTL);
1577         }
1578         if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
1579                 DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
1580                 receive_data(ep);
1581         }
1582 }
1583
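/*
 * Per-endpoint DMA interrupt handler.  On end-of-transfer or
 * end-of-buffer the request at the head of the queue is updated from the
 * DMA status, the next queued request is started and the finished one is
 * completed.
 */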
1584 static void usba_dma_irq(struct usba_udc *udc, struct usba_ep *ep)
1585 {
1586         struct usba_request *req;
1587         u32 status, control, pending;
1588
1589         status = usba_dma_readl(ep, STATUS);
1590         control = usba_dma_readl(ep, CONTROL);
1591 #ifdef CONFIG_USB_GADGET_DEBUG_FS
1592         ep->last_dma_status = status;
1593 #endif
1594         pending = status & control;
1595         DBG(DBG_INT | DBG_DMA, "dma irq, s/%#08x, c/%#08x\n", status, control);
1596
1597         if (status & USBA_DMA_CH_EN) {
1598                 dev_err(&udc->pdev->dev,
1599                         "DMA_CH_EN is set after transfer is finished!\n");
1600                 dev_err(&udc->pdev->dev,
1601                         "status=%#08x, pending=%#08x, control=%#08x\n",
1602                         status, pending, control);
1603
1604                 /*
1605                  * try to pretend nothing happened. We might have to
1606                  * do something here...
1607                  */
1608         }
1609
1610         if (list_empty(&ep->queue))
1611                 /* Might happen if a reset comes along at the right moment */
1612                 return;
1613
1614         if (pending & (USBA_DMA_END_TR_ST | USBA_DMA_END_BUF_ST)) {
1615                 req = list_entry(ep->queue.next, struct usba_request, queue);
1616                 usba_update_req(ep, req, status);
1617
1618                 list_del_init(&req->queue);
1619                 submit_next_request(ep);
1620                 request_complete(ep, req, 0);
1621         }
1622 }
1623
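/*
 * Top-level device interrupt handler.  Handles bus-level events
 * (suspend, wake-up, resume, end of reset) and then dispatches DMA and
 * endpoint interrupts to usba_dma_irq(), usba_control_irq() and
 * usba_ep_irq().  Runs with udc->lock held; the lock is dropped around
 * callbacks into the gadget driver.
 */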
1624 static irqreturn_t usba_udc_irq(int irq, void *devid)
1625 {
1626         struct usba_udc *udc = devid;
1627         u32 status, int_enb;
1628         u32 dma_status;
1629         u32 ep_status;
1630
1631         spin_lock(&udc->lock);
1632
1633         int_enb = usba_int_enb_get(udc);
1634         status = usba_readl(udc, INT_STA) & (int_enb | USBA_HIGH_SPEED);
1635         DBG(DBG_INT, "irq, status=%#08x\n", status);
1636
1637         if (status & USBA_DET_SUSPEND) {
1638                 toggle_bias(udc, 0);
1639                 usba_writel(udc, INT_CLR, USBA_DET_SUSPEND);
1640                 usba_int_enb_set(udc, int_enb | USBA_WAKE_UP);
1641                 udc->bias_pulse_needed = true;
1642                 DBG(DBG_BUS, "Suspend detected\n");
1643                 if (udc->gadget.speed != USB_SPEED_UNKNOWN
1644                                 && udc->driver && udc->driver->suspend) {
1645                         spin_unlock(&udc->lock);
1646                         udc->driver->suspend(&udc->gadget);
1647                         spin_lock(&udc->lock);
1648                 }
1649         }
1650
1651         if (status & USBA_WAKE_UP) {
1652                 toggle_bias(udc, 1);
1653                 usba_writel(udc, INT_CLR, USBA_WAKE_UP);
1654                 usba_int_enb_set(udc, int_enb & ~USBA_WAKE_UP);
1655                 DBG(DBG_BUS, "Wake Up CPU detected\n");
1656         }
1657
1658         if (status & USBA_END_OF_RESUME) {
1659                 usba_writel(udc, INT_CLR, USBA_END_OF_RESUME);
1660                 generate_bias_pulse(udc);
1661                 DBG(DBG_BUS, "Resume detected\n");
1662                 if (udc->gadget.speed != USB_SPEED_UNKNOWN
1663                                 && udc->driver && udc->driver->resume) {
1664                         spin_unlock(&udc->lock);
1665                         udc->driver->resume(&udc->gadget);
1666                         spin_lock(&udc->lock);
1667                 }
1668         }
1669
1670         dma_status = USBA_BFEXT(DMA_INT, status);
1671         if (dma_status) {
1672                 int i;
1673
1674                 for (i = 1; i <= USBA_NR_DMAS; i++)
1675                         if (dma_status & (1 << i))
1676                                 usba_dma_irq(udc, &udc->usba_ep[i]);
1677         }
1678
1679         ep_status = USBA_BFEXT(EPT_INT, status);
1680         if (ep_status) {
1681                 int i;
1682
1683                 for (i = 0; i < udc->num_ep; i++)
1684                         if (ep_status & (1 << i)) {
1685                                 if (ep_is_control(&udc->usba_ep[i]))
1686                                         usba_control_irq(udc, &udc->usba_ep[i]);
1687                                 else
1688                                         usba_ep_irq(udc, &udc->usba_ep[i]);
1689                         }
1690         }
1691
1692         if (status & USBA_END_OF_RESET) {
1693                 struct usba_ep *ep0;
1694
1695                 usba_writel(udc, INT_CLR, USBA_END_OF_RESET);
1696                 generate_bias_pulse(udc);
1697                 reset_all_endpoints(udc);
1698
1699                 if (udc->gadget.speed != USB_SPEED_UNKNOWN && udc->driver) {
1700                         udc->gadget.speed = USB_SPEED_UNKNOWN;
1701                         spin_unlock(&udc->lock);
1702                         usb_gadget_udc_reset(&udc->gadget, udc->driver);
1703                         spin_lock(&udc->lock);
1704                 }
1705
1706                 if (status & USBA_HIGH_SPEED)
1707                         udc->gadget.speed = USB_SPEED_HIGH;
1708                 else
1709                         udc->gadget.speed = USB_SPEED_FULL;
1710                 DBG(DBG_BUS, "%s bus reset detected\n",
1711                     usb_speed_string(udc->gadget.speed));
1712
1713                 ep0 = &udc->usba_ep[0];
1714                 ep0->ep.desc = &usba_ep0_desc;
1715                 ep0->state = WAIT_FOR_SETUP;
1716                 usba_ep_writel(ep0, CFG,
1717                                 (USBA_BF(EPT_SIZE, EP0_EPT_SIZE)
1718                                 | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL)
1719                                 | USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE)));
1720                 usba_ep_writel(ep0, CTL_ENB,
1721                                 USBA_EPT_ENABLE | USBA_RX_SETUP);
1722                 usba_int_enb_set(udc, int_enb | USBA_BF(EPT_INT, 1) |
1723                                       USBA_DET_SUSPEND | USBA_END_OF_RESUME);
1724
1725                 /*
1726                  * Unclear why we hit this irregularly, e.g. in usbtest,
1727                  * but it's clearly harmless...
1728                  */
1729                 if (!(usba_ep_readl(ep0, CFG) & USBA_EPT_MAPPED))
1730                         dev_dbg(&udc->pdev->dev,
1731                                  "ODD: EP0 configuration is invalid!\n");
1732         }
1733
1734         spin_unlock(&udc->lock);
1735
1736         return IRQ_HANDLED;
1737 }
1738
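/*
 * Enable the controller clocks (pclk, then hclk) unless they are already
 * running; stop_clock() is the counterpart.
 */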
1739 static int start_clock(struct usba_udc *udc)
1740 {
1741         int ret;
1742
1743         if (udc->clocked)
1744                 return 0;
1745
1746         ret = clk_prepare_enable(udc->pclk);
1747         if (ret)
1748                 return ret;
1749         ret = clk_prepare_enable(udc->hclk);
1750         if (ret) {
1751                 clk_disable_unprepare(udc->pclk);
1752                 return ret;
1753         }
1754
1755         udc->clocked = true;
1756         return 0;
1757 }
1758
1759 static void stop_clock(struct usba_udc *udc)
1760 {
1761         if (!udc->clocked)
1762                 return;
1763
1764         clk_disable_unprepare(udc->hclk);
1765         clk_disable_unprepare(udc->pclk);
1766
1767         udc->clocked = false;
1768 }
1769
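/*
 * Bring the controller up: enable the clocks, switch the bias on, enable
 * the USB device logic and wait for the host to issue a bus reset
 * (USBA_END_OF_RESET is the only interrupt enabled at this point).
 */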
1770 static int usba_start(struct usba_udc *udc)
1771 {
1772         unsigned long flags;
1773         int ret;
1774
1775         ret = start_clock(udc);
1776         if (ret)
1777                 return ret;
1778
1779         spin_lock_irqsave(&udc->lock, flags);
1780         toggle_bias(udc, 1);
1781         usba_writel(udc, CTRL, USBA_ENABLE_MASK);
1782         usba_int_enb_set(udc, USBA_END_OF_RESET);
1783         spin_unlock_irqrestore(&udc->lock, flags);
1784
1785         return 0;
1786 }
1787
1788 static void usba_stop(struct usba_udc *udc)
1789 {
1790         unsigned long flags;
1791
1792         spin_lock_irqsave(&udc->lock, flags);
1793         udc->gadget.speed = USB_SPEED_UNKNOWN;
1794         reset_all_endpoints(udc);
1795
1796         /* This will also disable the DP pullup */
1797         toggle_bias(udc, 0);
1798         usba_writel(udc, CTRL, USBA_DISABLE_MASK);
1799         spin_unlock_irqrestore(&udc->lock, flags);
1800
1801         stop_clock(udc);
1802 }
1803
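/*
 * Threaded handler for the Vbus GPIO interrupt.  After a short debounce
 * delay, start or stop the controller depending on whether Vbus is now
 * present, and notify the gadget driver when the cable is pulled.
 */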
1804 static irqreturn_t usba_vbus_irq_thread(int irq, void *devid)
1805 {
1806         struct usba_udc *udc = devid;
1807         int vbus;
1808
1809         /* debounce */
1810         udelay(10);
1811
1812         mutex_lock(&udc->vbus_mutex);
1813
1814         vbus = vbus_is_present(udc);
1815         if (vbus != udc->vbus_prev) {
1816                 if (vbus) {
1817                         usba_start(udc);
1818                 } else {
1819                         usba_stop(udc);
1820
1821                         if (udc->driver->disconnect)
1822                                 udc->driver->disconnect(&udc->gadget);
1823                 }
1824                 udc->vbus_prev = vbus;
1825         }
1826
1827         mutex_unlock(&udc->vbus_mutex);
1828         return IRQ_HANDLED;
1829 }
1830
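/*
 * ->udc_start() callback: bind the gadget driver, unmask the Vbus GPIO
 * interrupt if one is available, and start the controller right away
 * when Vbus is already present.
 */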
1831 static int atmel_usba_start(struct usb_gadget *gadget,
1832                 struct usb_gadget_driver *driver)
1833 {
1834         int ret;
1835         struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget);
1836         unsigned long flags;
1837
1838         spin_lock_irqsave(&udc->lock, flags);
1839         udc->devstatus = 1 << USB_DEVICE_SELF_POWERED;
1840         udc->driver = driver;
1841         spin_unlock_irqrestore(&udc->lock, flags);
1842
1843         mutex_lock(&udc->vbus_mutex);
1844
1845         if (gpio_is_valid(udc->vbus_pin))
1846                 enable_irq(gpio_to_irq(udc->vbus_pin));
1847
1848         /* If Vbus is present, enable the controller and wait for reset */
1849         udc->vbus_prev = vbus_is_present(udc);
1850         if (udc->vbus_prev) {
1851                 ret = usba_start(udc);
1852                 if (ret)
1853                         goto err;
1854         }
1855
1856         mutex_unlock(&udc->vbus_mutex);
1857         return 0;
1858
1859 err:
1860         if (gpio_is_valid(udc->vbus_pin))
1861                 disable_irq(gpio_to_irq(udc->vbus_pin));
1862
1863         mutex_unlock(&udc->vbus_mutex);
1864
1865         spin_lock_irqsave(&udc->lock, flags);
1866         udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
1867         udc->driver = NULL;
1868         spin_unlock_irqrestore(&udc->lock, flags);
1869         return ret;
1870 }
1871
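/*
 * ->udc_stop() callback: mask the Vbus interrupt, shut the controller
 * down and unbind the gadget driver.
 */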
1872 static int atmel_usba_stop(struct usb_gadget *gadget)
1873 {
1874         struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget);
1875
1876         if (gpio_is_valid(udc->vbus_pin))
1877                 disable_irq(gpio_to_irq(udc->vbus_pin));
1878
1879         usba_stop(udc);
1880
1881         udc->driver = NULL;
1882
1883         return 0;
1884 }
1885
1886 #ifdef CONFIG_OF
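/*
 * SoC-specific errata handling.  Both workarounds operate on the BIASEN
 * bit in the PMC's AT91_CKGR_UCKR register: at91sam9rl needs the bias
 * switched on and off together with the controller, while at91sam9g45
 * needs it pulsed (cleared and set again), which the driver does when
 * the bus comes back out of suspend.
 */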
1887 static void at91sam9rl_toggle_bias(struct usba_udc *udc, int is_on)
1888 {
1889         unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR);
1890
1891         if (is_on)
1892                 at91_pmc_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN);
1893         else
1894                 at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
1895 }
1896
1897 static void at91sam9g45_pulse_bias(struct usba_udc *udc)
1898 {
1899         unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR);
1900
1901         at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
1902         at91_pmc_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN);
1903 }
1904
1905 static const struct usba_udc_errata at91sam9rl_errata = {
1906         .toggle_bias = at91sam9rl_toggle_bias,
1907 };
1908
1909 static const struct usba_udc_errata at91sam9g45_errata = {
1910         .pulse_bias = at91sam9g45_pulse_bias,
1911 };
1912
1913 static const struct of_device_id atmel_udc_dt_ids[] = {
1914         { .compatible = "atmel,at91sam9rl-udc", .data = &at91sam9rl_errata },
1915         { .compatible = "atmel,at91sam9g45-udc", .data = &at91sam9g45_errata },
1916         { .compatible = "atmel,sama5d3-udc" },
1917         { /* sentinel */ }
1918 };
1919
1920 MODULE_DEVICE_TABLE(of, atmel_udc_dt_ids);
1921
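/*
 * Build the endpoint array from the device tree.  The controller node's
 * compatible string selects the errata ops, "atmel,vbus-gpio" names the
 * optional Vbus sense pin, and each child node describes one endpoint
 * ("reg", "atmel,fifo-size", "atmel,nb-banks", plus the optional
 * booleans "atmel,can-dma" and "atmel,can-isoc"); the endpoint name is
 * read from the node's "name" property.
 *
 * Illustrative sketch of the layout this parser expects (node names and
 * values are examples only, not a binding reference):
 *
 *	gadget@500000 {
 *		compatible = "atmel,sama5d3-udc";
 *		ep0 {
 *			reg = <0>;
 *			atmel,fifo-size = <64>;
 *			atmel,nb-banks = <1>;
 *		};
 *		ep1 {
 *			reg = <1>;
 *			atmel,fifo-size = <1024>;
 *			atmel,nb-banks = <3>;
 *			atmel,can-dma;
 *			atmel,can-isoc;
 *		};
 *	};
 */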
1922 static struct usba_ep *atmel_udc_of_init(struct platform_device *pdev,
1923                                                     struct usba_udc *udc)
1924 {
1925         u32 val;
1926         const char *name;
1927         enum of_gpio_flags flags;
1928         struct device_node *np = pdev->dev.of_node;
1929         const struct of_device_id *match;
1930         struct device_node *pp;
1931         int i, ret;
1932         struct usba_ep *eps, *ep;
1933
1934         match = of_match_node(atmel_udc_dt_ids, np);
1935         if (!match)
1936                 return ERR_PTR(-EINVAL);
1937
1938         udc->errata = match->data;
1939
1940         udc->num_ep = 0;
1941
1942         udc->vbus_pin = of_get_named_gpio_flags(np, "atmel,vbus-gpio", 0,
1943                                                 &flags);
1944         udc->vbus_pin_inverted = (flags & OF_GPIO_ACTIVE_LOW) ? 1 : 0;
1945
1946         pp = NULL;
1947         while ((pp = of_get_next_child(np, pp)))
1948                 udc->num_ep++;
1949
1950         eps = devm_kcalloc(&pdev->dev, udc->num_ep, sizeof(struct usba_ep),
1951                            GFP_KERNEL);
1952         if (!eps)
1953                 return ERR_PTR(-ENOMEM);
1954
1955         udc->gadget.ep0 = &eps[0].ep;
1956
1957         INIT_LIST_HEAD(&eps[0].ep.ep_list);
1958
1959         pp = NULL;
1960         i = 0;
1961         while ((pp = of_get_next_child(np, pp))) {
1962                 ep = &eps[i];
1963
1964                 ret = of_property_read_u32(pp, "reg", &val);
1965                 if (ret) {
1966                         dev_err(&pdev->dev, "of_probe: reg error(%d)\n", ret);
1967                         goto err;
1968                 }
1969                 ep->index = val;
1970
1971                 ret = of_property_read_u32(pp, "atmel,fifo-size", &val);
1972                 if (ret) {
1973                         dev_err(&pdev->dev, "of_probe: fifo-size error(%d)\n", ret);
1974                         goto err;
1975                 }
1976                 ep->fifo_size = val;
1977
1978                 ret = of_property_read_u32(pp, "atmel,nb-banks", &val);
1979                 if (ret) {
1980                         dev_err(&pdev->dev, "of_probe: nb-banks error(%d)\n", ret);
1981                         goto err;
1982                 }
1983                 ep->nr_banks = val;
1984
1985                 ep->can_dma = of_property_read_bool(pp, "atmel,can-dma");
1986                 ep->can_isoc = of_property_read_bool(pp, "atmel,can-isoc");
1987
1988                 ret = of_property_read_string(pp, "name", &name);
1989                 if (ret) {
1990                         dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
1991                         goto err;
1992                 }
1993                 ep->ep.name = name;
1994
1995                 ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
1996                 ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
1997                 ep->fifo = udc->fifo + USBA_FIFO_BASE(i);
1998                 ep->ep.ops = &usba_ep_ops;
1999                 usb_ep_set_maxpacket_limit(&ep->ep, ep->fifo_size);
2000                 ep->udc = udc;
2001                 INIT_LIST_HEAD(&ep->queue);
2002
2003                 if (ep->index == 0) {
2004                         ep->ep.caps.type_control = true;
2005                 } else {
2006                         ep->ep.caps.type_iso = ep->can_isoc;
2007                         ep->ep.caps.type_bulk = true;
2008                         ep->ep.caps.type_int = true;
2009                 }
2010
2011                 ep->ep.caps.dir_in = true;
2012                 ep->ep.caps.dir_out = true;
2013
2014                 if (i)
2015                         list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
2016
2017                 i++;
2018         }
2019
2020         if (i == 0) {
2021                 dev_err(&pdev->dev, "of_probe: no endpoint specified\n");
2022                 ret = -EINVAL;
2023                 goto err;
2024         }
2025
2026         return eps;
2027 err:
2028         return ERR_PTR(ret);
2029 }
2030 #else
2031 static struct usba_ep *atmel_udc_of_init(struct platform_device *pdev,
2032                                                     struct usba_udc *udc)
2033 {
2034         return ERR_PTR(-ENOSYS);
2035 }
2036 #endif
2037
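/*
 * Legacy (non-DT) probe path: build the endpoint array from the
 * struct usba_platform_data supplied by the board file, including the
 * Vbus GPIO and the per-endpoint FIFO size, bank count and DMA/isoc
 * capabilities.
 */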
2038 static struct usba_ep *usba_udc_pdata(struct platform_device *pdev,
2039                                                  struct usba_udc *udc)
2040 {
2041         struct usba_platform_data *pdata = dev_get_platdata(&pdev->dev);
2042         struct usba_ep *eps;
2043         int i;
2044
2045         if (!pdata)
2046                 return ERR_PTR(-ENXIO);
2047
2048         eps = devm_kcalloc(&pdev->dev, pdata->num_ep, sizeof(struct usba_ep),
2049                            GFP_KERNEL);
2050         if (!eps)
2051                 return ERR_PTR(-ENOMEM);
2052
2053         udc->gadget.ep0 = &eps[0].ep;
2054
2055         udc->vbus_pin = pdata->vbus_pin;
2056         udc->vbus_pin_inverted = pdata->vbus_pin_inverted;
2057         udc->num_ep = pdata->num_ep;
2058
2059         INIT_LIST_HEAD(&eps[0].ep.ep_list);
2060
2061         for (i = 0; i < pdata->num_ep; i++) {
2062                 struct usba_ep *ep = &eps[i];
2063
2064                 ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
2065                 ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
2066                 ep->fifo = udc->fifo + USBA_FIFO_BASE(i);
2067                 ep->ep.ops = &usba_ep_ops;
2068                 ep->ep.name = pdata->ep[i].name;
2069                 ep->fifo_size = pdata->ep[i].fifo_size;
2070                 usb_ep_set_maxpacket_limit(&ep->ep, ep->fifo_size);
2071                 ep->udc = udc;
2072                 INIT_LIST_HEAD(&ep->queue);
2073                 ep->nr_banks = pdata->ep[i].nr_banks;
2074                 ep->index = pdata->ep[i].index;
2075                 ep->can_dma = pdata->ep[i].can_dma;
2076                 ep->can_isoc = pdata->ep[i].can_isoc;
2077
2078                 if (i == 0) {
2079                         ep->ep.caps.type_control = true;
2080                 } else {
2081                         ep->ep.caps.type_iso = ep->can_isoc;
2082                         ep->ep.caps.type_bulk = true;
2083                         ep->ep.caps.type_int = true;
2084                 }
2085
2086                 ep->ep.caps.dir_in = true;
2087                 ep->ep.caps.dir_out = true;
2088
2089                 if (i)
2090                         list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
2091         }
2092
2093         return eps;
2094 }
2095
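/*
 * Probe: map the register and FIFO windows, look up the clocks and IRQ,
 * force the controller into its disabled state, build the endpoint array
 * from DT or platform data, request the controller and optional Vbus
 * interrupts, and finally register the UDC with the gadget core and set
 * up the debugfs files.
 */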
2096 static int usba_udc_probe(struct platform_device *pdev)
2097 {
2098         struct resource *regs, *fifo;
2099         struct clk *pclk, *hclk;
2100         struct usba_udc *udc;
2101         int irq, ret, i;
2102
2103         udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
2104         if (!udc)
2105                 return -ENOMEM;
2106
2107         udc->gadget = usba_gadget_template;
2108         INIT_LIST_HEAD(&udc->gadget.ep_list);
2109
2110         regs = platform_get_resource(pdev, IORESOURCE_MEM, CTRL_IOMEM_ID);
2111         fifo = platform_get_resource(pdev, IORESOURCE_MEM, FIFO_IOMEM_ID);
2112         if (!regs || !fifo)
2113                 return -ENXIO;
2114
2115         irq = platform_get_irq(pdev, 0);
2116         if (irq < 0)
2117                 return irq;
2118
2119         pclk = devm_clk_get(&pdev->dev, "pclk");
2120         if (IS_ERR(pclk))
2121                 return PTR_ERR(pclk);
2122         hclk = devm_clk_get(&pdev->dev, "hclk");
2123         if (IS_ERR(hclk))
2124                 return PTR_ERR(hclk);
2125
2126         spin_lock_init(&udc->lock);
2127         mutex_init(&udc->vbus_mutex);
2128         udc->pdev = pdev;
2129         udc->pclk = pclk;
2130         udc->hclk = hclk;
2131         udc->vbus_pin = -ENODEV;
2132
2133         ret = -ENOMEM;
2134         udc->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
2135         if (!udc->regs) {
2136                 dev_err(&pdev->dev, "Unable to map I/O memory, aborting.\n");
2137                 return ret;
2138         }
2139         dev_info(&pdev->dev, "MMIO registers at 0x%08lx mapped at %p\n",
2140                  (unsigned long)regs->start, udc->regs);
2141         udc->fifo = devm_ioremap(&pdev->dev, fifo->start, resource_size(fifo));
2142         if (!udc->fifo) {
2143                 dev_err(&pdev->dev, "Unable to map FIFO, aborting.\n");
2144                 return ret;
2145         }
2146         dev_info(&pdev->dev, "FIFO at 0x%08lx mapped at %p\n",
2147                  (unsigned long)fifo->start, udc->fifo);
2148
2149         platform_set_drvdata(pdev, udc);
2150
2151         /* Make sure we start from a clean slate */
2152         ret = clk_prepare_enable(pclk);
2153         if (ret) {
2154                 dev_err(&pdev->dev, "Unable to enable pclk, aborting.\n");
2155                 return ret;
2156         }
2157
2158         usba_writel(udc, CTRL, USBA_DISABLE_MASK);
2159         clk_disable_unprepare(pclk);
2160
2161         if (pdev->dev.of_node)
2162                 udc->usba_ep = atmel_udc_of_init(pdev, udc);
2163         else
2164                 udc->usba_ep = usba_udc_pdata(pdev, udc);
2165
2166         toggle_bias(udc, 0);
2167
2168         if (IS_ERR(udc->usba_ep))
2169                 return PTR_ERR(udc->usba_ep);
2170
2171         ret = devm_request_irq(&pdev->dev, irq, usba_udc_irq, 0,
2172                                 "atmel_usba_udc", udc);
2173         if (ret) {
2174                 dev_err(&pdev->dev, "Cannot request irq %d (error %d)\n",
2175                         irq, ret);
2176                 return ret;
2177         }
2178         udc->irq = irq;
2179
2180         if (gpio_is_valid(udc->vbus_pin)) {
2181                 if (!devm_gpio_request(&pdev->dev, udc->vbus_pin, "atmel_usba_udc")) {
2182                         irq_set_status_flags(gpio_to_irq(udc->vbus_pin),
2183                                         IRQ_NOAUTOEN);
2184                         ret = devm_request_threaded_irq(&pdev->dev,
2185                                         gpio_to_irq(udc->vbus_pin), NULL,
2186                                         usba_vbus_irq_thread, IRQF_ONESHOT,
2187                                         "atmel_usba_udc", udc);
2188                         if (ret) {
2189                                 udc->vbus_pin = -ENODEV;
2190                                 dev_warn(&udc->pdev->dev,
2191                                          "failed to request vbus irq; assuming always on\n");
2193                         }
2194                 } else {
2195                         /* gpio_request failed; use -EINVAL so gpio_is_valid() fails */
2196                         udc->vbus_pin = -EINVAL;
2197                 }
2198         }
2199
2200         ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
2201         if (ret)
2202                 return ret;
2203         device_init_wakeup(&pdev->dev, 1);
2204
2205         usba_init_debugfs(udc);
2206         for (i = 1; i < udc->num_ep; i++)
2207                 usba_ep_init_debugfs(udc, &udc->usba_ep[i]);
2208
2209         return 0;
2210 }
2211
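/* Undo probe: unregister the UDC and remove the debugfs entries. */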
2212 static int usba_udc_remove(struct platform_device *pdev)
2213 {
2214         struct usba_udc *udc;
2215         int i;
2216
2217         udc = platform_get_drvdata(pdev);
2218
2219         device_init_wakeup(&pdev->dev, 0);
2220         usb_del_gadget_udc(&udc->gadget);
2221
2222         for (i = 1; i < udc->num_ep; i++)
2223                 usba_ep_cleanup_debugfs(&udc->usba_ep[i]);
2224         usba_cleanup_debugfs(udc);
2225
2226         return 0;
2227 }
2228
2229 #ifdef CONFIG_PM_SLEEP
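/*
 * System sleep support: without wakeup the controller is simply shut
 * down; with wakeup and a usable Vbus GPIO it is stopped and the Vbus
 * interrupt is armed as a wake source; with no Vbus GPIO it is left
 * running ("assuming always on").
 */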
2230 static int usba_udc_suspend(struct device *dev)
2231 {
2232         struct usba_udc *udc = dev_get_drvdata(dev);
2233
2234         /* Not started */
2235         if (!udc->driver)
2236                 return 0;
2237
2238         mutex_lock(&udc->vbus_mutex);
2239
2240         if (!device_may_wakeup(dev)) {
2241                 usba_stop(udc);
2242                 goto out;
2243         }
2244
2245         /*
2246          * Device may wake up. We stay clocked if we failed
2247          * to request vbus irq, assuming always on.
2248          */
2249         if (gpio_is_valid(udc->vbus_pin)) {
2250                 usba_stop(udc);
2251                 enable_irq_wake(gpio_to_irq(udc->vbus_pin));
2252         }
2253
2254 out:
2255         mutex_unlock(&udc->vbus_mutex);
2256         return 0;
2257 }
2258
2259 static int usba_udc_resume(struct device *dev)
2260 {
2261         struct usba_udc *udc = dev_get_drvdata(dev);
2262
2263         /* Not started */
2264         if (!udc->driver)
2265                 return 0;
2266
2267         if (device_may_wakeup(dev) && gpio_is_valid(udc->vbus_pin))
2268                 disable_irq_wake(gpio_to_irq(udc->vbus_pin));
2269
2270         /* If Vbus is present, enable the controller and wait for reset */
2271         mutex_lock(&udc->vbus_mutex);
2272         udc->vbus_prev = vbus_is_present(udc);
2273         if (udc->vbus_prev)
2274                 usba_start(udc);
2275         mutex_unlock(&udc->vbus_mutex);
2276
2277         return 0;
2278 }
2279 #endif
2280
2281 static SIMPLE_DEV_PM_OPS(usba_udc_pm_ops, usba_udc_suspend, usba_udc_resume);
2282
2283 static struct platform_driver udc_driver = {
2284         .remove         = usba_udc_remove,
2285         .driver         = {
2286                 .name           = "atmel_usba_udc",
2287                 .pm             = &usba_udc_pm_ops,
2288                 .of_match_table = of_match_ptr(atmel_udc_dt_ids),
2289         },
2290 };
2291
2292 module_platform_driver_probe(udc_driver, usba_udc_probe);
2293
2294 MODULE_DESCRIPTION("Atmel USBA UDC driver");
2295 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
2296 MODULE_LICENSE("GPL");
2297 MODULE_ALIAS("platform:atmel_usba_udc");