/*
 * MUSB OTG driver peripheral support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "musb_core.h"
/* MUSB PERIPHERAL status 3-mar-2006:
 *
 * - EP0 seems solid.  It passes both USBCV and usbtest control cases.
 *
 *   + remote wakeup to Linux hosts works, but saw USBCV failures
 *     in one test run (operator error?)
 *   + endpoint halt tests -- in both usbtest and usbcv -- seem
 *     to break when dma is enabled ... is something wrongly
 *     clearing SENDSTALL?
 *
 * - Mass storage behaved ok when last tested.  Network traffic patterns
 *   (with lots of short transfers etc) need retesting; they turn up the
 *   worst cases of the DMA, since short packets are typical but are not
 *   required.
 *
 * - TX/IN
 *   + both pio and dma behave with network and g_zero tests
 *   + no cppi throughput issues other than no-hw-queueing
 *   + failed with FLAT_REG (DaVinci)
 *   + seems to behave with double buffering, PIO -and- CPPI
 *   + with gadgetfs + AIO, requests got lost?
 *
 * - RX/OUT
 *   + both pio and dma behave with network and g_zero tests
 *   + dma is slow in typical case (short_not_ok is clear)
 *   + double buffering ok with PIO
 *   + double buffering *FAILS* with CPPI, wrong data bytes sometimes
 *   + request lossage observed with gadgetfs
 *
 * - ISO not tested ... might work, but only weakly isochronous
 *
 * - Gadget driver disabling of softconnect during bind() is ignored; so
 *   drivers can't hold off host requests until userspace is ready.
 *   (Workaround: they can turn it off later.)
 *
 * - PORTABILITY (assumes PIO works):
 *   + DaVinci, basically works with cppi dma
 *   + OMAP 2430, ditto with mentor dma
 *   + TUSB 6010, platform-specific dma in the works
 */
/* ----------------------------------------------------------------------- */
/*
 * Immediately complete a request.
 *
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*request,
	int			status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;

	req = to_musb_request(request);

	list_del(&request->list);
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;

	ep->busy = 1;
	spin_unlock(&musb->lock);
	if (is_dma_capable()) {
		if (req->mapped) {
			dma_unmap_single(musb->controller,
					req->request.dma,
					req->request.length,
					req->tx
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
			req->request.dma = DMA_ADDR_INVALID;
			req->mapped = 0;
		} else if (req->request.dma != DMA_ADDR_INVALID)
			dma_sync_single_for_cpu(musb->controller,
					req->request.dma,
					req->request.length,
					req->tx
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
	}
	if (request->status == 0)
		DBG(5, "%s done request %p, %d/%d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length);
	else
		DBG(2, "%s request %p, %d/%d fault %d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length,
				request->status);
	req->request.complete(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}
/* ----------------------------------------------------------------------- */
/*
 * Abort requests queued to an endpoint using the status.  Synchronous.
 * Caller has locked the controller, blocked IRQs, and selected this ep.
 */
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb_request	*req = NULL;
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

	ep->busy = 1;

	if (is_dma_capable() && ep->dma) {
		struct dma_controller	*c = ep->musb->dma_controller;
		int value;

		if (ep->is_in) {
			/*
			 * The programming guide says that we must not clear
			 * the DMAMODE bit before DMAENAB, so we only
			 * clear it in the second write...
			 */
			musb_writew(epio, MUSB_TXCSR,
				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_TXCSR,
				    0 | MUSB_TXCSR_FLUSHFIFO);
		} else {
			musb_writew(epio, MUSB_RXCSR,
				    0 | MUSB_RXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_RXCSR,
				    0 | MUSB_RXCSR_FLUSHFIFO);
		}

		value = c->channel_abort(ep->dma);
		DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}

	while (!list_empty(&(ep->req_list))) {
		req = container_of(ep->req_list.next, struct musb_request,
				request.list);
		musb_g_giveback(ep, &req->request, status);
	}
}
/* ----------------------------------------------------------------------- */

/* Data transfers - pure PIO, pure DMA, or mixed mode */
/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from mentor dma.
 */

static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
	if (can_bulk_split(musb, ep->type))
		return ep->hw_ep->max_packet_sz_tx;
	else
		return ep->packet_sz;
}
#ifdef CONFIG_USB_INVENTRA_DMA
/* Peripheral tx (IN) using Mentor DMA works as follows:
	Only mode 0 is used for transfers <= wPktSize,
	mode 1 is used for larger transfers,

	One of the following happens:
	- Host sends IN token which causes an endpoint interrupt
		-> if DMA is currently busy, exit.
		-> if queue is non-empty, txstate().

	- Request is queued by the gadget driver.
		-> if queue was previously empty, txstate()

	txstate()
		-> start
		/\	-> setup DMA
		|	(data is transferred to the FIFO, then sent out when
		|	IN token(s) are received from the Host.)
		|	-> DMA interrupt on completion
		|	   calls TxAvail.
		|		-> stop DMA, ~DMAENAB,
		|		-> set TxPktRdy for last short pkt or zlp
		|		-> Complete Request
		|		-> Continue next request (call txstate)
		|___________________________________|

 * Non-Mentor DMA engines can of course work differently, such as by
 * upleveling from irq-per-packet to irq-per-buffer.
 */
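/*
 * Illustrative sketch (not part of the original driver): the TX DMA mode
 * choice described above, pulled out of txstate() for clarity.  The helper
 * name is hypothetical; txstate() below open-codes the same test.
 */
#if 0
static inline int mentor_tx_dma_mode(size_t request_size, u16 packet_sz)
{
	/* mode 0: one packet per DMA request, irq-per-packet;
	 * mode 1: stream the whole buffer, then set TxPktRdy by hand
	 * for a terminating short packet or ZLP.
	 */
	return (request_size < packet_sz) ? 0 : 1;
}
#endif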
/*
 * An endpoint is transmitting data.  This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			epnum = req->epnum;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct usb_request	*request;
	u16			fifo_count = 0, csr;
	int			use_dma = 0;

	musb_ep = req->ep;

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		DBG(4, "dma pending...\n");
		return;
	}

	/* read TXCSR before */
	csr = musb_readw(epio, MUSB_TXCSR);

	request = &req->request;
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			(int)(request->length - request->actual));

	if (csr & MUSB_TXCSR_TXPKTRDY) {
		DBG(5, "%s old packet still ready, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_SENDSTALL) {
		DBG(5, "%s stalling, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
			epnum, musb_ep->packet_sz, fifo_count,
			csr);

#ifndef CONFIG_MUSB_PIO_ONLY
	if (is_dma_capable() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;

		use_dma = (request->dma != DMA_ADDR_INVALID);

		/* MUSB_TXCSR_P_ISO is still set correctly */

#ifdef CONFIG_USB_INVENTRA_DMA
		{
			size_t request_size;

			/* setup DMA, then program endpoint CSR */
			request_size = min_t(size_t, request->length,
						musb_ep->dma->max_len);
			if (request_size < musb_ep->packet_sz)
				musb_ep->dma->desired_mode = 0;
			else
				musb_ep->dma->desired_mode = 1;

			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					musb_ep->dma->desired_mode,
					request->dma + request->actual, request_size);
			if (use_dma) {
				if (musb_ep->dma->desired_mode == 0) {
					/*
					 * We must not clear the DMAMODE bit
					 * before the DMAENAB bit -- and the
					 * latter doesn't always get cleared
					 * before we get here...
					 */
					csr &= ~(MUSB_TXCSR_AUTOSET
						| MUSB_TXCSR_DMAENAB);
					musb_writew(epio, MUSB_TXCSR, csr
						| MUSB_TXCSR_P_WZC_BITS);
					csr &= ~MUSB_TXCSR_DMAMODE;
					csr |= (MUSB_TXCSR_DMAENAB |
							MUSB_TXCSR_MODE);
					/* against programming guide */
				} else
					csr |= (MUSB_TXCSR_AUTOSET
							| MUSB_TXCSR_DMAENAB
							| MUSB_TXCSR_DMAMODE
							| MUSB_TXCSR_MODE);

				csr &= ~MUSB_TXCSR_P_UNDERRUN;
				musb_writew(epio, MUSB_TXCSR, csr);
			}
		}

#elif defined(CONFIG_USB_TI_CPPI_DMA)
		/* program endpoint CSR first, then setup DMA */
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
		       MUSB_TXCSR_MODE;
		musb_writew(epio, MUSB_TXCSR,
			(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
				| csr);

		/* ensure writebuffer is empty */
		csr = musb_readw(epio, MUSB_TXCSR);

		/* NOTE host side sets DMAENAB later than this; both are
		 * OK since the transfer dma glue (between CPPI and Mentor
		 * fifos) just tells CPPI it could start.  Data only moves
		 * to the USB TX fifo when both fifos are ready.
		 */

		/* "mode" is irrelevant here; handle terminating ZLPs like
		 * PIO does, since the hardware RNDIS mode seems unreliable
		 * except for the last-packet-is-already-short case.
		 */
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				0,
				request->dma + request->actual,
				request->length - request->actual);
		if (!use_dma) {
			c->channel_release(musb_ep->dma);
			musb_ep->dma = NULL;
			csr &= ~MUSB_TXCSR_DMAENAB;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* invariant: request->buf is non-null */
		}
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				request->zero,
				request->dma + request->actual,
				request->length - request->actual);
#endif
	}
#endif

	if (!use_dma) {
		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (request->buf + request->actual));
		request->actual += fifo_count;
		csr |= MUSB_TXCSR_TXPKTRDY;
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(epio, MUSB_TXCSR, csr);
	}

	/* host may already have the data when this message shows... */
	DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
			musb_ep->end_point.name, use_dma ? "dma" : "pio",
			request->actual, request->length,
			musb_readw(epio, MUSB_TXCSR),
			fifo_count,
			musb_readw(epio, MUSB_TXMAXP));
}
/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct usb_request	*request;
	u8 __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);
	request = next_request(musb_ep);

	csr = musb_readw(epio, MUSB_TXCSR);
	DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);

	dma = is_dma_capable() ? musb_ep->dma : NULL;

	/*
	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
	 * probably rates reporting as a host error.
	 */
	if (csr & MUSB_TXCSR_P_SENTSTALL) {
		csr |=	MUSB_TXCSR_P_WZC_BITS;
		csr &= ~MUSB_TXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_TXCSR, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_UNDERRUN) {
		/* We NAKed, no big deal... little reason to care. */
		csr |=	 MUSB_TXCSR_P_WZC_BITS;
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		DBG(20, "underrun on ep%d, req %p\n", epnum, request);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/*
		 * SHOULD NOT HAPPEN... has with CPPI though, after
		 * changing SENDSTALL (and other cases); harmless?
		 */
		DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
		return;
	}

	if (request) {
		u8	is_dma = 0;

		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
			is_dma = 1;
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
				 MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR, csr);
			/* Ensure writebuffer is empty. */
			csr = musb_readw(epio, MUSB_TXCSR);
			request->actual += musb_ep->dma->actual_len;
			DBG(4, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
				epnum, csr, musb_ep->dma->actual_len, request);
		}

		if (is_dma || request->actual == request->length) {
			/*
			 * First, maybe a terminating short packet.  Some DMA
			 * engines might handle this by themselves.
			 */
			if ((request->zero && request->length
				&& request->length % musb_ep->packet_sz == 0)
#ifdef CONFIG_USB_INVENTRA_DMA
				|| (is_dma && (!dma->desired_mode ||
					(request->actual &
						(musb_ep->packet_sz - 1))))
#endif
			) {
				/*
				 * On DMA completion, FIFO may not be
				 * available yet...
				 */
				if (csr & MUSB_TXCSR_TXPKTRDY)
					return;

				DBG(4, "sending zero pkt\n");
				musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
						| MUSB_TXCSR_TXPKTRDY);
				request->zero = 0;
			}

			/* ... or if not, then complete it. */
			musb_g_giveback(musb_ep, request, 0);

			/*
			 * Kickstart next transfer if appropriate;
			 * the packet that just completed might not
			 * be transmitted for hours or days.
			 * REVISIT for double buffering...
			 * FIXME revisit for stalls too...
			 */
			musb_ep_select(mbase, epnum);
			csr = musb_readw(epio, MUSB_TXCSR);
			if (csr & MUSB_TXCSR_FIFONOTEMPTY)
				return;

			request = musb_ep->desc ? next_request(musb_ep) : NULL;
			if (!request) {
				DBG(4, "%s idle now\n",
					musb_ep->end_point.name);
				return;
			}
		}

		txstate(musb, to_musb_request(request));
	}
}
/* ------------------------------------------------------------ */
#ifdef CONFIG_USB_INVENTRA_DMA
/* Peripheral rx (OUT) using Mentor DMA works as follows:
	- Only mode 0 is used.

	- Request is queued by the gadget class driver.
		-> if queue was previously empty, rxstate()

	- Host sends OUT token which causes an endpoint interrupt
	  /\	-> RxReady
	  |	    -> if request queued, call rxstate
	  |	     /\	-> setup DMA
	  |	     |	    -> DMA interrupt on completion
	  |	     |	       -> RxReady
	  |	     |		   -> stop DMA
	  |	     |		   -> ack the read
	  |	     |		   -> if data received = max expected
	  |	     |			by the request, or host
	  |	     |			sent a short packet,
	  |	     |			complete the request,
	  |	     |			and start the next one.
	  |	     |_____________________________________|
	  |		 else just wait for the host
	  |		    to send the next OUT token.
	  |__________________________________________________|

 * Non-Mentor DMA engines can of course work differently.
 */
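/*
 * Illustrative sketch (not part of the original driver): the completion
 * rule described above.  A request is given back once it is full or the
 * host sent a short packet; the helper name is hypothetical, and
 * rxstate() below open-codes the same test at its tail.
 */
#if 0
static inline bool rx_request_done(const struct usb_request *request,
				   u16 rxcount, u16 packet_sz)
{
	/* full buffer, or a short packet from the host, ends the request */
	return (request->actual == request->length) ||
	       (rxcount < packet_sz);
}
#endif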
/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	const u8		epnum = req->epnum;
	struct usb_request	*request = &req->request;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_out;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	unsigned		fifo_count = 0;
	u16			len = musb_ep->packet_sz;
	u16			csr = musb_readw(epio, MUSB_RXCSR);

	/* We shouldn't get here while DMA is active, but we do... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		DBG(4, "DMA pending...\n");
		return;
	}

	if (csr & MUSB_RXCSR_P_SENDSTALL) {
		DBG(5, "%s stalling, RXCSR %04x\n",
		    musb_ep->end_point.name, csr);
		return;
	}

	if (is_cppi_enabled() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;
		struct dma_channel	*channel = musb_ep->dma;

		/* NOTE:  CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				musb_ep->packet_sz,
				!request->short_not_ok,
				request->dma + request->actual,
				request->length - request->actual)) {

			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			csr &= ~(MUSB_RXCSR_AUTOCLEAR
					| MUSB_RXCSR_DMAMODE);
			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_RXCSR, csr);
			return;
		}
	}

	if (csr & MUSB_RXCSR_RXPKTRDY) {
		len = musb_readw(epio, MUSB_RXCOUNT);
		if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
			if (is_dma_capable() && musb_ep->dma) {
				struct dma_controller	*c;
				struct dma_channel	*channel;
				int			use_dma = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
	 * mode 0 only. So we do not get endpoint interrupts due to DMA
	 * completion. We only get interrupts from DMA controller.
	 *
	 * We could operate in DMA mode 1 if we knew the size of the transfer
	 * in advance. For mass storage class, request->length = what the host
	 * sends, so that'd work.  But for pretty much everything else,
	 * request->length is routinely more than what the host sends. For
	 * most of these gadgets, end of transfer is signified either by a
	 * short packet, or filling the last byte of the buffer.  (Sending
	 * extra data in that last packet should trigger an overflow fault.)
	 * But in mode 1, we don't get a DMA completion interrupt for short
	 * packets.
	 *
	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
	 * to get endpoint interrupt on every DMA req, but that didn't seem
	 * to work reliably.
	 *
	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
	 * then becomes usable as a runtime "use mode 1" hint...
	 */

				csr |= MUSB_RXCSR_DMAENAB;
#ifdef USE_MODE1
				csr |= MUSB_RXCSR_AUTOCLEAR;
				/* csr |= MUSB_RXCSR_DMAMODE; */

				/* this special sequence (enabling and then
				 * disabling MUSB_RXCSR_DMAMODE) is required
				 * to get DMAReq to activate
				 */
				musb_writew(epio, MUSB_RXCSR,
					csr | MUSB_RXCSR_DMAMODE);
#endif
				musb_writew(epio, MUSB_RXCSR, csr);

				if (request->actual < request->length) {
					int transfer_size = 0;
#ifdef USE_MODE1
					transfer_size = min(request->length,
							channel->max_len);
#else
					transfer_size = len;
#endif
					if (transfer_size <= musb_ep->packet_sz)
						musb_ep->dma->desired_mode = 0;
					else
						musb_ep->dma->desired_mode = 1;

					use_dma = c->channel_program(
							channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size);
				}

				if (use_dma)
					return;
			}
#endif	/* Mentor's DMA */

			fifo_count = request->length - request->actual;
			DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
					musb_ep->end_point.name,
					len, fifo_count,
					musb_ep->packet_sz);

			fifo_count = min_t(unsigned, len, fifo_count);

#ifdef CONFIG_USB_TUSB_OMAP_DMA
			if (tusb_dma_omap() && musb_ep->dma) {
				struct dma_controller *c = musb->dma_controller;
				struct dma_channel *channel = musb_ep->dma;
				u32 dma_addr = request->dma + request->actual;
				int ret;

				ret = c->channel_program(channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						dma_addr,
						fifo_count);
				if (ret)
					return;
			}
#endif

			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
					(request->buf + request->actual));
			request->actual += fifo_count;

			/* REVISIT if we left anything in the fifo, flush
			 * it and report -EOVERFLOW
			 */

			/* ack the read! */
			csr |= MUSB_RXCSR_P_WZC_BITS;
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* reach the end or short packet detected */
	if (request->actual == request->length || len < musb_ep->packet_sz)
		musb_g_giveback(musb_ep, request, 0);
}
/*
 * Data ready for a request; called from IRQ
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct usb_request	*request;
	void __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_out;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);

	request = next_request(musb_ep);
	if (!request)
		return;

	csr = musb_readw(epio, MUSB_RXCSR);
	dma = is_dma_capable() ? musb_ep->dma : NULL;

	DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
			csr, dma ? " (dma)" : "", request);

	if (csr & MUSB_RXCSR_P_SENTSTALL) {
		csr |= MUSB_RXCSR_P_WZC_BITS;
		csr &= ~MUSB_RXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_RXCSR, csr);
		return;
	}

	if (csr & MUSB_RXCSR_P_OVERRUN) {
		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
		csr &= ~MUSB_RXCSR_P_OVERRUN;
		musb_writew(epio, MUSB_RXCSR, csr);

		DBG(3, "%s iso overrun on %p\n", musb_ep->name, request);
		if (request && request->status == -EINPROGRESS)
			request->status = -EOVERFLOW;
	}
	if (csr & MUSB_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		DBG(4, "%s, incomprx\n", musb_ep->end_point.name);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1,
			"%s busy, csr %04x\n",
			musb_ep->end_point.name, csr);
		return;
	}

	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
		csr &= ~(MUSB_RXCSR_AUTOCLEAR
				| MUSB_RXCSR_DMAENAB
				| MUSB_RXCSR_DMAMODE);
		musb_writew(epio, MUSB_RXCSR,
			MUSB_RXCSR_P_WZC_BITS | csr);

		request->actual += musb_ep->dma->actual_len;

		DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
			epnum, csr,
			musb_readw(epio, MUSB_RXCSR),
			musb_ep->dma->actual_len, request);

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
		/* Autoclear doesn't clear RxPktRdy for short packets */
		if ((dma->desired_mode == 0)
				|| (dma->actual_len
					& (musb_ep->packet_sz - 1))) {
			/* ack the read! */
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		/* incomplete, and not short? wait for next IN packet */
		if ((request->actual < request->length)
				&& (musb_ep->dma->actual_len
					== musb_ep->packet_sz))
			return;
#endif
		musb_g_giveback(musb_ep, request, 0);

		request = next_request(musb_ep);
		if (!request)
			return;
	}

	/* analyze request if the ep is hot */
	if (request)
		rxstate(musb, to_musb_request(request));
	else
		DBG(3, "packet waiting for %s%s request\n",
				musb_ep->desc ? "" : "inactive ",
				musb_ep->end_point.name);
}
/* ------------------------------------------------------------ */
static int musb_gadget_enable(struct usb_ep *ep,
			const struct usb_endpoint_descriptor *desc)
{
	unsigned long		flags;
	struct musb_ep		*musb_ep;
	struct musb_hw_ep	*hw_ep;
	void __iomem		*regs;
	struct musb		*musb;
	void __iomem		*mbase;
	u8			epnum;
	u16			csr;
	unsigned		tmp;
	int			status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	hw_ep = musb_ep->hw_ep;
	regs = hw_ep->regs;
	musb = musb_ep->musb;
	mbase = musb->mregs;
	epnum = musb_ep->current_epnum;

	spin_lock_irqsave(&musb->lock, flags);

	if (musb_ep->desc) {
		status = -EBUSY;
		goto fail;
	}
	musb_ep->type = usb_endpoint_type(desc);

	/* check direction and (later) maxpacket size against endpoint */
	if (usb_endpoint_num(desc) != epnum)
		goto fail;

	/* REVISIT this rules out high bandwidth periodic transfers */
	tmp = le16_to_cpu(desc->wMaxPacketSize);
	musb_ep->packet_sz = tmp;

	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	musb_ep_select(mbase, epnum);
	if (usb_endpoint_dir_in(desc)) {
		u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 1;
		if (!musb_ep->is_in)
			goto fail;
		if (tmp > hw_ep->max_packet_sz_tx)
			goto fail;

		int_txe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		/* Set TXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.  Currently, it seems that
		 * double buffering has a problem if the musb RTL revision
		 * number is < 2.0.
		 */
		if (musb->hwvers < MUSB_HWVERS_2000)
			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
		else
			musb_writew(regs, MUSB_TXMAXP, tmp);

		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MUSB_TXCSR)
				& MUSB_TXCSR_FIFONOTEMPTY)
			csr |= MUSB_TXCSR_FLUSHFIFO;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_TXCSR_P_ISO;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MUSB_TXCSR, csr);

	} else {
		u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 0;
		if (musb_ep->is_in)
			goto fail;
		if (tmp > hw_ep->max_packet_sz_rx)
			goto fail;

		int_rxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRRXE, int_rxe);

		/* REVISIT if can_bulk_combine() use by updating "tmp";
		 * likewise high bandwidth periodic rx
		 */
		/* Set RXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->hwvers < MUSB_HWVERS_2000)
			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx);
		else
			musb_writew(regs, MUSB_RXMAXP, tmp);

		/* force shared fifo to OUT-only mode */
		if (hw_ep->is_shared_fifo) {
			csr = musb_readw(regs, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
			musb_writew(regs, MUSB_TXCSR, csr);
		}

		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_RXCSR_P_ISO;
		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
			csr |= MUSB_RXCSR_DISNYET;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_RXCSR, csr);
		musb_writew(regs, MUSB_RXCSR, csr);
	}

	/* NOTE:  all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->dma_controller) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep->dma = c->channel_alloc(c, hw_ep,
				(desc->bEndpointAddress & USB_DIR_IN));
	} else
		musb_ep->dma = NULL;

	musb_ep->desc = desc;
	musb_ep->busy = 0;
	status = 0;

	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
			musb_driver_name, musb_ep->end_point.name,
			({ char *s; switch (musb_ep->type) {
			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
			default:			s = "iso"; break;
			}; s; }),
			musb_ep->is_in ? "IN" : "OUT",
			musb_ep->dma ? "dma, " : "",
			musb_ep->packet_sz);

	schedule_work(&musb->irq_work);

fail:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
/*
 * Disable an endpoint, flushing all requests queued.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
	unsigned long	flags;
	struct musb	*musb;
	u8		epnum;
	struct musb_ep	*musb_ep;
	void __iomem	*epio;
	int		status = 0;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	epnum = musb_ep->current_epnum;
	epio = musb->endpoints[epnum].regs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(musb->mregs, epnum);

	/* zero the endpoint sizes */
	if (musb_ep->is_in) {
		u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
		int_txe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
		musb_writew(epio, MUSB_TXMAXP, 0);
	} else {
		u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
		int_rxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
		musb_writew(epio, MUSB_RXMAXP, 0);
	}

	musb_ep->desc = NULL;

	/* abort all pending DMA and requests */
	nuke(musb_ep, -ESHUTDOWN);

	schedule_work(&musb->irq_work);

	spin_unlock_irqrestore(&(musb->lock), flags);

	DBG(2, "%s\n", musb_ep->end_point.name);

	return status;
}
/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb_request	*request = NULL;

	request = kzalloc(sizeof *request, gfp_flags);
	if (request) {
		INIT_LIST_HEAD(&request->request.list);
		request->request.dma = DMA_ADDR_INVALID;
		request->epnum = musb_ep->current_epnum;
		request->ep = musb_ep;
	}

	return &request->request;
}

/*
 * Free a request.
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	kfree(to_musb_request(req));
}
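/*
 * Illustrative sketch (not part of the original driver): a gadget
 * function driver reaches these through the standard usb_ep wrappers;
 * the function and parameter names below are hypothetical.
 */
#if 0
static void example_alloc_and_free(struct usb_ep *ep)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, GFP_KERNEL); /* -> musb_alloc_request() */
	if (!req)
		return;
	usb_ep_free_request(ep, req);		    /* -> musb_free_request() */
}
#endif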
static LIST_HEAD(buffers);

struct free_record {
	struct list_head	list;
	struct device		*dev;
	unsigned		bytes;
	dma_addr_t		dma;
};

/*
 * Context: controller locked, IRQs blocked.
 */
static void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
	DBG(3, "<== %s request %p len %u on hw_ep%d\n",
		req->tx ? "TX/IN" : "RX/OUT",
		&req->request, req->request.length, req->epnum);

	musb_ep_select(musb->mregs, req->epnum);
	if (req->tx)
		txstate(musb, req);
	else
		rxstate(musb, req);
}
static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
			gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep;
	struct musb_request	*request;
	struct musb		*musb;
	int			status = 0;
	unsigned long		lockflags;

	if (!ep || !req)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;

	request = to_musb_request(req);
	request->musb = musb;

	if (request->ep != musb_ep)
		return -EINVAL;

	DBG(4, "<== to %s request=%p\n", ep->name, req);

	/* request is mine now... */
	request->request.actual = 0;
	request->request.status = -EINPROGRESS;
	request->epnum = musb_ep->current_epnum;
	request->tx = musb_ep->is_in;

	if (is_dma_capable() && musb_ep->dma) {
		if (request->request.dma == DMA_ADDR_INVALID) {
			request->request.dma = dma_map_single(
					musb->controller,
					request->request.buf,
					request->request.length,
					request->tx
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
			request->mapped = 1;
		} else {
			dma_sync_single_for_device(musb->controller,
					request->request.dma,
					request->request.length,
					request->tx
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
			request->mapped = 0;
		}
	} else if (!req->buf) {
		return -ENODATA;
	} else
		request->mapped = 0;

	spin_lock_irqsave(&musb->lock, lockflags);

	/* don't queue if the ep is down */
	if (!musb_ep->desc) {
		DBG(4, "req %p queued to %s while ep %s\n",
				req, ep->name, "disabled");
		status = -ESHUTDOWN;
		goto cleanup;
	}

	/* add request to the list */
	list_add_tail(&(request->request.list), &(musb_ep->req_list));

	/* if this is the head of the queue, start i/o ... */
	if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next)
		musb_ep_restart(musb, request);

cleanup:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	return status;
}
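/*
 * Illustrative sketch (not part of the original driver): how a gadget
 * function driver queues a request.  usb_ep_queue() lands in
 * musb_gadget_queue() above; everything named example_* is hypothetical.
 */
#if 0
static void example_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* runs from musb_g_giveback(), with the controller lock dropped */
}

static int example_submit(struct usb_ep *ep, void *buf, unsigned len)
{
	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);

	if (!req)
		return -ENOMEM;
	req->buf = buf;
	req->length = len;
	req->complete = example_complete;
	return usb_ep_queue(ep, req, GFP_ATOMIC);
}
#endif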
static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct usb_request	*r;
	unsigned long		flags;
	int			status = 0;
	struct musb		*musb = musb_ep->musb;

	if (!ep || !request || to_musb_request(request)->ep != musb_ep)
		return -EINVAL;

	spin_lock_irqsave(&musb->lock, flags);

	list_for_each_entry(r, &musb_ep->req_list, list) {
		if (r == request)
			break;
	}
	if (r != request) {
		DBG(3, "request %p not queued to %s\n", request, ep->name);
		status = -EINVAL;
		goto done;
	}

	/* if the hardware doesn't have the request, easy ... */
	if (musb_ep->req_list.next != &request->list || musb_ep->busy)
		musb_g_giveback(musb_ep, request, -ECONNRESET);

	/* ... else abort the dma transfer ... */
	else if (is_dma_capable() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep_select(musb->mregs, musb_ep->current_epnum);
		if (c->channel_abort)
			status = c->channel_abort(musb_ep->dma);
		else
			status = -EBUSY;
		if (status == 0)
			musb_g_giveback(musb_ep, request, -ECONNRESET);
	} else {
		/* NOTE: by sticking to easily tested hardware/driver states,
		 * we leave counting of in-flight packets imprecise.
		 */
		musb_g_giveback(musb_ep, request, -ECONNRESET);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
/*
 * Set or clear the halt bit of an endpoint.  A halted endpoint won't tx/rx
 * any data but will queue requests.
 *
 * exported to ep0 code
 */
static int musb_gadget_set_halt(struct usb_ep *ep, int value)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	u8			epnum = musb_ep->current_epnum;
	struct musb		*musb = musb_ep->musb;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	void __iomem		*mbase;
	unsigned long		flags;
	u16			csr;
	struct musb_request	*request;
	int			status = 0;

	if (!ep)
		return -EINVAL;
	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);

	if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) {
		status = -EINVAL;
		goto done;
	}

	musb_ep_select(mbase, epnum);

	request = to_musb_request(next_request(musb_ep));
	if (value) {
		if (request) {
			DBG(3, "request in progress, cannot halt %s\n",
				ep->name);
			status = -EAGAIN;
			goto done;
		}
		/* Cannot portably stall with non-empty FIFO */
		if (musb_ep->is_in) {
			csr = musb_readw(epio, MUSB_TXCSR);
			if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
				DBG(3, "FIFO busy, cannot halt %s\n", ep->name);
				status = -EAGAIN;
				goto done;
			}
		}
	} else
		musb_ep->wedged = 0;

	/* set/clear the stall and toggle bits */
	DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr |= MUSB_TXCSR_P_WZC_BITS
			| MUSB_TXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_TXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_TXCSR_P_SENDSTALL
				| MUSB_TXCSR_P_SENTSTALL);
		csr &= ~MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_P_WZC_BITS
			| MUSB_RXCSR_FLUSHFIFO
			| MUSB_RXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_RXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_RXCSR_P_SENDSTALL
				| MUSB_RXCSR_P_SENTSTALL);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* maybe start the first request in the queue */
	if (!musb_ep->busy && !value && request) {
		DBG(3, "restarting the request\n");
		musb_ep_restart(musb, request);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
/*
 * Sets the halt feature with the clear requests ignored
 */
static int musb_gadget_set_wedge(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);

	if (!ep)
		return -EINVAL;

	musb_ep->wedged = 1;

	return usb_ep_set_halt(ep);
}
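/*
 * Illustrative sketch (not part of the original driver): how a function
 * driver halts and later un-halts an endpoint.  usb_ep_set_halt() and
 * usb_ep_clear_halt() resolve to musb_gadget_set_halt() above; the
 * example function name is hypothetical.
 */
#if 0
static void example_stall_then_resume(struct usb_ep *ep)
{
	if (usb_ep_set_halt(ep) == -EAGAIN)
		return;	/* FIFO busy, or a request is still in progress */
	/* ... later, e.g. once the host clears the halt condition ... */
	usb_ep_clear_halt(ep);
}
#endif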
static int musb_gadget_fifo_status(struct usb_ep *ep)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	void __iomem		*epio = musb_ep->hw_ep->regs;
	int			retval = -EINVAL;

	if (musb_ep->desc && !musb_ep->is_in) {
		struct musb		*musb = musb_ep->musb;
		int			epnum = musb_ep->current_epnum;
		void __iomem		*mbase = musb->mregs;
		unsigned long		flags;

		spin_lock_irqsave(&musb->lock, flags);

		musb_ep_select(mbase, epnum);
		/* FIXME return zero unless RXPKTRDY is set */
		retval = musb_readw(epio, MUSB_RXCOUNT);

		spin_unlock_irqrestore(&musb->lock, flags);
	}
	return retval;
}
static void musb_gadget_fifo_flush(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);
	struct musb	*musb = musb_ep->musb;
	u8		epnum = musb_ep->current_epnum;
	void __iomem	*epio = musb->endpoints[epnum].regs;
	void __iomem	*mbase;
	unsigned long	flags;
	u16		csr, int_txe;

	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(mbase, (u8) epnum);

	/* disable interrupts */
	int_txe = musb_readw(mbase, MUSB_INTRTXE);
	musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* re-enable interrupt */
	musb_writew(mbase, MUSB_INTRTXE, int_txe);
	spin_unlock_irqrestore(&musb->lock, flags);
}
static const struct usb_ep_ops musb_ep_ops = {
	.enable		= musb_gadget_enable,
	.disable	= musb_gadget_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_gadget_queue,
	.dequeue	= musb_gadget_dequeue,
	.set_halt	= musb_gadget_set_halt,
	.set_wedge	= musb_gadget_set_wedge,
	.fifo_status	= musb_gadget_fifo_status,
	.fifo_flush	= musb_gadget_fifo_flush
};
/* ----------------------------------------------------------------------- */
static int musb_gadget_get_frame(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);

	return (int)musb_readw(musb->mregs, MUSB_FRAME);
}
static int musb_gadget_wakeup(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);
	void __iomem	*mregs = musb->mregs;
	unsigned long	flags;
	int		status = -EINVAL;
	u8		power, devctl;
	int		retries;

	spin_lock_irqsave(&musb->lock, flags);

	switch (musb->xceiv->state) {
	case OTG_STATE_B_PERIPHERAL:
		/* NOTE:  OTG state machine doesn't include B_SUSPENDED;
		 * that's part of the standard usb 1.1 state machine, and
		 * doesn't affect OTG transitions.
		 */
		if (musb->may_wakeup && musb->is_suspended)
			break;
		goto done;
	case OTG_STATE_B_IDLE:
		/* Start SRP ... OTG not required. */
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		DBG(2, "Sending SRP: devctl: %02x\n", devctl);
		devctl |= MUSB_DEVCTL_SESSION;
		musb_writeb(mregs, MUSB_DEVCTL, devctl);
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		retries = 100;
		while (!(devctl & MUSB_DEVCTL_SESSION)) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}
		retries = 10000;
		while (devctl & MUSB_DEVCTL_SESSION) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}

		/* Block idling for at least 1s */
		musb_platform_try_idle(musb,
			jiffies + msecs_to_jiffies(1 * HZ));

		status = 0;
		goto done;
	default:
		DBG(2, "Unhandled wake: %s\n", otg_state_string(musb));
		goto done;
	}

	status = 0;

	power = musb_readb(mregs, MUSB_POWER);
	power |= MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
	DBG(2, "issue wakeup\n");

	/* FIXME do this next chunk in a timer callback, no udelay */
	mdelay(2);

	power = musb_readb(mregs, MUSB_POWER);
	power &= ~MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
static int
musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
{
	struct musb	*musb = gadget_to_musb(gadget);

	musb->is_self_powered = !!is_selfpowered;
	return 0;
}
static void musb_pullup(struct musb *musb, int is_on)
{
	u8 power;

	power = musb_readb(musb->mregs, MUSB_POWER);
	if (is_on)
		power |= MUSB_POWER_SOFTCONN;
	else
		power &= ~MUSB_POWER_SOFTCONN;

	/* FIXME if on, HdrcStart; if off, HdrcStop */

	DBG(3, "gadget %s D+ pullup %s\n",
		musb->gadget_driver->function, is_on ? "on" : "off");
	musb_writeb(musb->mregs, MUSB_POWER, power);
}
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
	DBG(2, "<= %s =>\n", __func__);

	/*
	 * FIXME iff driver's softconnect flag is set (as it is during probe,
	 * though that can clear it), just musb_pullup().
	 */

	return -EINVAL;
}
static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	struct musb	*musb = gadget_to_musb(gadget);

	if (!musb->xceiv->set_power)
		return -EOPNOTSUPP;
	return otg_set_power(musb->xceiv, mA);
}
static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct musb	*musb = gadget_to_musb(gadget);
	unsigned long	flags;

	is_on = !!is_on;

	/* NOTE: this assumes we are sensing vbus; we'd rather
	 * not pullup unless the B-session is active.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (is_on != musb->softconnect) {
		musb->softconnect = is_on;
		musb_pullup(musb, is_on);
	}
	spin_unlock_irqrestore(&musb->lock, flags);
	return 0;
}
static const struct usb_gadget_ops musb_gadget_operations = {
	.get_frame		= musb_gadget_get_frame,
	.wakeup			= musb_gadget_wakeup,
	.set_selfpowered	= musb_gadget_set_self_powered,
	/* .vbus_session	= musb_gadget_vbus_session, */
	.vbus_draw		= musb_gadget_vbus_draw,
	.pullup			= musb_gadget_pullup,
};
/* ----------------------------------------------------------------------- */
/* Only this registration code "knows" the rule (from USB standards)
 * about there being only one external upstream port.  It assumes
 * all peripheral ports are external...
 */
static struct musb *the_gadget;

static void musb_gadget_release(struct device *dev)
{
	/* kref_put(WHAT) */
	dev_dbg(dev, "%s\n", __func__);
}
static void __init
init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
{
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;

	memset(ep, 0, sizeof *ep);

	ep->current_epnum = epnum;
	ep->musb = musb;
	ep->hw_ep = hw_ep;
	ep->is_in = is_in;

	INIT_LIST_HEAD(&ep->req_list);

	sprintf(ep->name, "ep%d%s", epnum,
			(!epnum || hw_ep->is_shared_fifo) ? "" : (
				is_in ? "in" : "out"));
	ep->end_point.name = ep->name;
	INIT_LIST_HEAD(&ep->end_point.ep_list);
	if (!epnum) {
		ep->end_point.maxpacket = 64;
		ep->end_point.ops = &musb_g_ep0_ops;
		musb->g.ep0 = &ep->end_point;
	} else {
		if (is_in)
			ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
		else
			ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
		ep->end_point.ops = &musb_ep_ops;
		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
	}
}
/*
 * Initialize the endpoints exposed to peripheral drivers, with backlinks
 * to the rest of the driver state.
 */
static inline void __init musb_g_init_endpoints(struct musb *musb)
{
	u8			epnum;
	struct musb_hw_ep	*hw_ep;
	unsigned		count = 0;

	/* initialize endpoint list just once */
	INIT_LIST_HEAD(&(musb->g.ep_list));

	for (epnum = 0, hw_ep = musb->endpoints;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		if (hw_ep->is_shared_fifo /* || !epnum */) {
			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
			count++;
		} else {
			if (hw_ep->max_packet_sz_tx) {
				init_peripheral_ep(musb, &hw_ep->ep_in,
							epnum, 1);
				count++;
			}
			if (hw_ep->max_packet_sz_rx) {
				init_peripheral_ep(musb, &hw_ep->ep_out,
							epnum, 0);
				count++;
			}
		}
	}
}
/* called once during driver setup to initialize and link into
 * the driver model; memory is zeroed.
 */
int __init musb_gadget_setup(struct musb *musb)
{
	int status;

	/* REVISIT minor race:  if (erroneously) setting up two
	 * musb peripherals at the same time, only the bus lock
	 * is probably held.
	 */
	if (the_gadget)
		return -EBUSY;
	the_gadget = musb;

	musb->g.ops = &musb_gadget_operations;
	musb->g.is_dualspeed = 1;
	musb->g.speed = USB_SPEED_UNKNOWN;

	/* this "gadget" abstracts/virtualizes the controller */
	dev_set_name(&musb->g.dev, "gadget");
	musb->g.dev.parent = musb->controller;
	musb->g.dev.dma_mask = musb->controller->dma_mask;
	musb->g.dev.release = musb_gadget_release;
	musb->g.name = musb_driver_name;

	if (is_otg_enabled(musb))
		musb->g.is_otg = 1;

	musb_g_init_endpoints(musb);

	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);

	status = device_register(&musb->g.dev);
	if (status != 0)
		the_gadget = NULL;
	return status;
}
void musb_gadget_cleanup(struct musb *musb)
{
	if (musb != the_gadget)
		return;

	device_unregister(&musb->g.dev);
	the_gadget = NULL;
}
/*
 * Register the gadget driver.  Used by gadget drivers when
 * registering themselves with the controller.
 *
 * -EINVAL something went wrong (not driver)
 * -EBUSY another gadget is already using the controller
 * -ENOMEM no memory to perform the operation
 *
 * @param driver the gadget driver
 * @return <0 if error, 0 if everything is fine
 */
int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	int retval;
	unsigned long flags;
	struct musb *musb = the_gadget;

	if (!driver
			|| driver->speed != USB_SPEED_HIGH
			|| !driver->bind
			|| !driver->setup)
		return -EINVAL;

	/* driver must be initialized to support peripheral mode */
	if (!musb) {
		DBG(1, "%s, no dev??\n", __func__);
		return -ENODEV;
	}

	DBG(3, "registering driver %s\n", driver->function);
	spin_lock_irqsave(&musb->lock, flags);

	if (musb->gadget_driver) {
		DBG(1, "%s is already bound to %s\n",
				musb_driver_name,
				musb->gadget_driver->driver.name);
		retval = -EBUSY;
	} else {
		musb->gadget_driver = driver;
		musb->g.dev.driver = &driver->driver;
		driver->driver.bus = NULL;
		musb->softconnect = 1;
		retval = 0;
	}

	spin_unlock_irqrestore(&musb->lock, flags);

	if (retval == 0) {
		retval = driver->bind(&musb->g);
		if (retval != 0) {
			DBG(3, "bind to driver %s failed --> %d\n",
					driver->driver.name, retval);
			musb->gadget_driver = NULL;
			musb->g.dev.driver = NULL;
		}

		spin_lock_irqsave(&musb->lock, flags);

		otg_set_peripheral(musb->xceiv, &musb->g);
		musb->xceiv->state = OTG_STATE_B_IDLE;
		musb->is_active = 1;

		/* FIXME this ignores the softconnect flag.  Drivers are
		 * allowed to hold the peripheral inactive until for example
		 * userspace hooks up printer hardware or DSP codecs, so
		 * hosts only see fully functional devices.
		 */

		if (!is_otg_enabled(musb))
			musb_start(musb);

		otg_set_peripheral(musb->xceiv, &musb->g);

		spin_unlock_irqrestore(&musb->lock, flags);

		if (is_otg_enabled(musb)) {
			DBG(3, "OTG startup...\n");

			/* REVISIT:  funcall to other code, which also
			 * handles power budgeting ... this way also
			 * ensures HdrcStart is indirectly called.
			 */
			retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
			if (retval < 0) {
				DBG(1, "add_hcd failed, %d\n", retval);
				spin_lock_irqsave(&musb->lock, flags);
				otg_set_peripheral(musb->xceiv, NULL);
				musb->gadget_driver = NULL;
				musb->g.dev.driver = NULL;
				spin_unlock_irqrestore(&musb->lock, flags);
			}
		}
	}

	return retval;
}
EXPORT_SYMBOL(usb_gadget_register_driver);
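/*
 * Illustrative sketch (not part of the original driver): the registration
 * handshake from a gadget driver's point of view.  Only
 * usb_gadget_register_driver() is real here; everything named example_*
 * is hypothetical.
 */
#if 0
static int example_bind(struct usb_gadget *gadget)
{
	/* claim endpoints, allocate ep0 request, etc. */
	return 0;
}

static int example_setup(struct usb_gadget *gadget,
			 const struct usb_ctrlrequest *ctrl)
{
	return -EOPNOTSUPP;	/* no class control requests handled */
}

static void example_disconnect(struct usb_gadget *gadget)
{
}

static struct usb_gadget_driver example_driver = {
	.function	= "example",
	.speed		= USB_SPEED_HIGH, /* required by the check above */
	.bind		= example_bind,
	.setup		= example_setup,
	.disconnect	= example_disconnect,
	.driver		= {
		.name	= "example",
	},
};

static int __init example_init(void)
{
	return usb_gadget_register_driver(&example_driver);
}
#endif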
static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
{
	int			i;
	struct musb_hw_ep	*hw_ep;

	/* don't disconnect if it's not connected */
	if (musb->g.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	else
		musb->g.speed = USB_SPEED_UNKNOWN;

	/* deactivate the hardware */
	if (musb->softconnect) {
		musb->softconnect = 0;
		musb_pullup(musb, 0);
	}
	musb_stop(musb);

	/* killing any outstanding requests will quiesce the driver;
	 * then report disconnect
	 */
	if (driver) {
		for (i = 0, hw_ep = musb->endpoints;
				i < musb->nr_endpoints;
				i++, hw_ep++) {
			musb_ep_select(musb->mregs, i);
			if (hw_ep->is_shared_fifo /* || !epnum */) {
				nuke(&hw_ep->ep_in, -ESHUTDOWN);
			} else {
				if (hw_ep->max_packet_sz_tx)
					nuke(&hw_ep->ep_in, -ESHUTDOWN);
				if (hw_ep->max_packet_sz_rx)
					nuke(&hw_ep->ep_out, -ESHUTDOWN);
			}
		}

		spin_unlock(&musb->lock);
		driver->disconnect(&musb->g);
		spin_lock(&musb->lock);
	}
}
/*
 * Unregister the gadget driver.  Used by gadget drivers when
 * unregistering themselves from the controller.
 *
 * @param driver the gadget driver to unregister
 */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	unsigned long	flags;
	int		retval = 0;
	struct musb	*musb = the_gadget;

	if (!driver || !driver->unbind || !musb)
		return -EINVAL;

	/* REVISIT always use otg_set_peripheral() here too;
	 * this needs to shut down the OTG engine.
	 */

	spin_lock_irqsave(&musb->lock, flags);

#ifdef	CONFIG_USB_MUSB_OTG
	musb_hnp_stop(musb);
#endif

	if (musb->gadget_driver == driver) {

		(void) musb_gadget_vbus_draw(&musb->g, 0);

		musb->xceiv->state = OTG_STATE_UNDEFINED;
		stop_activity(musb, driver);
		otg_set_peripheral(musb->xceiv, NULL);

		DBG(3, "unregistering driver %s\n", driver->function);
		spin_unlock_irqrestore(&musb->lock, flags);
		driver->unbind(&musb->g);
		spin_lock_irqsave(&musb->lock, flags);

		musb->gadget_driver = NULL;
		musb->g.dev.driver = NULL;

		musb->is_active = 0;
		musb_platform_try_idle(musb, 0);
	} else
		retval = -EINVAL;
	spin_unlock_irqrestore(&musb->lock, flags);

	if (is_otg_enabled(musb) && retval == 0) {
		usb_remove_hcd(musb_to_hcd(musb));
		/* FIXME we need to be able to register another
		 * gadget driver here and have everything work;
		 * that currently misbehaves.
		 */
	}

	return retval;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);


/* ----------------------------------------------------------------------- */

/* lifecycle operations called through plat_uds.c */
void musb_g_resume(struct musb *musb)
{
	musb->is_suspended = 0;
	switch (musb->xceiv->state) {
	case OTG_STATE_B_IDLE:
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_PERIPHERAL:
		musb->is_active = 1;
		if (musb->gadget_driver && musb->gadget_driver->resume) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->resume(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		WARNING("unhandled RESUME transition (%s)\n",
				otg_state_string(musb));
	}
}
/* called when SOF packets stop for 3+ msec */
void musb_g_suspend(struct musb *musb)
{
	u8	devctl;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	DBG(3, "devctl %02x\n", devctl);

	switch (musb->xceiv->state) {
	case OTG_STATE_B_IDLE:
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
		break;
	case OTG_STATE_B_PERIPHERAL:
		musb->is_suspended = 1;
		if (musb->gadget_driver && musb->gadget_driver->suspend) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->suspend(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
		 * A_PERIPHERAL may need care too
		 */
		WARNING("unhandled SUSPEND transition (%s)\n",
				otg_state_string(musb));
	}
}
/* Called during SRP */
void musb_g_wakeup(struct musb *musb)
{
	musb_gadget_wakeup(&musb->g);
}
/* called when VBUS drops below session threshold, and in other cases */
void musb_g_disconnect(struct musb *musb)
{
	void __iomem	*mregs = musb->mregs;
	u8	devctl = musb_readb(mregs, MUSB_DEVCTL);

	DBG(3, "devctl %02x\n", devctl);

	/* clear HR */
	musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);

	/* don't draw vbus until new b-default session */
	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb->g.speed = USB_SPEED_UNKNOWN;
	if (musb->gadget_driver && musb->gadget_driver->disconnect) {
		spin_unlock(&musb->lock);
		musb->gadget_driver->disconnect(&musb->g);
		spin_lock(&musb->lock);
	}

	switch (musb->xceiv->state) {
	default:
#ifdef	CONFIG_USB_MUSB_OTG
		DBG(2, "Unhandled disconnect %s, setting a_idle\n",
			otg_state_string(musb));
		musb->xceiv->state = OTG_STATE_A_IDLE;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_A_PERIPHERAL:
		musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_HOST:
#endif
	case OTG_STATE_B_PERIPHERAL:
	case OTG_STATE_B_IDLE:
		musb->xceiv->state = OTG_STATE_B_IDLE;
		break;
	case OTG_STATE_B_SRP_INIT:
		break;
	}

	musb->is_active = 0;
}
void musb_g_reset(struct musb *musb)
__releases(musb->lock)
__acquires(musb->lock)
{
	void __iomem	*mbase = musb->mregs;
	u8		devctl = musb_readb(mbase, MUSB_DEVCTL);
	u8		power;

	DBG(3, "<== %s addr=%x driver '%s'\n",
			(devctl & MUSB_DEVCTL_BDEVICE)
				? "B-Device" : "A-Device",
			musb_readb(mbase, MUSB_FADDR),
			musb->gadget_driver
				? musb->gadget_driver->driver.name
				: NULL
			);

	/* report disconnect, if we didn't already (flushing EP state) */
	if (musb->g.speed != USB_SPEED_UNKNOWN)
		musb_g_disconnect(musb);

	/* clear HR */
	else if (devctl & MUSB_DEVCTL_HR)
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);


	/* what speed did we negotiate? */
	power = musb_readb(mbase, MUSB_POWER);
	musb->g.speed = (power & MUSB_POWER_HSMODE)
			? USB_SPEED_HIGH : USB_SPEED_FULL;

	/* start in USB_STATE_DEFAULT */
	musb->is_active = 1;
	musb->is_suspended = 0;
	MUSB_DEV_MODE(musb);
	musb->ep0_state = MUSB_EP0_STAGE_SETUP;

	musb->may_wakeup = 0;
	musb->g.b_hnp_enable = 0;
	musb->g.a_alt_hnp_support = 0;
	musb->g.a_hnp_support = 0;

	/* Normal reset, as B-Device;
	 * or else after HNP, as A-Device
	 */
	if (devctl & MUSB_DEVCTL_BDEVICE) {
		musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
		musb->g.is_a_peripheral = 0;
	} else if (is_otg_enabled(musb)) {
		musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
		musb->g.is_a_peripheral = 1;
	}

	/* start with default limits on VBUS power draw */
	(void) musb_gadget_vbus_draw(&musb->g,
			is_otg_enabled(musb) ? 8 : 100);
}