2 * MUSB OTG driver peripheral support
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
24 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
25 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
26 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
29 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
30 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 #include <linux/kernel.h>
37 #include <linux/list.h>
38 #include <linux/timer.h>
39 #include <linux/module.h>
40 #include <linux/smp.h>
41 #include <linux/spinlock.h>
42 #include <linux/delay.h>
43 #include <linux/moduleparam.h>
44 #include <linux/stat.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/slab.h>
48 #include "musb_core.h"
51 /* MUSB PERIPHERAL status 3-mar-2006:
53 * - EP0 seems solid. It passes both USBCV and usbtest control cases.
56 * + remote wakeup to Linux hosts works, but USBCV failures were
57 * seen in one test run (operator error?)
58 * + endpoint halt tests -- in both usbtest and usbcv -- seem
59 * to break when dma is enabled ... is something wrongly
62 * - Mass storage behaved ok when last tested. Network traffic patterns
63 * (with lots of short transfers etc) need retesting; they turn up the
64 * worst cases of the DMA, since short packets are typical but are not
68 * + both pio and dma behave well with network and g_zero tests
69 * + no cppi throughput issues other than no-hw-queueing
70 * + failed with FLAT_REG (DaVinci)
71 * + seems to behave with double buffering, PIO -and- CPPI
72 * + with gadgetfs + AIO, requests got lost?
75 * + both pio and dma behave well with network and g_zero tests
76 * + dma is slow in typical case (short_not_ok is clear)
77 * + double buffering ok with PIO
78 * + double buffering *FAILS* with CPPI, wrong data bytes sometimes
79 * + request lossage observed with gadgetfs
81 * - ISO not tested ... might work, but only weakly isochronous
83 * - Gadget driver disabling of softconnect during bind() is ignored; so
84 * drivers can't hold off host requests until userspace is ready.
85 * (Workaround: they can turn it off later.)
87 * - PORTABILITY (assumes PIO works):
88 * + DaVinci, basically works with cppi dma
89 * + OMAP 2430, ditto with mentor dma
90 * + TUSB 6010, platform-specific dma in the works
93 /* ----------------------------------------------------------------------- */
96 * Immediately complete a request.
98 * @param request the request to complete
99 * @param status the status to complete the request with
100 * Context: controller locked, IRQs blocked.
102 void musb_g_giveback(
104 struct usb_request *request,
106 __releases(ep->musb->lock)
107 __acquires(ep->musb->lock)
109 struct musb_request *req;
113 req = to_musb_request(request);
115 list_del(&request->list);
116 if (req->request.status == -EINPROGRESS)
117 req->request.status = status;
121 spin_unlock(&musb->lock);
122 if (is_dma_capable()) {
124 dma_unmap_single(musb->controller,
130 req->request.dma = DMA_ADDR_INVALID;
132 } else if (req->request.dma != DMA_ADDR_INVALID)
133 dma_sync_single_for_cpu(musb->controller,
140 if (request->status == 0)
141 DBG(5, "%s done request %p, %d/%d\n",
142 ep->end_point.name, request,
143 req->request.actual, req->request.length);
145 DBG(2, "%s request %p, %d/%d fault %d\n",
146 ep->end_point.name, request,
147 req->request.actual, req->request.length,
149 req->request.complete(&req->ep->end_point, &req->request);
150 spin_lock(&musb->lock);
154 /* ----------------------------------------------------------------------- */
157 * Abort requests queued to an endpoint using the status. Synchronous.
158 * Caller has locked the controller, blocked IRQs, and selected this ep.
160 static void nuke(struct musb_ep *ep, const int status)
162 struct musb_request *req = NULL;
163 void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
167 if (is_dma_capable() && ep->dma) {
168 struct dma_controller *c = ep->musb->dma_controller;
173 * The programming guide says that we must not clear
174 * the DMAMODE bit before DMAENAB, so we only
175 * clear it in the second write...
177 musb_writew(epio, MUSB_TXCSR,
178 MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
179 musb_writew(epio, MUSB_TXCSR,
180 0 | MUSB_TXCSR_FLUSHFIFO);
182 musb_writew(epio, MUSB_RXCSR,
183 0 | MUSB_RXCSR_FLUSHFIFO);
184 musb_writew(epio, MUSB_RXCSR,
185 0 | MUSB_RXCSR_FLUSHFIFO);
188 value = c->channel_abort(ep->dma);
189 DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
190 c->channel_release(ep->dma);
194 while (!list_empty(&(ep->req_list))) {
195 req = container_of(ep->req_list.next, struct musb_request,
197 musb_g_giveback(ep, &req->request, status);
201 /* ----------------------------------------------------------------------- */
203 /* Data transfers - pure PIO, pure DMA, or mixed mode */
206 * This assumes the separate CPPI engine is responding to DMA requests
207 * from the usb core ... sequenced a bit differently from mentor dma.
210 static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
212 if (can_bulk_split(musb, ep->type))
213 return ep->hw_ep->max_packet_sz_tx;
215 return ep->packet_sz;
219 #ifdef CONFIG_USB_INVENTRA_DMA
221 /* Peripheral tx (IN) using Mentor DMA works as follows:
222 Only mode 0 is used for transfers <= wPktSize,
223 mode 1 is used for larger transfers,
225 One of the following happens:
226 - Host sends IN token which causes an endpoint interrupt
228 -> if DMA is currently busy, exit.
229 -> if queue is non-empty, txstate().
231 - Request is queued by the gadget driver.
232 -> if queue was previously empty, txstate()
237 | (data is transferred to the FIFO, then sent out when
238 | IN token(s) are received from the host.)
239 | -> DMA interrupt on completion
241 | -> stop DMA, ~DMAENAB,
242 | -> set TxPktRdy for last short pkt or zlp
243 | -> Complete Request
244 | -> Continue next request (call txstate)
245 |___________________________________|
247 * Non-Mentor DMA engines can of course work differently, such as by
248 * upleveling from irq-per-packet to irq-per-buffer.
254 * An endpoint is transmitting data. This can be called either from
255 * the IRQ routine or from ep.queue() to kickstart a request on an endpoint.
258 * Context: controller locked, IRQs blocked, endpoint selected
260 static void txstate(struct musb *musb, struct musb_request *req)
262 u8 epnum = req->epnum;
263 struct musb_ep *musb_ep;
264 void __iomem *epio = musb->endpoints[epnum].regs;
265 struct usb_request *request;
266 u16 fifo_count = 0, csr;
271 /* we shouldn't get here while DMA is active ... but we do ... */
272 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
273 DBG(4, "dma pending...\n");
277 /* read TXCSR before */
278 csr = musb_readw(epio, MUSB_TXCSR);
280 request = &req->request;
281 fifo_count = min(max_ep_writesize(musb, musb_ep),
282 (int)(request->length - request->actual));
284 if (csr & MUSB_TXCSR_TXPKTRDY) {
285 DBG(5, "%s old packet still ready , txcsr %03x\n",
286 musb_ep->end_point.name, csr);
290 if (csr & MUSB_TXCSR_P_SENDSTALL) {
291 DBG(5, "%s stalling, txcsr %03x\n",
292 musb_ep->end_point.name, csr);
296 DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
297 epnum, musb_ep->packet_sz, fifo_count,
300 #ifndef CONFIG_MUSB_PIO_ONLY
301 if (is_dma_capable() && musb_ep->dma) {
302 struct dma_controller *c = musb->dma_controller;
304 use_dma = (request->dma != DMA_ADDR_INVALID);
306 /* MUSB_TXCSR_P_ISO is still set correctly */
308 #ifdef CONFIG_USB_INVENTRA_DMA
312 /* setup DMA, then program endpoint CSR */
313 request_size = min_t(size_t, request->length,
314 musb_ep->dma->max_len);
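/* Per the note above: mode 0 for transfers smaller than one packet,
 * mode 1 for everything else.
 */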
315 if (request_size < musb_ep->packet_sz)
316 musb_ep->dma->desired_mode = 0;
318 musb_ep->dma->desired_mode = 1;
320 use_dma = use_dma && c->channel_program(
321 musb_ep->dma, musb_ep->packet_sz,
322 musb_ep->dma->desired_mode,
323 request->dma + request->actual, request_size);
325 if (musb_ep->dma->desired_mode == 0) {
327 * We must not clear the DMAMODE bit
328 * before the DMAENAB bit -- and the
329 * latter doesn't always get cleared
330 * before we get here...
332 csr &= ~(MUSB_TXCSR_AUTOSET
333 | MUSB_TXCSR_DMAENAB);
334 musb_writew(epio, MUSB_TXCSR, csr
335 | MUSB_TXCSR_P_WZC_BITS);
336 csr &= ~MUSB_TXCSR_DMAMODE;
337 csr |= (MUSB_TXCSR_DMAENAB |
339 /* against programming guide */
341 csr |= (MUSB_TXCSR_AUTOSET
346 csr &= ~MUSB_TXCSR_P_UNDERRUN;
347 musb_writew(epio, MUSB_TXCSR, csr);
351 #elif defined(CONFIG_USB_TI_CPPI_DMA)
352 /* program endpoint CSR first, then setup DMA */
353 csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
354 csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
356 musb_writew(epio, MUSB_TXCSR,
357 (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
360 /* ensure writebuffer is empty */
361 csr = musb_readw(epio, MUSB_TXCSR);
363 /* NOTE host side sets DMAENAB later than this; both are
364 * OK since the transfer dma glue (between CPPI and Mentor
365 * fifos) just tells CPPI it could start. Data only moves
366 * to the USB TX fifo when both fifos are ready.
369 /* "mode" is irrelevant here; handle terminating ZLPs like
370 * PIO does, since the hardware RNDIS mode seems unreliable
371 * except for the last-packet-is-already-short case.
373 use_dma = use_dma && c->channel_program(
374 musb_ep->dma, musb_ep->packet_sz,
379 c->channel_release(musb_ep->dma);
381 csr &= ~MUSB_TXCSR_DMAENAB;
382 musb_writew(epio, MUSB_TXCSR, csr);
383 /* invariant: request->buf is non-null */
385 #elif defined(CONFIG_USB_TUSB_OMAP_DMA)
386 use_dma = use_dma && c->channel_program(
387 musb_ep->dma, musb_ep->packet_sz,
396 musb_write_fifo(musb_ep->hw_ep, fifo_count,
397 (u8 *) (request->buf + request->actual));
398 request->actual += fifo_count;
399 csr |= MUSB_TXCSR_TXPKTRDY;
400 csr &= ~MUSB_TXCSR_P_UNDERRUN;
401 musb_writew(epio, MUSB_TXCSR, csr);
404 /* host may already have the data when this message shows... */
405 DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
406 musb_ep->end_point.name, use_dma ? "dma" : "pio",
407 request->actual, request->length,
408 musb_readw(epio, MUSB_TXCSR),
410 musb_readw(epio, MUSB_TXMAXP));
414 * FIFO state update (e.g. data ready).
415 * Called from IRQ, with controller locked.
417 void musb_g_tx(struct musb *musb, u8 epnum)
420 struct usb_request *request;
421 u8 __iomem *mbase = musb->mregs;
422 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
423 void __iomem *epio = musb->endpoints[epnum].regs;
424 struct dma_channel *dma;
426 musb_ep_select(mbase, epnum);
427 request = next_request(musb_ep);
429 csr = musb_readw(epio, MUSB_TXCSR);
430 DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
432 dma = is_dma_capable() ? musb_ep->dma : NULL;
435 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
436 * probably rates reporting as a host error.
438 if (csr & MUSB_TXCSR_P_SENTSTALL) {
439 csr |= MUSB_TXCSR_P_WZC_BITS;
440 csr &= ~MUSB_TXCSR_P_SENTSTALL;
441 musb_writew(epio, MUSB_TXCSR, csr);
445 if (csr & MUSB_TXCSR_P_UNDERRUN) {
446 /* We NAKed, no big deal... little reason to care. */
447 csr |= MUSB_TXCSR_P_WZC_BITS;
448 csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
449 musb_writew(epio, MUSB_TXCSR, csr);
450 DBG(20, "underrun on ep%d, req %p\n", epnum, request);
453 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
455 * SHOULD NOT HAPPEN... has with CPPI though, after
456 * changing SENDSTALL (and other cases); harmless?
458 DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
465 if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
467 csr |= MUSB_TXCSR_P_WZC_BITS;
468 csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
469 MUSB_TXCSR_TXPKTRDY);
470 musb_writew(epio, MUSB_TXCSR, csr);
471 /* Ensure writebuffer is empty. */
472 csr = musb_readw(epio, MUSB_TXCSR);
473 request->actual += musb_ep->dma->actual_len;
474 DBG(4, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
475 epnum, csr, musb_ep->dma->actual_len, request);
478 if (is_dma || request->actual == request->length) {
480 * First, maybe a terminating short packet. Some DMA
481 * engines might handle this by themselves.
483 if ((request->zero && request->length
484 && request->length % musb_ep->packet_sz == 0)
485 #ifdef CONFIG_USB_INVENTRA_DMA
486 || (is_dma && (!dma->desired_mode ||
488 (musb_ep->packet_sz - 1))))
492 * On DMA completion, FIFO may not be
495 if (csr & MUSB_TXCSR_TXPKTRDY)
498 DBG(4, "sending zero pkt\n");
499 musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
500 | MUSB_TXCSR_TXPKTRDY);
504 /* ... or if not, then complete it. */
505 musb_g_giveback(musb_ep, request, 0);
508 * Kickstart next transfer if appropriate;
509 * the packet that just completed might not
510 * be transmitted for hours or days.
511 * REVISIT for double buffering...
512 * FIXME revisit for stalls too...
514 musb_ep_select(mbase, epnum);
515 csr = musb_readw(epio, MUSB_TXCSR);
516 if (csr & MUSB_TXCSR_FIFONOTEMPTY)
519 request = musb_ep->desc ? next_request(musb_ep) : NULL;
521 DBG(4, "%s idle now\n",
522 musb_ep->end_point.name);
527 txstate(musb, to_musb_request(request));
531 /* ------------------------------------------------------------ */
533 #ifdef CONFIG_USB_INVENTRA_DMA
535 /* Peripheral rx (OUT) using Mentor DMA works as follows:
536 - Only mode 0 is used.
538 - Request is queued by the gadget class driver.
539 -> if queue was previously empty, rxstate()
541 - Host sends OUT token which causes an endpoint interrupt
543 | -> if request queued, call rxstate
545 | | -> DMA interrupt on completion
549 | | -> if data received = max expected
550 | | by the request, or host
551 | | sent a short packet,
552 | | complete the request,
553 | | and start the next one.
554 | |_____________________________________|
555 | else just wait for the host
556 | to send the next OUT token.
557 |__________________________________________________|
559 * Non-Mentor DMA engines can of course work differently.
565 * Context: controller locked, IRQs blocked, endpoint selected
567 static void rxstate(struct musb *musb, struct musb_request *req)
569 const u8 epnum = req->epnum;
570 struct usb_request *request = &req->request;
571 struct musb_ep *musb_ep;
572 void __iomem *epio = musb->endpoints[epnum].regs;
573 unsigned fifo_count = 0;
575 u16 csr = musb_readw(epio, MUSB_RXCSR);
576 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
578 if (hw_ep->is_shared_fifo)
579 musb_ep = &hw_ep->ep_in;
581 musb_ep = &hw_ep->ep_out;
583 len = musb_ep->packet_sz;
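/* Default to a full packet; RXCOUNT overwrites this below once a packet
 * is actually ready in the FIFO.
 */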
585 /* We shouldn't get here while DMA is active, but we do... */
586 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
587 DBG(4, "DMA pending...\n");
591 if (csr & MUSB_RXCSR_P_SENDSTALL) {
592 DBG(5, "%s stalling, RXCSR %04x\n",
593 musb_ep->end_point.name, csr);
597 if (is_cppi_enabled() && musb_ep->dma) {
598 struct dma_controller *c = musb->dma_controller;
599 struct dma_channel *channel = musb_ep->dma;
601 /* NOTE: CPPI won't actually stop advancing the DMA
602 * queue after short packet transfers, so this is almost
603 * always going to run as IRQ-per-packet DMA so that
604 * faults will be handled correctly.
606 if (c->channel_program(channel,
608 !request->short_not_ok,
609 request->dma + request->actual,
610 request->length - request->actual)) {
612 /* make sure that if an rxpkt arrived after the irq,
613 * the cppi engine will be ready to take it as soon
616 csr &= ~(MUSB_RXCSR_AUTOCLEAR
617 | MUSB_RXCSR_DMAMODE);
618 csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
619 musb_writew(epio, MUSB_RXCSR, csr);
624 if (csr & MUSB_RXCSR_RXPKTRDY) {
625 len = musb_readw(epio, MUSB_RXCOUNT);
626 if (request->actual < request->length) {
627 #ifdef CONFIG_USB_INVENTRA_DMA
628 if (is_dma_capable() && musb_ep->dma) {
629 struct dma_controller *c;
630 struct dma_channel *channel;
633 c = musb->dma_controller;
634 channel = musb_ep->dma;
636 /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
637 * mode 0 only. So we do not get endpoint interrupts due to DMA
638 * completion. We only get interrupts from DMA controller.
640 * We could operate in DMA mode 1 if we knew the size of the transfer
641 * in advance. For mass storage class, request->length = what the host
642 * sends, so that'd work. But for pretty much everything else,
643 * request->length is routinely more than what the host sends. For
644 * most of these gadgets, end of transfer is signified either by a short packet,
645 * or filling the last byte of the buffer. (Sending extra data in
646 * that last packet should trigger an overflow fault.) But in mode 1,
647 * we don't get a DMA completion interrupt for short packets.
649 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
650 * to get endpoint interrupt on every DMA req, but that didn't seem
653 * REVISIT an updated g_file_storage can set req->short_not_ok, which
654 * then becomes usable as a runtime "use mode 1" hint...
657 csr |= MUSB_RXCSR_DMAENAB;
659 csr |= MUSB_RXCSR_AUTOCLEAR;
660 /* csr |= MUSB_RXCSR_DMAMODE; */
662 /* this special sequence (enabling and then
663 * disabling MUSB_RXCSR_DMAMODE) is required
664 * to get DMAReq to activate
666 musb_writew(epio, MUSB_RXCSR,
667 csr | MUSB_RXCSR_DMAMODE);
669 musb_writew(epio, MUSB_RXCSR, csr);
671 if (request->actual < request->length) {
672 int transfer_size = 0;
674 transfer_size = min(request->length,
679 if (transfer_size <= musb_ep->packet_sz)
680 musb_ep->dma->desired_mode = 0;
682 musb_ep->dma->desired_mode = 1;
684 use_dma = c->channel_program(
687 channel->desired_mode,
696 #endif /* Mentor's DMA */
698 fifo_count = request->length - request->actual;
699 DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
700 musb_ep->end_point.name,
704 fifo_count = min_t(unsigned, len, fifo_count);
706 #ifdef CONFIG_USB_TUSB_OMAP_DMA
707 if (tusb_dma_omap() && musb_ep->dma) {
708 struct dma_controller *c = musb->dma_controller;
709 struct dma_channel *channel = musb_ep->dma;
710 u32 dma_addr = request->dma + request->actual;
713 ret = c->channel_program(channel,
715 channel->desired_mode,
723 musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
724 (request->buf + request->actual));
725 request->actual += fifo_count;
727 /* REVISIT if we left anything in the fifo, flush
728 * it and report -EOVERFLOW
732 csr |= MUSB_RXCSR_P_WZC_BITS;
733 csr &= ~MUSB_RXCSR_RXPKTRDY;
734 musb_writew(epio, MUSB_RXCSR, csr);
738 /* reached the end, or a short packet was detected */
739 if (request->actual == request->length || len < musb_ep->packet_sz)
740 musb_g_giveback(musb_ep, request, 0);
744 * Data ready for a request; called from IRQ
746 void musb_g_rx(struct musb *musb, u8 epnum)
749 struct usb_request *request;
750 void __iomem *mbase = musb->mregs;
751 struct musb_ep *musb_ep;
752 void __iomem *epio = musb->endpoints[epnum].regs;
753 struct dma_channel *dma;
754 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
756 if (hw_ep->is_shared_fifo)
757 musb_ep = &hw_ep->ep_in;
759 musb_ep = &hw_ep->ep_out;
761 musb_ep_select(mbase, epnum);
763 request = next_request(musb_ep);
767 csr = musb_readw(epio, MUSB_RXCSR);
768 dma = is_dma_capable() ? musb_ep->dma : NULL;
770 DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
771 csr, dma ? " (dma)" : "", request);
773 if (csr & MUSB_RXCSR_P_SENTSTALL) {
774 csr |= MUSB_RXCSR_P_WZC_BITS;
775 csr &= ~MUSB_RXCSR_P_SENTSTALL;
776 musb_writew(epio, MUSB_RXCSR, csr);
780 if (csr & MUSB_RXCSR_P_OVERRUN) {
781 /* csr |= MUSB_RXCSR_P_WZC_BITS; */
782 csr &= ~MUSB_RXCSR_P_OVERRUN;
783 musb_writew(epio, MUSB_RXCSR, csr);
785 DBG(3, "%s iso overrun on %p\n", musb_ep->name, request);
786 if (request && request->status == -EINPROGRESS)
787 request->status = -EOVERFLOW;
789 if (csr & MUSB_RXCSR_INCOMPRX) {
790 /* REVISIT not necessarily an error */
791 DBG(4, "%s, incomprx\n", musb_ep->end_point.name);
794 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
795 /* "should not happen"; likely RXPKTRDY pending for DMA */
796 DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1,
797 "%s busy, csr %04x\n",
798 musb_ep->end_point.name, csr);
802 if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
803 csr &= ~(MUSB_RXCSR_AUTOCLEAR
805 | MUSB_RXCSR_DMAMODE);
806 musb_writew(epio, MUSB_RXCSR,
807 MUSB_RXCSR_P_WZC_BITS | csr);
809 request->actual += musb_ep->dma->actual_len;
811 DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
813 musb_readw(epio, MUSB_RXCSR),
814 musb_ep->dma->actual_len, request);
816 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
817 /* Autoclear doesn't clear RxPktRdy for short packets */
818 if ((dma->desired_mode == 0)
820 & (musb_ep->packet_sz - 1))) {
822 csr &= ~MUSB_RXCSR_RXPKTRDY;
823 musb_writew(epio, MUSB_RXCSR, csr);
826 /* incomplete, and not short? wait for the next OUT packet */
827 if ((request->actual < request->length)
828 && (musb_ep->dma->actual_len
829 == musb_ep->packet_sz))
832 musb_g_giveback(musb_ep, request, 0);
834 request = next_request(musb_ep);
839 /* analyze request if the ep is hot */
841 rxstate(musb, to_musb_request(request));
843 DBG(3, "packet waiting for %s%s request\n",
844 musb_ep->desc ? "" : "inactive ",
845 musb_ep->end_point.name);
849 /* ------------------------------------------------------------ */
851 static int musb_gadget_enable(struct usb_ep *ep,
852 const struct usb_endpoint_descriptor *desc)
855 struct musb_ep *musb_ep;
856 struct musb_hw_ep *hw_ep;
863 int status = -EINVAL;
868 musb_ep = to_musb_ep(ep);
869 hw_ep = musb_ep->hw_ep;
871 musb = musb_ep->musb;
873 epnum = musb_ep->current_epnum;
875 spin_lock_irqsave(&musb->lock, flags);
881 musb_ep->type = usb_endpoint_type(desc);
883 /* check direction and (later) maxpacket size against endpoint */
884 if (usb_endpoint_num(desc) != epnum)
887 /* REVISIT this rules out high bandwidth periodic transfers */
888 tmp = le16_to_cpu(desc->wMaxPacketSize);
891 musb_ep->packet_sz = tmp;
893 /* enable the interrupts for the endpoint, set the endpoint
894 * packet size (or fail), set the mode, clear the fifo
896 musb_ep_select(mbase, epnum);
897 if (usb_endpoint_dir_in(desc)) {
898 u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
900 if (hw_ep->is_shared_fifo)
904 if (tmp > hw_ep->max_packet_sz_tx)
907 int_txe |= (1 << epnum);
908 musb_writew(mbase, MUSB_INTRTXE, int_txe);
910 /* REVISIT if can_bulk_split(), use by updating "tmp";
911 * likewise high bandwidth periodic tx
913 /* Set TXMAXP with the FIFO size of the endpoint
914 * to disable double buffering mode. Currently, double buffering
915 * seems to have problems if the musb RTL revision number is < 2.0.
917 if (musb->hwvers < MUSB_HWVERS_2000)
918 musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
920 musb_writew(regs, MUSB_TXMAXP, tmp);
922 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
923 if (musb_readw(regs, MUSB_TXCSR)
924 & MUSB_TXCSR_FIFONOTEMPTY)
925 csr |= MUSB_TXCSR_FLUSHFIFO;
926 if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
927 csr |= MUSB_TXCSR_P_ISO;
929 /* set twice in case of double buffering */
930 musb_writew(regs, MUSB_TXCSR, csr);
931 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
932 musb_writew(regs, MUSB_TXCSR, csr);
935 u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);
937 if (hw_ep->is_shared_fifo)
941 if (tmp > hw_ep->max_packet_sz_rx)
944 int_rxe |= (1 << epnum);
945 musb_writew(mbase, MUSB_INTRRXE, int_rxe);
947 /* REVISIT if can_bulk_combine() use by updating "tmp"
948 * likewise high bandwidth periodic rx
950 /* Set RXMAXP with the FIFO size of the endpoint
951 * to disable double buffering mode.
953 if (musb->hwvers < MUSB_HWVERS_2000)
954 musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx);
956 musb_writew(regs, MUSB_RXMAXP, tmp);
958 /* force shared fifo to OUT-only mode */
959 if (hw_ep->is_shared_fifo) {
960 csr = musb_readw(regs, MUSB_TXCSR);
961 csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
962 musb_writew(regs, MUSB_TXCSR, csr);
965 csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
966 if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
967 csr |= MUSB_RXCSR_P_ISO;
968 else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
969 csr |= MUSB_RXCSR_DISNYET;
971 /* set twice in case of double buffering */
972 musb_writew(regs, MUSB_RXCSR, csr);
973 musb_writew(regs, MUSB_RXCSR, csr);
976 /* NOTE: all the I/O code _should_ work fine without DMA, in case
977 * for some reason you run out of channels here.
979 if (is_dma_capable() && musb->dma_controller) {
980 struct dma_controller *c = musb->dma_controller;
982 musb_ep->dma = c->channel_alloc(c, hw_ep,
983 (desc->bEndpointAddress & USB_DIR_IN));
987 musb_ep->desc = desc;
992 pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
993 musb_driver_name, musb_ep->end_point.name,
994 ({ char *s; switch (musb_ep->type) {
995 case USB_ENDPOINT_XFER_BULK: s = "bulk"; break;
996 case USB_ENDPOINT_XFER_INT: s = "int"; break;
997 default: s = "iso"; break;
999 musb_ep->is_in ? "IN" : "OUT",
1000 musb_ep->dma ? "dma, " : "",
1001 musb_ep->packet_sz);
1003 schedule_work(&musb->irq_work);
1006 spin_unlock_irqrestore(&musb->lock, flags);
1011 * Disable an endpoint, flushing all queued requests.
1013 static int musb_gadget_disable(struct usb_ep *ep)
1015 unsigned long flags;
1018 struct musb_ep *musb_ep;
1022 musb_ep = to_musb_ep(ep);
1023 musb = musb_ep->musb;
1024 epnum = musb_ep->current_epnum;
1025 epio = musb->endpoints[epnum].regs;
1027 spin_lock_irqsave(&musb->lock, flags);
1028 musb_ep_select(musb->mregs, epnum);
1030 /* zero the endpoint sizes */
1031 if (musb_ep->is_in) {
1032 u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
1033 int_txe &= ~(1 << epnum);
1034 musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
1035 musb_writew(epio, MUSB_TXMAXP, 0);
1037 u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
1038 int_rxe &= ~(1 << epnum);
1039 musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
1040 musb_writew(epio, MUSB_RXMAXP, 0);
1043 musb_ep->desc = NULL;
1045 /* abort all pending DMA and requests */
1046 nuke(musb_ep, -ESHUTDOWN);
1048 schedule_work(&musb->irq_work);
1050 spin_unlock_irqrestore(&(musb->lock), flags);
1052 DBG(2, "%s\n", musb_ep->end_point.name);
1058 * Allocate a request for an endpoint.
1059 * Reused by ep0 code.
1061 struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1063 struct musb_ep *musb_ep = to_musb_ep(ep);
1064 struct musb_request *request = NULL;
1066 request = kzalloc(sizeof *request, gfp_flags);
1068 INIT_LIST_HEAD(&request->request.list);
1069 request->request.dma = DMA_ADDR_INVALID;
1070 request->epnum = musb_ep->current_epnum;
1071 request->ep = musb_ep;
1074 return &request->request;
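/*
 * Illustrative sketch only (kept out of the build): roughly how a gadget
 * function driver reaches musb_alloc_request()/musb_gadget_queue() through
 * the generic gadget API.  The "example_*" names are made up.
 */
#if 0
static void example_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* runs from musb_g_giveback(), with the controller lock dropped */
}

static int example_submit(struct usb_ep *ep, void *buf, unsigned length)
{
	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);

	if (!req)
		return -ENOMEM;
	req->buf = buf;
	req->length = length;
	req->complete = example_complete;
	return usb_ep_queue(ep, req, GFP_ATOMIC);
}
#endif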
1079 * Reused by ep0 code.
1081 void musb_free_request(struct usb_ep *ep, struct usb_request *req)
1083 kfree(to_musb_request(req));
1086 static LIST_HEAD(buffers);
1088 struct free_record {
1089 struct list_head list;
1096 * Context: controller locked, IRQs blocked.
1098 void musb_ep_restart(struct musb *musb, struct musb_request *req)
1100 DBG(3, "<== %s request %p len %u on hw_ep%d\n",
1101 req->tx ? "TX/IN" : "RX/OUT",
1102 &req->request, req->request.length, req->epnum);
1104 musb_ep_select(musb->mregs, req->epnum);
1111 static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1114 struct musb_ep *musb_ep;
1115 struct musb_request *request;
1118 unsigned long lockflags;
1125 musb_ep = to_musb_ep(ep);
1126 musb = musb_ep->musb;
1128 request = to_musb_request(req);
1129 request->musb = musb;
1131 if (request->ep != musb_ep)
1134 DBG(4, "<== to %s request=%p\n", ep->name, req);
1136 /* request is mine now... */
1137 request->request.actual = 0;
1138 request->request.status = -EINPROGRESS;
1139 request->epnum = musb_ep->current_epnum;
1140 request->tx = musb_ep->is_in;
1142 if (is_dma_capable() && musb_ep->dma) {
1143 if (request->request.dma == DMA_ADDR_INVALID) {
1144 request->request.dma = dma_map_single(
1146 request->request.buf,
1147 request->request.length,
1151 request->mapped = 1;
1153 dma_sync_single_for_device(musb->controller,
1154 request->request.dma,
1155 request->request.length,
1159 request->mapped = 0;
1161 } else if (!req->buf) {
1164 request->mapped = 0;
1166 spin_lock_irqsave(&musb->lock, lockflags);
1168 /* don't queue if the ep is down */
1169 if (!musb_ep->desc) {
1170 DBG(4, "req %p queued to %s while ep %s\n",
1171 req, ep->name, "disabled");
1172 status = -ESHUTDOWN;
1176 /* add request to the list */
1177 list_add_tail(&(request->request.list), &(musb_ep->req_list));
1179 /* if this is the head of the queue, start i/o ... */
1180 if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next)
1181 musb_ep_restart(musb, request);
1184 spin_unlock_irqrestore(&musb->lock, lockflags);
1188 static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1190 struct musb_ep *musb_ep = to_musb_ep(ep);
1191 struct usb_request *r;
1192 unsigned long flags;
1194 struct musb *musb = musb_ep->musb;
1196 if (!ep || !request || to_musb_request(request)->ep != musb_ep)
1199 spin_lock_irqsave(&musb->lock, flags);
1201 list_for_each_entry(r, &musb_ep->req_list, list) {
1206 DBG(3, "request %p not queued to %s\n", request, ep->name);
1211 /* if the hardware doesn't have the request, easy ... */
1212 if (musb_ep->req_list.next != &request->list || musb_ep->busy)
1213 musb_g_giveback(musb_ep, request, -ECONNRESET);
1215 /* ... else abort the dma transfer ... */
1216 else if (is_dma_capable() && musb_ep->dma) {
1217 struct dma_controller *c = musb->dma_controller;
1219 musb_ep_select(musb->mregs, musb_ep->current_epnum);
1220 if (c->channel_abort)
1221 status = c->channel_abort(musb_ep->dma);
1225 musb_g_giveback(musb_ep, request, -ECONNRESET);
1227 /* NOTE: by sticking to easily tested hardware/driver states,
1228 * we leave counting of in-flight packets imprecise.
1230 musb_g_giveback(musb_ep, request, -ECONNRESET);
1234 spin_unlock_irqrestore(&musb->lock, flags);
1239 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
1240 * data but will queue requests.
1242 * exported to ep0 code
1244 static int musb_gadget_set_halt(struct usb_ep *ep, int value)
1246 struct musb_ep *musb_ep = to_musb_ep(ep);
1247 u8 epnum = musb_ep->current_epnum;
1248 struct musb *musb = musb_ep->musb;
1249 void __iomem *epio = musb->endpoints[epnum].regs;
1250 void __iomem *mbase;
1251 unsigned long flags;
1253 struct musb_request *request;
1258 mbase = musb->mregs;
1260 spin_lock_irqsave(&musb->lock, flags);
1262 if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) {
1267 musb_ep_select(mbase, epnum);
1269 request = to_musb_request(next_request(musb_ep));
1272 DBG(3, "request in progress, cannot halt %s\n",
1277 /* Cannot portably stall with non-empty FIFO */
1278 if (musb_ep->is_in) {
1279 csr = musb_readw(epio, MUSB_TXCSR);
1280 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1281 DBG(3, "FIFO busy, cannot halt %s\n", ep->name);
1287 musb_ep->wedged = 0;
1289 /* set/clear the stall and toggle bits */
1290 DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
1291 if (musb_ep->is_in) {
1292 csr = musb_readw(epio, MUSB_TXCSR);
1293 csr |= MUSB_TXCSR_P_WZC_BITS
1294 | MUSB_TXCSR_CLRDATATOG;
1296 csr |= MUSB_TXCSR_P_SENDSTALL;
1298 csr &= ~(MUSB_TXCSR_P_SENDSTALL
1299 | MUSB_TXCSR_P_SENTSTALL);
1300 csr &= ~MUSB_TXCSR_TXPKTRDY;
1301 musb_writew(epio, MUSB_TXCSR, csr);
1303 csr = musb_readw(epio, MUSB_RXCSR);
1304 csr |= MUSB_RXCSR_P_WZC_BITS
1305 | MUSB_RXCSR_FLUSHFIFO
1306 | MUSB_RXCSR_CLRDATATOG;
1308 csr |= MUSB_RXCSR_P_SENDSTALL;
1310 csr &= ~(MUSB_RXCSR_P_SENDSTALL
1311 | MUSB_RXCSR_P_SENTSTALL);
1312 musb_writew(epio, MUSB_RXCSR, csr);
1315 /* maybe start the first request in the queue */
1316 if (!musb_ep->busy && !value && request) {
1317 DBG(3, "restarting the request\n");
1318 musb_ep_restart(musb, request);
1322 spin_unlock_irqrestore(&musb->lock, flags);
1327 * Sets the halt feature, with clear-halt requests ignored
1329 static int musb_gadget_set_wedge(struct usb_ep *ep)
1331 struct musb_ep *musb_ep = to_musb_ep(ep);
1336 musb_ep->wedged = 1;
1338 return usb_ep_set_halt(ep);
1341 static int musb_gadget_fifo_status(struct usb_ep *ep)
1343 struct musb_ep *musb_ep = to_musb_ep(ep);
1344 void __iomem *epio = musb_ep->hw_ep->regs;
1345 int retval = -EINVAL;
1347 if (musb_ep->desc && !musb_ep->is_in) {
1348 struct musb *musb = musb_ep->musb;
1349 int epnum = musb_ep->current_epnum;
1350 void __iomem *mbase = musb->mregs;
1351 unsigned long flags;
1353 spin_lock_irqsave(&musb->lock, flags);
1355 musb_ep_select(mbase, epnum);
1356 /* FIXME return zero unless RXPKTRDY is set */
1357 retval = musb_readw(epio, MUSB_RXCOUNT);
1359 spin_unlock_irqrestore(&musb->lock, flags);
1364 static void musb_gadget_fifo_flush(struct usb_ep *ep)
1366 struct musb_ep *musb_ep = to_musb_ep(ep);
1367 struct musb *musb = musb_ep->musb;
1368 u8 epnum = musb_ep->current_epnum;
1369 void __iomem *epio = musb->endpoints[epnum].regs;
1370 void __iomem *mbase;
1371 unsigned long flags;
1374 mbase = musb->mregs;
1376 spin_lock_irqsave(&musb->lock, flags);
1377 musb_ep_select(mbase, (u8) epnum);
1379 /* disable interrupts */
1380 int_txe = musb_readw(mbase, MUSB_INTRTXE);
1381 musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
1383 if (musb_ep->is_in) {
1384 csr = musb_readw(epio, MUSB_TXCSR);
1385 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1386 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
1387 musb_writew(epio, MUSB_TXCSR, csr);
1388 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1389 musb_writew(epio, MUSB_TXCSR, csr);
1392 csr = musb_readw(epio, MUSB_RXCSR);
1393 csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
1394 musb_writew(epio, MUSB_RXCSR, csr);
1395 musb_writew(epio, MUSB_RXCSR, csr);
1398 /* re-enable interrupt */
1399 musb_writew(mbase, MUSB_INTRTXE, int_txe);
1400 spin_unlock_irqrestore(&musb->lock, flags);
1403 static const struct usb_ep_ops musb_ep_ops = {
1404 .enable = musb_gadget_enable,
1405 .disable = musb_gadget_disable,
1406 .alloc_request = musb_alloc_request,
1407 .free_request = musb_free_request,
1408 .queue = musb_gadget_queue,
1409 .dequeue = musb_gadget_dequeue,
1410 .set_halt = musb_gadget_set_halt,
1411 .set_wedge = musb_gadget_set_wedge,
1412 .fifo_status = musb_gadget_fifo_status,
1413 .fifo_flush = musb_gadget_fifo_flush
1416 /* ----------------------------------------------------------------------- */
1418 static int musb_gadget_get_frame(struct usb_gadget *gadget)
1420 struct musb *musb = gadget_to_musb(gadget);
1422 return (int)musb_readw(musb->mregs, MUSB_FRAME);
1425 static int musb_gadget_wakeup(struct usb_gadget *gadget)
1427 struct musb *musb = gadget_to_musb(gadget);
1428 void __iomem *mregs = musb->mregs;
1429 unsigned long flags;
1430 int status = -EINVAL;
1434 spin_lock_irqsave(&musb->lock, flags);
1436 switch (musb->xceiv->state) {
1437 case OTG_STATE_B_PERIPHERAL:
1438 /* NOTE: OTG state machine doesn't include B_SUSPENDED;
1439 * that's part of the standard usb 1.1 state machine, and
1440 * doesn't affect OTG transitions.
1442 if (musb->may_wakeup && musb->is_suspended)
1445 case OTG_STATE_B_IDLE:
1446 /* Start SRP ... OTG not required. */
1447 devctl = musb_readb(mregs, MUSB_DEVCTL);
1448 DBG(2, "Sending SRP: devctl: %02x\n", devctl);
1449 devctl |= MUSB_DEVCTL_SESSION;
1450 musb_writeb(mregs, MUSB_DEVCTL, devctl);
1451 devctl = musb_readb(mregs, MUSB_DEVCTL);
1453 while (!(devctl & MUSB_DEVCTL_SESSION)) {
1454 devctl = musb_readb(mregs, MUSB_DEVCTL);
1459 while (devctl & MUSB_DEVCTL_SESSION) {
1460 devctl = musb_readb(mregs, MUSB_DEVCTL);
1465 /* Block idling for at least 1s */
1466 musb_platform_try_idle(musb,
1467 jiffies + msecs_to_jiffies(1 * HZ));
1472 DBG(2, "Unhandled wake: %s\n", otg_state_string(musb));
1478 power = musb_readb(mregs, MUSB_POWER);
1479 power |= MUSB_POWER_RESUME;
1480 musb_writeb(mregs, MUSB_POWER, power);
1481 DBG(2, "issue wakeup\n");
1483 /* FIXME do this next chunk in a timer callback, no udelay */
1486 power = musb_readb(mregs, MUSB_POWER);
1487 power &= ~MUSB_POWER_RESUME;
1488 musb_writeb(mregs, MUSB_POWER, power);
1490 spin_unlock_irqrestore(&musb->lock, flags);
1495 musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1497 struct musb *musb = gadget_to_musb(gadget);
1499 musb->is_self_powered = !!is_selfpowered;
1503 static void musb_pullup(struct musb *musb, int is_on)
1507 power = musb_readb(musb->mregs, MUSB_POWER);
1509 power |= MUSB_POWER_SOFTCONN;
1511 power &= ~MUSB_POWER_SOFTCONN;
1513 /* FIXME if on, HdrcStart; if off, HdrcStop */
1515 DBG(3, "gadget %s D+ pullup %s\n",
1516 musb->gadget_driver->function, is_on ? "on" : "off");
1517 musb_writeb(musb->mregs, MUSB_POWER, power);
1521 static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
1523 DBG(2, "<= %s =>\n", __func__);
1526 * FIXME iff driver's softconnect flag is set (as it is during probe,
1527 * though that can clear it), just musb_pullup().
1534 static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1536 struct musb *musb = gadget_to_musb(gadget);
1538 if (!musb->xceiv->set_power)
1540 return otg_set_power(musb->xceiv, mA);
1543 static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1545 struct musb *musb = gadget_to_musb(gadget);
1546 unsigned long flags;
1550 /* NOTE: this assumes we are sensing vbus; we'd rather
1551 * not pullup unless the B-session is active.
1553 spin_lock_irqsave(&musb->lock, flags);
1554 if (is_on != musb->softconnect) {
1555 musb->softconnect = is_on;
1556 musb_pullup(musb, is_on);
1558 spin_unlock_irqrestore(&musb->lock, flags);
1562 static const struct usb_gadget_ops musb_gadget_operations = {
1563 .get_frame = musb_gadget_get_frame,
1564 .wakeup = musb_gadget_wakeup,
1565 .set_selfpowered = musb_gadget_set_self_powered,
1566 /* .vbus_session = musb_gadget_vbus_session, */
1567 .vbus_draw = musb_gadget_vbus_draw,
1568 .pullup = musb_gadget_pullup,
1571 /* ----------------------------------------------------------------------- */
1575 /* Only this registration code "knows" the rule (from USB standards)
1576 * about there being only one external upstream port. It assumes
1577 * all peripheral ports are external...
1579 static struct musb *the_gadget;
1581 static void musb_gadget_release(struct device *dev)
1583 /* kref_put(WHAT) */
1584 dev_dbg(dev, "%s\n", __func__);
1589 init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1591 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1593 memset(ep, 0, sizeof *ep);
1595 ep->current_epnum = epnum;
1600 INIT_LIST_HEAD(&ep->req_list);
1602 sprintf(ep->name, "ep%d%s", epnum,
1603 (!epnum || hw_ep->is_shared_fifo) ? "" : (
1604 is_in ? "in" : "out"));
1605 ep->end_point.name = ep->name;
1606 INIT_LIST_HEAD(&ep->end_point.ep_list);
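/* ep0: fixed 64-byte control maxpacket, its own ops, and it is not
 * linked into g.ep_list.
 */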
1608 ep->end_point.maxpacket = 64;
1609 ep->end_point.ops = &musb_g_ep0_ops;
1610 musb->g.ep0 = &ep->end_point;
1613 ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
1615 ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
1616 ep->end_point.ops = &musb_ep_ops;
1617 list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1622 * Initialize the endpoints exposed to peripheral drivers, with backlinks
1623 * to the rest of the driver state.
1625 static inline void __init musb_g_init_endpoints(struct musb *musb)
1628 struct musb_hw_ep *hw_ep;
1631 /* initialize endpoint list just once */
1632 INIT_LIST_HEAD(&(musb->g.ep_list));
1634 for (epnum = 0, hw_ep = musb->endpoints;
1635 epnum < musb->nr_endpoints;
1637 if (hw_ep->is_shared_fifo /* || !epnum */) {
1638 init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1641 if (hw_ep->max_packet_sz_tx) {
1642 init_peripheral_ep(musb, &hw_ep->ep_in,
1646 if (hw_ep->max_packet_sz_rx) {
1647 init_peripheral_ep(musb, &hw_ep->ep_out,
1655 /* called once during driver setup to initialize and link into
1656 * the driver model; memory is zeroed.
1658 int __init musb_gadget_setup(struct musb *musb)
1662 /* REVISIT minor race: if (erroneously) setting up two
1663 * musb peripherals at the same time, only the bus lock
1670 musb->g.ops = &musb_gadget_operations;
1671 musb->g.is_dualspeed = 1;
1672 musb->g.speed = USB_SPEED_UNKNOWN;
1674 /* this "gadget" abstracts/virtualizes the controller */
1675 dev_set_name(&musb->g.dev, "gadget");
1676 musb->g.dev.parent = musb->controller;
1677 musb->g.dev.dma_mask = musb->controller->dma_mask;
1678 musb->g.dev.release = musb_gadget_release;
1679 musb->g.name = musb_driver_name;
1681 if (is_otg_enabled(musb))
1684 musb_g_init_endpoints(musb);
1686 musb->is_active = 0;
1687 musb_platform_try_idle(musb, 0);
1689 status = device_register(&musb->g.dev);
1695 void musb_gadget_cleanup(struct musb *musb)
1697 if (musb != the_gadget)
1700 device_unregister(&musb->g.dev);
1705 * Register the gadget driver. Used by gadget drivers when
1706 * registering themselves with the controller.
1708 * -EINVAL something went wrong (not driver)
1709 * -EBUSY another gadget is already using the controller
1710 * -ENOMEM no memory to perform the operation
1712 * @param driver the gadget driver
1713 * @return <0 if error, 0 if everything is fine
1715 int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1718 unsigned long flags;
1719 struct musb *musb = the_gadget;
1722 || driver->speed != USB_SPEED_HIGH
1727 /* driver must be initialized to support peripheral mode */
1729 DBG(1, "%s, no dev??\n", __func__);
1733 DBG(3, "registering driver %s\n", driver->function);
1734 spin_lock_irqsave(&musb->lock, flags);
1736 if (musb->gadget_driver) {
1737 DBG(1, "%s is already bound to %s\n",
1739 musb->gadget_driver->driver.name);
1742 musb->gadget_driver = driver;
1743 musb->g.dev.driver = &driver->driver;
1744 driver->driver.bus = NULL;
1745 musb->softconnect = 1;
1749 spin_unlock_irqrestore(&musb->lock, flags);
1752 retval = driver->bind(&musb->g);
1754 DBG(3, "bind to driver %s failed --> %d\n",
1755 driver->driver.name, retval);
1756 musb->gadget_driver = NULL;
1757 musb->g.dev.driver = NULL;
1760 spin_lock_irqsave(&musb->lock, flags);
1762 otg_set_peripheral(musb->xceiv, &musb->g);
1763 musb->xceiv->state = OTG_STATE_B_IDLE;
1764 musb->is_active = 1;
1766 /* FIXME this ignores the softconnect flag. Drivers are
1767 * allowed to hold the peripheral inactive until, for example,
1768 * userspace hooks up printer hardware or DSP codecs, so
1769 * hosts only see fully functional devices.
1772 if (!is_otg_enabled(musb))
1775 otg_set_peripheral(musb->xceiv, &musb->g);
1777 spin_unlock_irqrestore(&musb->lock, flags);
1779 if (is_otg_enabled(musb)) {
1780 DBG(3, "OTG startup...\n");
1782 /* REVISIT: funcall to other code, which also
1783 * handles power budgeting ... this way also
1784 * ensures HdrcStart is indirectly called.
1786 retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
1788 DBG(1, "add_hcd failed, %d\n", retval);
1789 spin_lock_irqsave(&musb->lock, flags);
1790 otg_set_peripheral(musb->xceiv, NULL);
1791 musb->gadget_driver = NULL;
1792 musb->g.dev.driver = NULL;
1793 spin_unlock_irqrestore(&musb->lock, flags);
1800 EXPORT_SYMBOL(usb_gadget_register_driver);
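/*
 * Illustrative sketch only (kept out of the build): the minimal shape of a
 * gadget driver this registration path expects -- high speed capable, with
 * bind/unbind/setup hooks.  All "example_*" names are made up.
 */
#if 0
static struct usb_gadget_driver example_driver = {
	.function	= "example",
	.speed		= USB_SPEED_HIGH,	/* anything else is rejected */
	.bind		= example_bind,
	.unbind		= example_unbind,
	.setup		= example_setup,
	.disconnect	= example_disconnect,
	.driver		= {
		.name	= "example",
	},
};

static int __init example_init(void)
{
	/* typically called from the function driver's module_init() */
	return usb_gadget_register_driver(&example_driver);
}
#endif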
1802 static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
1805 struct musb_hw_ep *hw_ep;
1807 /* don't disconnect if it's not connected */
1808 if (musb->g.speed == USB_SPEED_UNKNOWN)
1811 musb->g.speed = USB_SPEED_UNKNOWN;
1813 /* deactivate the hardware */
1814 if (musb->softconnect) {
1815 musb->softconnect = 0;
1816 musb_pullup(musb, 0);
1820 /* killing any outstanding requests will quiesce the driver;
1821 * then report disconnect
1824 for (i = 0, hw_ep = musb->endpoints;
1825 i < musb->nr_endpoints;
1827 musb_ep_select(musb->mregs, i);
1828 if (hw_ep->is_shared_fifo /* || !epnum */) {
1829 nuke(&hw_ep->ep_in, -ESHUTDOWN);
1831 if (hw_ep->max_packet_sz_tx)
1832 nuke(&hw_ep->ep_in, -ESHUTDOWN);
1833 if (hw_ep->max_packet_sz_rx)
1834 nuke(&hw_ep->ep_out, -ESHUTDOWN);
1838 spin_unlock(&musb->lock);
1839 driver->disconnect(&musb->g);
1840 spin_lock(&musb->lock);
1845 * Unregister the gadget driver. Used by gadget drivers when
1846 * unregistering themselves from the controller.
1848 * @param driver the gadget driver to unregister
1850 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1852 unsigned long flags;
1854 struct musb *musb = the_gadget;
1856 if (!driver || !driver->unbind || !musb)
1859 /* REVISIT always use otg_set_peripheral() here too;
1860 * this needs to shut down the OTG engine.
1863 spin_lock_irqsave(&musb->lock, flags);
1865 #ifdef CONFIG_USB_MUSB_OTG
1866 musb_hnp_stop(musb);
1869 if (musb->gadget_driver == driver) {
1871 (void) musb_gadget_vbus_draw(&musb->g, 0);
1873 musb->xceiv->state = OTG_STATE_UNDEFINED;
1874 stop_activity(musb, driver);
1875 otg_set_peripheral(musb->xceiv, NULL);
1877 DBG(3, "unregistering driver %s\n", driver->function);
1878 spin_unlock_irqrestore(&musb->lock, flags);
1879 driver->unbind(&musb->g);
1880 spin_lock_irqsave(&musb->lock, flags);
1882 musb->gadget_driver = NULL;
1883 musb->g.dev.driver = NULL;
1885 musb->is_active = 0;
1886 musb_platform_try_idle(musb, 0);
1889 spin_unlock_irqrestore(&musb->lock, flags);
1891 if (is_otg_enabled(musb) && retval == 0) {
1892 usb_remove_hcd(musb_to_hcd(musb));
1893 /* FIXME we need to be able to register another
1894 * gadget driver here and have everything work;
1895 * that currently misbehaves.
1901 EXPORT_SYMBOL(usb_gadget_unregister_driver);
1904 /* ----------------------------------------------------------------------- */
1906 /* lifecycle operations called through plat_uds.c */
1908 void musb_g_resume(struct musb *musb)
1910 musb->is_suspended = 0;
1911 switch (musb->xceiv->state) {
1912 case OTG_STATE_B_IDLE:
1914 case OTG_STATE_B_WAIT_ACON:
1915 case OTG_STATE_B_PERIPHERAL:
1916 musb->is_active = 1;
1917 if (musb->gadget_driver && musb->gadget_driver->resume) {
1918 spin_unlock(&musb->lock);
1919 musb->gadget_driver->resume(&musb->g);
1920 spin_lock(&musb->lock);
1924 WARNING("unhandled RESUME transition (%s)\n",
1925 otg_state_string(musb));
1929 /* called when SOF packets stop for 3+ msec */
1930 void musb_g_suspend(struct musb *musb)
1934 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1935 DBG(3, "devctl %02x\n", devctl);
1937 switch (musb->xceiv->state) {
1938 case OTG_STATE_B_IDLE:
1939 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
1940 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
1942 case OTG_STATE_B_PERIPHERAL:
1943 musb->is_suspended = 1;
1944 if (musb->gadget_driver && musb->gadget_driver->suspend) {
1945 spin_unlock(&musb->lock);
1946 musb->gadget_driver->suspend(&musb->g);
1947 spin_lock(&musb->lock);
1951 /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
1952 * A_PERIPHERAL may need care too
1954 WARNING("unhandled SUSPEND transition (%s)\n",
1955 otg_state_string(musb));
1959 /* Called during SRP */
1960 void musb_g_wakeup(struct musb *musb)
1962 musb_gadget_wakeup(&musb->g);
1965 /* called when VBUS drops below session threshold, and in other cases */
1966 void musb_g_disconnect(struct musb *musb)
1968 void __iomem *mregs = musb->mregs;
1969 u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
1971 DBG(3, "devctl %02x\n", devctl);
1974 musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
1976 /* don't draw vbus until new b-default session */
1977 (void) musb_gadget_vbus_draw(&musb->g, 0);
1979 musb->g.speed = USB_SPEED_UNKNOWN;
1980 if (musb->gadget_driver && musb->gadget_driver->disconnect) {
1981 spin_unlock(&musb->lock);
1982 musb->gadget_driver->disconnect(&musb->g);
1983 spin_lock(&musb->lock);
1986 switch (musb->xceiv->state) {
1988 #ifdef CONFIG_USB_MUSB_OTG
1989 DBG(2, "Unhandled disconnect %s, setting a_idle\n",
1990 otg_state_string(musb));
1991 musb->xceiv->state = OTG_STATE_A_IDLE;
1992 MUSB_HST_MODE(musb);
1994 case OTG_STATE_A_PERIPHERAL:
1995 musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
1996 MUSB_HST_MODE(musb);
1998 case OTG_STATE_B_WAIT_ACON:
1999 case OTG_STATE_B_HOST:
2001 case OTG_STATE_B_PERIPHERAL:
2002 case OTG_STATE_B_IDLE:
2003 musb->xceiv->state = OTG_STATE_B_IDLE;
2005 case OTG_STATE_B_SRP_INIT:
2009 musb->is_active = 0;
2012 void musb_g_reset(struct musb *musb)
2013 __releases(musb->lock)
2014 __acquires(musb->lock)
2016 void __iomem *mbase = musb->mregs;
2017 u8 devctl = musb_readb(mbase, MUSB_DEVCTL);
2020 DBG(3, "<== %s addr=%x driver '%s'\n",
2021 (devctl & MUSB_DEVCTL_BDEVICE)
2022 ? "B-Device" : "A-Device",
2023 musb_readb(mbase, MUSB_FADDR),
2025 ? musb->gadget_driver->driver.name
2029 /* report disconnect, if we didn't already (flushing EP state) */
2030 if (musb->g.speed != USB_SPEED_UNKNOWN)
2031 musb_g_disconnect(musb);
2034 else if (devctl & MUSB_DEVCTL_HR)
2035 musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
2038 /* what speed did we negotiate? */
2039 power = musb_readb(mbase, MUSB_POWER);
2040 musb->g.speed = (power & MUSB_POWER_HSMODE)
2041 ? USB_SPEED_HIGH : USB_SPEED_FULL;
2043 /* start in USB_STATE_DEFAULT */
2044 musb->is_active = 1;
2045 musb->is_suspended = 0;
2046 MUSB_DEV_MODE(musb);
2048 musb->ep0_state = MUSB_EP0_STAGE_SETUP;
2050 musb->may_wakeup = 0;
2051 musb->g.b_hnp_enable = 0;
2052 musb->g.a_alt_hnp_support = 0;
2053 musb->g.a_hnp_support = 0;
2055 /* Normal reset, as B-Device;
2056 * or else after HNP, as A-Device
2058 if (devctl & MUSB_DEVCTL_BDEVICE) {
2059 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
2060 musb->g.is_a_peripheral = 0;
2061 } else if (is_otg_enabled(musb)) {
2062 musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
2063 musb->g.is_a_peripheral = 1;
2067 /* start with default limits on VBUS power draw */
2068 (void) musb_gadget_vbus_draw(&musb->g,
2069 is_otg_enabled(musb) ? 8 : 100);
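/* (8 mA is the minimum an OTG A-device is required to supply;
 * 100 mA is the usual unconfigured budget.)
 */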