/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
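/*
 * For example (assuming the driver is built as the xhci-hcd module), the
 * quirk can be turned on at load time or toggled later through sysfs:
 *
 *   modprobe xhci-hcd link_quirk=1
 *   echo 1 > /sys/module/xhci_hcd/parameters/link_quirk
 */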
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: the "usec" timeout passes
 * (major hardware flakeout), or the register reads as all-ones (hardware
 * removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32	result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}
/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 microframes of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
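 * (A microframe is 125 microseconds, so 16 microframes works out to 2 ms;
 * XHCI_MAX_HALT_USEC below is assumed to be sized to cover at least that.)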
 * XXX: shouldn't we set HC_STATE_HALT here somewhere?
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	xhci_dbg(xhci, "// Halt the HC\n");
	/* Disable all interrupts from the host controller */
	xhci_quiesce(xhci);
	return handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
}
/*
 * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	/* XXX: Why does EHCI set this here?  Shouldn't other code do this? */
	xhci_to_hcd(xhci)->state = HC_STATE_HALT;

	return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
}
/* Set up MSI-X table for entry 0 (may claim other entries later) */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	xhci->msix_count = 0;
	/* XXX: did I do this right?  ixgbe does kcalloc for more than one */
	xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}
	xhci->msix_entries[0].entry = 0;

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	/*
	 * Pass the xhci pointer value as the request_irq "cookie".
	 * If more irqs are added, this will need to be unique for each one.
	 */
	ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
			"xHCI", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
		goto disable_msix;
	}
	xhci_dbg(xhci, "Finished setting up MSI-X\n");
	return 0;

disable_msix:
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}
/* XXX: code duplication; can xhci_setup_msix call this? */
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	if (!xhci->msix_entries)
		return;

	/* The cookie must match the one passed to request_irq() above */
	free_irq(xhci->msix_entries[0].vector, xhci_to_hcd(xhci));
	pci_disable_msix(pdev);
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
}
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}
/*
 * Called in interrupt context when there might be work
 * queued on the event ring
 *
 * xhci->lock must be held by caller.
 */
static void xhci_work(struct xhci_hcd *xhci)
{
	u32 temp;
	u64 temp_64;

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp |= STS_EINT;
	xhci_writel(xhci, temp, &xhci->op_regs->status);
	/* FIXME when MSI-X is supported and there are multiple vectors */
	/* Clear the MSI-X event interrupt status */

	/* Acknowledge the interrupt */
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	temp |= 0x3;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
	/* Flush posted writes */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);

	/* FIXME this should be a delayed service routine that clears the EHB */
	xhci_handle_event(xhci);

	/* Clear the event handler busy flag (RW1C); the event ring should be empty. */
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
	/* Flush posted writes -- FIXME is this necessary? */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);
}
/*-------------------------------------------------------------------------*/

/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 temp, temp2;
	union xhci_trb *trb;

	spin_lock(&xhci->lock);
	trb = xhci->event_ring->dequeue;
	/* Check if the xHC generated the interrupt, or the irq is shared */
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	if (temp == 0xffffffff && temp2 == 0xffffffff)
		goto hw_died;

	if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
		spin_unlock(&xhci->lock);
		return IRQ_NONE;
	}
	xhci_dbg(xhci, "op reg status = %08x\n", temp);
	xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
	xhci_dbg(xhci, "Event ring dequeue ptr:\n");
	xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
			(unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
			lower_32_bits(trb->link.segment_ptr),
			upper_32_bits(trb->link.segment_ptr),
			(unsigned int) trb->link.intr_target,
			(unsigned int) trb->link.control);

	if (temp & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
hw_died:
		xhci_to_hcd(xhci)->state = HC_STATE_HALT;
		spin_unlock(&xhci->lock);
		return -ESHUTDOWN;
	}

	xhci_work(xhci);
	spin_unlock(&xhci->lock);

	return IRQ_HANDLED;
}
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			struct xhci_ring *ring = xhci->devs[i]->eps[j].ring;
			if (!ring)
				continue;
			xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
			xhci_debug_segment(xhci, ring->deq_seg);
		}
	}

	if (xhci->noops_submitted != NUM_TEST_NOOPS)
		if (xhci_setup_one_noop(xhci))
			xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	void (*doorbell)(struct xhci_hcd *) = NULL;

	hcd->uses_new_polling = 1;
	hcd->poll_rh = 0;

	xhci_dbg(xhci, "xhci_run\n");
#if 0	/* FIXME: MSI not setup yet */
	/* Do this at the very last minute */
	ret = xhci_setup_msix(xhci);
	if (!ret)
		return ret;

	return -ENOSYS;
#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	hcd->state = HC_STATE_RUNNING;
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	if (NUM_TEST_NOOPS > 0)
		doorbell = xhci_setup_one_noop(xhci);

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);
	/* Flush PCI posted writes */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
	if (doorbell)
		(*doorbell)(xhci);

	xhci_dbg(xhci, "Finished xhci_run\n");
	return 0;
}
/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

#if 0	/* No MSI yet */
	xhci_cleanup_msix(xhci);
#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
		    xhci_readl(xhci, &xhci->op_regs->status));
}
/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

#if 0
	xhci_cleanup_msix(xhci);
#endif

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
		    xhci_readl(xhci, &xhci->op_regs->status));
}
/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 *
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
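/*
 * Worked example of the index math above: for ep 0x81 (ep 1 IN),
 * index = (1 * 2) + 1 - 1 = 2; for ep 0x02 (ep 2 OUT),
 * index = (2 * 2) + 0 - 1 = 3; and for the default control endpoint,
 * index = (0 * 2) = 0.
 */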
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}
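/*
 * Continuing the example above: ep 1 IN has endpoint index 2, so its
 * context flag is 1 << (2 + 1) = 0x8, while the slot context flag
 * (SLOT_FLAG) is bit 0 and the ep 0 flag (EP0_FLAG) is bit 1.
 */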
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}
/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, const char *func) {
	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}
	if (!udev->slot_id) {
		printk(KERN_DEBUG "xHCI %s called with unaddressed device\n",
				func);
		return -EINVAL;
	}
	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);
/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2);
	max_packet_size = urb->dev->ep0.desc.wMaxPacketSize;
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size);

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = EP0_FLAG;
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = SLOT_FLAG;
	}
	return ret;
}
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!xhci->devs || !xhci->devs[slot_id]) {
		if (!in_interrupt())
			dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
		ret = -EINVAL;
		goto exit;
	}
	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}
	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0)
				return ret;
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		ret = -EINVAL;
	}
exit:
	return ret;
}
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are three cases to handle:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret;
	u32 temp;
	struct xhci_hcd *xhci;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		td = (struct xhci_td *) urb->hcpriv;

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
		kfree(td);
		return ret;
	}

	xhci_dbg(xhci, "Cancel URB %p\n", urb);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = ep->ring;
	xhci_dbg(xhci, "Endpoint ring:\n");
	xhci_debug_ring(xhci, ep_ring);
	td = (struct xhci_td *) urb->hcpriv;

	list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);

	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
			ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= drop_flag;
	new_drop_flags = ctrl_ctx->drop_flags;

	ctrl_ctx->add_flags &= ~drop_flag;
	new_add_flags = ctrl_ctx->add_flags;

	last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= ~LAST_CTX_MASK;
		slot_ctx->dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = slot_ctx->dev_info;

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
				udev, ep, GFP_KERNEL) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= added_ctxs;
	new_add_flags = ctrl_ctx->add_flags;

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = ctrl_ctx->drop_flags;

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= ~LAST_CTX_MASK;
		slot_ctx->dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = slot_ctx->dev_info;

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= ~LAST_CTX_MASK;
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= LAST_CTX(1);
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, int *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_ENOMEM:
		dev_warn(&udev->dev, "Not enough host controller resources "
				"for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
		dev_warn(&udev->dev, "Not enough bandwidth "
				"for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
				"add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, int *cmd_status)
{
	int ret;
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
	case COMP_EINVAL:
		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
				"context command.\n");
		ret = -EINVAL;
		break;
	case COMP_EBADSLT:
		dev_warn(&udev->dev, "WARN: slot not enabled for "
				"evaluate context command.\n");
	case COMP_CTX_STATE:
		dev_warn(&udev->dev, "WARN: invalid context state for "
				"evaluate context command.\n");
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful evaluate context command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}
/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct xhci_command *command,
		bool ctx_change, bool must_succeed)
{
	int ret;
	int timeleft;
	unsigned long flags;
	struct xhci_container_ctx *in_ctx;
	struct completion *cmd_completion;
	int *cmd_status;
	struct xhci_virt_device *virt_dev;

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[udev->slot_id];
	if (command) {
		in_ctx = command->in_ctx;
		cmd_completion = command->completion;
		cmd_status = &command->status;
		command->command_trb = xhci->cmd_ring->enqueue;
		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
	} else {
		in_ctx = virt_dev->in_ctx;
		cmd_completion = &virt_dev->cmd_completion;
		cmd_status = &virt_dev->cmd_status;
	}

	if (!ctx_change)
		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
	else
		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
				udev->slot_id);
	if (ret < 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
		return -ENOMEM;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete */
	timeleft = wait_for_completion_interruptible_timeout(
			cmd_completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for %s command\n",
				timeleft == 0 ? "Timeout" : "Signal",
				ctx_change == 0 ?
					"configure endpoint" :
					"evaluate context");
		/* FIXME cancel the configure endpoint command */
		return -ETIME;
	}

	if (!ctx_change)
		return xhci_configure_endpoint_result(xhci, udev, cmd_status);
	return xhci_evaluate_context_result(xhci, udev, cmd_status);
}
/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface.  Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	struct xhci_hcd *xhci;
	struct xhci_virt_device	*virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);

	if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags |= SLOT_FLAG;
	ctrl_ctx->add_flags &= ~EP0_FLAG;
	ctrl_ctx->drop_flags &= ~SLOT_FLAG;
	ctrl_ctx->drop_flags &= ~EP0_FLAG;
	xhci_dbg(xhci, "New Input Control Context:\n");
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

	ret = xhci_configure_endpoint(xhci, udev, NULL,
			false, false);
	if (ret) {
		/* Callee should call reset_bandwidth() */
		return ret;
	}

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

	xhci_zero_in_ctx(xhci, virt_dev);
	/* Free any old rings */
	for (i = 1; i < 31; ++i) {
		if (virt_dev->eps[i].new_ring) {
			xhci_ring_free(xhci, virt_dev->eps[i].ring);
			virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
			virt_dev->eps[i].new_ring = NULL;
		}
	}

	return ret;
}
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device	*virt_dev;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
		return;
	xhci = hcd_to_xhci(hcd);

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return;
	}
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];
	/* Free any rings allocated for added endpoints */
	for (i = 0; i < 31; ++i) {
		if (virt_dev->eps[i].new_ring) {
			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
			virt_dev->eps[i].new_ring = NULL;
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
}
static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		u32 add_flags, u32 drop_flags)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ctrl_ctx->add_flags = add_flags;
	ctrl_ctx->drop_flags = drop_flags;
	xhci_slot_copy(xhci, in_ctx, out_ctx);
	ctrl_ctx->add_flags |= SLOT_FLAG;

	xhci_dbg(xhci, "Input Context:\n");
	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}
void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	dma_addr_t addr;

	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ep_index);
	in_ctx = xhci->devs[slot_id]->in_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
			deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit config ep after "
				"reset ep command\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
				deq_state->new_deq_seg,
				deq_state->new_deq_ptr);
		return;
	}
	ep_ctx->deq = addr | deq_state->new_cycle_state;

	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
}
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
		struct usb_device *udev, unsigned int ep_index)
{
	struct xhci_dequeue_state deq_state;
	struct xhci_virt_ep *ep;

	xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
	xhci_find_new_dequeue_state(xhci, udev->slot_id,
			ep_index, ep->stopped_td,
			&deq_state);

	/* HW with the reset endpoint quirk will use the saved dequeue state to
	 * issue a configure endpoint command later.
	 */
	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
		xhci_dbg(xhci, "Queueing new dequeue state\n");
		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
				ep_index, &deq_state);
	} else {
		/* Better hope no one uses the input context between now and the
		 * reset endpoint completion!
		 */
		xhci_dbg(xhci, "Setting up input context for "
				"configure endpoint command\n");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
	}
}
/* Deal with stalled endpoints.  The core should have sent the control message
 * to clear the halt condition.  However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 */
void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int ep_index;
	unsigned long flags;
	int ret;
	struct xhci_virt_ep *virt_ep;

	xhci = hcd_to_xhci(hcd);
	udev = (struct usb_device *) ep->hcpriv;
	/* Called with a root hub endpoint (or an endpoint that wasn't added
	 * with xhci_add_endpoint()
	 */
	if (!ep->hcpriv)
		return;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	if (!virt_ep->stopped_td) {
		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
				ep->desc.bEndpointAddress);
		return;
	}
	if (usb_endpoint_xfer_control(&ep->desc)) {
		xhci_dbg(xhci, "Control endpoint stall already handled.\n");
		return;
	}

	xhci_dbg(xhci, "Queueing reset endpoint command\n");
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
	/*
	 * Can't change the ring dequeue pointer until it's transitioned to the
	 * stopped state, which is only upon a successful reset endpoint
	 * command.  Better hope that last command worked!
	 */
	if (!ret) {
		xhci_cleanup_stalled_ring(xhci, udev, ep_index);
		kfree(virt_ep->stopped_td);
		xhci_ring_cmd_db(xhci);
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (ret)
		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}
/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled.  Free any HC data structures associated with that device.
 */
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	u32 state;

	if (udev->slot_id == 0)
		return;

	spin_lock_irqsave(&xhci->lock, flags);
	/* Don't disable the slot if the host controller is dead. */
	state = xhci_readl(xhci, &xhci->op_regs->status);
	if (state == 0xffffffff) {
		xhci_free_virt_device(xhci, udev->slot_id);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	/*
	 * Event command completion handler will free any data structures
	 * associated with the slot.  XXX Can free sleep?
	 */
}
/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int timeleft;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return 0;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* XXX: how much time for xHC slot assignment? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the enable slot request */
		return 0;
	}

	if (!xhci->slot_id) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		return 0;
	}
	/* xhci_alloc_virt_device() does not touch rings; no need to lock */
	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
		/* Disable slot, if we can do it without mem alloc */
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		spin_lock_irqsave(&xhci->lock, flags);
		if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
			xhci_ring_cmd_db(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}
	udev->slot_id = xhci->slot_id;
	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */

	return 1;
}
/*
 * Issue an Address Device command (which will issue a SetAddress request to
 * the device).
 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
 * we should only issue and wait on one address command at the same time.
 *
 * We add one to the device address issued by the hardware because the USB core
 * uses address 1 for the root hubs (even though they're not really devices).
 */
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	unsigned long flags;
	int timeleft;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u64 temp_64;

	if (!udev->slot_id) {
		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
		return -EINVAL;
	}

	virt_dev = xhci->devs[udev->slot_id];

	/* If this is a Set Address to an unconfigured device, setup ep 0 */
	if (!udev->config)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, assume the core has the device configured how it wants */
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
			udev->slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() "recovery interval" required by USB and aborting the
	 * command on a timeout.
	 */
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the address device command */
		return -ETIME;
	}

	switch (virt_dev->cmd_status) {
	case COMP_CTX_STATE:
	case COMP_EBADSLT:
		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
				udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_TX_ERR:
		dev_warn(&udev->dev, "Device not responding to set address.\n");
		ret = -EPROTO;
		break;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful Address Device command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
		ret = -EINVAL;
		break;
	}
	if (ret)
		return ret;
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
			udev->slot_id,
			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
			(unsigned long long)
				xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
			(unsigned long long)virt_dev->out_ctx->dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
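	/* For example, if the xHC handed out hardware address 1 for the first
	 * device, the core will see devnum 2, since devnum 1 is reserved for
	 * the root hub.
	 */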
	/* Zero the input context control for later use */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
	/* XXX Meh, not sure if anyone else but choose_address uses this. */
	set_bit(udev->devnum, udev->bus->devmap.devicemap);

	return 0;
}
/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	ctrl_ctx->add_flags |= SLOT_FLAG;
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= DEV_HUB;
	if (tt->multi)
		slot_ctx->dev_info |= DEV_MTT;
	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild);
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		slot_ctx->tt_info |= TT_THINK_TIME(think_time);
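		/* Example of the conversion: the USB core stores think time in
		 * ns (666 ns per 8 FS bit times), so a hub reporting 8 FS bit
		 * times has think_time 666, and (666 / 666) - 1 = 0, the
		 * encoding the xHC expects; 32 bit times is stored as 2664 ns,
		 * giving (2664 / 666) - 1 = 3.
		 */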
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}
int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size.  Why? */
	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}
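/*
 * The MFINDEX register counts 125 us microframes, so shifting right by 3
 * (dividing by 8) converts it to 1 ms frame numbers, which is what the
 * USB core expects from get_frame_number().
 */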
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	int retval;

	retval = xhci_register_pci();

	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering PCI driver.");
		return retval;
	}

	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8*32)*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
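	/* The arithmetic reads as (number of 32-bit fields) * 32 bits / 8 bits
	 * per byte.  E.g. xhci_slot_ctx is eight 32-bit words, or 32 bytes,
	 * and xhci_run_regs is 8 words plus 128 interrupter register sets of
	 * 8 words each: (8 + 8*128)*32/8 = 4128 bytes.
	 */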
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	return 0;
}
module_init(xhci_hcd_init);

static void __exit xhci_hcd_cleanup(void)
{
	xhci_unregister_pci();
}
module_exit(xhci_hcd_cleanup);