} else {
/* Clear our internal halted state */
xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
+
+ /* If this Reset Endpoint command carried the TSP flag (a soft
+ * retry), the transfer state was preserved: ring the doorbell so
+ * the TD that hit the transaction error is executed again.
+ */
+ if (TRB_TSP & le32_to_cpu(trb->generic.field[3])) {
+ xhci_dbg(xhci, "Ring doorbell for slot_id %d ep_index 0x%x\n",
+ slot_id, ep_index);
+ xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
+ }
}
}
return finish_td(xhci, td, event_trb, event, ep, status, false);
}
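+
+/* Notify the platform HCD, via the optional ->endpoint_soft_retry
+ * hook, that the endpoint addressed by @dci is entering (@on = true)
+ * or leaving (@on = false) soft-retry recovery. @dci is the 1-based
+ * device context index: odd values address IN endpoints, even values
+ * OUT endpoints.
+ */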
+static void xhci_endpoint_soft_retry(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int dci, bool on)
+{
+ struct xhci_virt_device *xdev = xhci->devs[slot_id];
+ struct usb_host_endpoint *ep;
+
+ if (!xhci->shared_hcd || !xhci->shared_hcd->driver ||
+ !xhci->shared_hcd->driver->endpoint_soft_retry)
+ return;
+
+ if (xdev->udev->speed != USB_SPEED_SUPER)
+ return;
+
+ if (dci & 0x1)
+ ep = xdev->udev->ep_in[(dci - 1)/2];
+ else
+ ep = xdev->udev->ep_out[dci/2];
+
+ if (!ep)
+ return;
+
+ xhci->shared_hcd->driver->endpoint_soft_retry(xhci->shared_hcd, ep, on);
+}
+
/*
* If this function returns an error condition, it means it got a Transfer
* event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
int ret = 0;
int td_num = 0;
bool handling_skipped_tds = false;
+ bool disable_u0_ts1_detect;
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
xdev = xhci->devs[slot_id];
return -ENODEV;
}
+ /* The hook is optional; soft retry stays disabled without it. */
+ disable_u0_ts1_detect = xhci->shared_hcd &&
+ xhci->shared_hcd->driver->is_u0_ts1_detect_disabled &&
+ xhci->shared_hcd->driver->is_u0_ts1_detect_disabled(
+ xhci->shared_hcd);
+
/* Endpoint ID is 1 based, our index is zero based */
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
ep = &xdev->eps[ep_index];
* transfer type
*/
case COMP_SUCCESS:
- if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
- break;
+ if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
+ if (disable_u0_ts1_detect)
+ goto check_soft_try;
+ else
+ break;
+ }
if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
trb_comp_code = COMP_SHORT_TX;
else
xhci_warn_ratelimited(xhci,
"WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
case COMP_SHORT_TX:
+check_soft_try:
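+ /* A completed transfer after a soft retry means the endpoint
+ * recovered: clear the flag and tell the platform driver the
+ * retry is over.
+ */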
+ if (disable_u0_ts1_detect && ep_ring->soft_try) {
+ xhci_dbg(xhci, "soft retry completed successfully\n");
+ ep_ring->soft_try = false;
+ xhci_endpoint_soft_retry(xhci,
+ slot_id, ep_index + 1, false);
+ }
break;
case COMP_STOP:
xhci_dbg(xhci, "Stopped on Transfer TRB\n");
status = -EILSEQ;
break;
case COMP_SPLIT_ERR:
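+ /* Split transaction errors only happen for LS/FS devices behind
+ * a hub, so soft retry never applies; report -EPROTO directly.
+ */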
+ xhci_dbg(xhci, "Transfer error on endpoint\n");
+ status = -EPROTO;
+ break;
case COMP_TX_ERR:
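+ /* On SuperSpeed, try to recover from a transaction error with a
+ * soft retry before failing the TD with -EPROTO: queue a Reset
+ * Endpoint command with TSP set, and let its completion handler
+ * ring the doorbell again. Isochronous endpoints are skipped,
+ * since isoc transfers are never retried.
+ */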
+ if (disable_u0_ts1_detect &&
+ xdev->udev->speed == USB_SPEED_SUPER &&
+ ep_ring->type != TYPE_ISOC) {
+ if (!ep_ring->soft_try) {
+ xhci_dbg(xhci, "SuperSpeed transfer error, do soft retry\n");
+ ret = xhci_queue_soft_retry(xhci,
+ slot_id, ep_index);
+ if (!ret) {
+ xhci_endpoint_soft_retry(xhci,
+ slot_id, ep_index + 1, true);
+ xhci_ring_cmd_db(xhci);
+ ep_ring->soft_try = true;
+ goto cleanup;
+ }
+ } else {
+ xhci_dbg(xhci, "soft retry complete but transfer still failed\n");
+ ep_ring->soft_try = false;
+ }
+ xhci_endpoint_soft_retry(xhci,
+ slot_id, ep_index + 1, false);
+ }
xhci_dbg(xhci, "Transfer error on endpoint\n");
status = -EPROTO;
break;
return queue_command(xhci, cmd, 0, 0, 0,
trb_slot_id | trb_ep_index | type, false);
}
+
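+/* Queue a Soft Retry (xHCI 1.1, section 4.6.8.1): a Reset Endpoint
+ * command with the Transfer State Preserve (TSP) flag set. The
+ * endpoint recovers from the transaction error without its transfer
+ * state being reset, so ringing the doorbell afterwards re-runs the
+ * failed TD. On success the command is freed by the command
+ * completion handler, so no completion is allocated here.
+ */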
+int xhci_queue_soft_retry(struct xhci_hcd *xhci, int slot_id,
+ unsigned int ep_index)
+{
+ struct xhci_command *command;
+ u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
+ u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+ u32 type = TRB_TYPE(TRB_RESET_EP) | TRB_TSP;
+ int ret;
+
+ command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+ if (!command)
+ return -ENOMEM;
+
+ ret = queue_command(xhci, command, 0, 0, 0,
+ trb_slot_id | trb_ep_index | type, false);
+ /* On queue failure the command would otherwise leak. */
+ if (ret)
+ xhci_free_command(xhci, command);
+ return ret;
+}
* xHCI host controller driver
*
* Copyright (C) 2008 Intel Corp.
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
#define XDEV_U0 (0x0 << 5)
#define XDEV_U2 (0x2 << 5)
#define XDEV_U3 (0x3 << 5)
+#define XDEV_RXDETECT (0x5 << 5)
#define XDEV_INACTIVE (0x6 << 5)
#define XDEV_RESUME (0xf << 5)
/* true: port has power (see HCC_PPC) */
/* Block Event Interrupt */
#define TRB_BEI (1<<9)
+/* Transfer State Preserve: bit 9 of a Reset Endpoint command TRB.
+ * It shares the bit position with TRB_BEI above, which applies to
+ * transfer TRBs instead.
+ */
+#define TRB_TSP (1<<9)
/* Control transfer TRB specific fields */
#define TRB_DIR_IN (1<<16)
enum xhci_ring_type type;
bool last_td_was_short;
struct radix_tree_root *trb_address_map;
+ bool soft_try; /* soft-retry recovery in progress on this ring */
};
struct xhci_erst_entry {
dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed);
int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
int slot_id, unsigned int ep_index);
+int xhci_queue_soft_retry(struct xhci_hcd *xhci, int slot_id,
+ unsigned int ep_index);
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
u32 slot_id);
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,