author	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-08-07 13:11:40 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-08-07 13:11:40 +0000
commit	8b0a8165cdad0f4133837d753649ef4682e42c3b (patch)
tree	5c58f869f31ddb1f7bd6e8bdea269b680b36c5b6 /drivers/usb/host/xhci-ring.c
parent	Releasing progress-linux version 6.8.12-1~progress7.99u1. (diff)
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--	drivers/usb/host/xhci-ring.c	| 158
1 file changed, 97 insertions(+), 61 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index b2868217de..48d745e9f9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -113,6 +113,12 @@ static bool last_td_in_urb(struct xhci_td *td)
return urb_priv->num_tds_done == urb_priv->num_tds;
}
+static bool unhandled_event_trb(struct xhci_ring *ring)
+{
+ return ((le32_to_cpu(ring->dequeue->event_cmd.flags) & TRB_CYCLE) ==
+ ring->cycle_state);
+}
+
static void inc_td_cnt(struct urb *urb)
{
struct urb_priv *urb_priv = urb->hcpriv;
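The new unhandled_event_trb() helper encodes the xHCI producer/consumer handshake: an event TRB is owned by software while its cycle bit matches the ring's cycle state, and the expected state flips each time the consumer wraps. A minimal user-space model of that handshake, a sketch with hypothetical names (struct ring, consume()) rather than kernel API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4
#define TRB_CYCLE 0x1u

struct trb { uint32_t flags; };

struct ring {
	struct trb trbs[RING_SIZE];
	unsigned int deq;	/* software (consumer) dequeue index */
	uint32_t cycle_state;	/* cycle bit value the consumer expects */
};

/* Mirrors unhandled_event_trb(): the TRB at dequeue is ours to handle
 * while its cycle bit matches the ring's cycle state. */
static bool unhandled(const struct ring *r)
{
	return (r->trbs[r->deq].flags & TRB_CYCLE) == r->cycle_state;
}

static void consume(struct ring *r)
{
	while (unhandled(r)) {
		printf("handled TRB %u\n", r->deq);
		if (++r->deq == RING_SIZE) {	/* wrap: expected bit flips */
			r->deq = 0;
			r->cycle_state ^= TRB_CYCLE;
		}
	}
}

int main(void)
{
	struct ring r = { .cycle_state = TRB_CYCLE };

	/* Producer posts two events with the current cycle bit set. */
	r.trbs[0].flags = TRB_CYCLE;
	r.trbs[1].flags = TRB_CYCLE;

	consume(&r);	/* handles TRBs 0 and 1, stops at the stale TRB 2 */
	return 0;
}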
@@ -1028,13 +1034,27 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
break;
case TD_DIRTY: /* TD is cached, clear it */
case TD_HALTED:
+ case TD_CLEARING_CACHE_DEFERRED:
+ if (cached_td) {
+ if (cached_td->urb->stream_id != td->urb->stream_id) {
+ /* Multiple streams case, defer move dq */
+ xhci_dbg(xhci,
+ "Move dq deferred: stream %u URB %p\n",
+ td->urb->stream_id, td->urb);
+ td->cancel_status = TD_CLEARING_CACHE_DEFERRED;
+ break;
+ }
+
+ /* Should never happen, but clear the TD if it does */
+ xhci_warn(xhci,
+ "Found multiple active URBs %p and %p in stream %u?\n",
+ td->urb, cached_td->urb,
+ td->urb->stream_id);
+ td_to_noop(xhci, ring, cached_td, false);
+ cached_td->cancel_status = TD_CLEARED;
+ }
+
td->cancel_status = TD_CLEARING_CACHE;
- if (cached_td)
- /* FIXME stream case, several stopped rings */
- xhci_dbg(xhci,
- "Move dq past stream %u URB %p instead of stream %u URB %p\n",
- td->urb->stream_id, td->urb,
- cached_td->urb->stream_id, cached_td->urb);
cached_td = td;
break;
}
@@ -1054,10 +1074,16 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
if (err) {
/* Failed to move past cached td, just set cached TDs to no-op */
list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
- if (td->cancel_status != TD_CLEARING_CACHE)
+ /*
+ * Deferred TDs need to have the deq pointer set after the above command
+ * completes, so if that failed we just give up on all of them (and
+ * complain loudly since this could cause issues due to caching).
+ */
+ if (td->cancel_status != TD_CLEARING_CACHE &&
+ td->cancel_status != TD_CLEARING_CACHE_DEFERRED)
continue;
- xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
- td->urb);
+ xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
+ td->urb);
td_to_noop(xhci, ring, td, false);
td->cancel_status = TD_CLEARED;
}
@@ -1154,6 +1180,15 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
break;
ep->ep_state &= ~EP_STOP_CMD_PENDING;
return;
+ case EP_STATE_STOPPED:
+ /*
+ * NEC uPD720200 sometimes sets this state and fails with
+ * Context Error while continuing to process TRBs.
+ * Be conservative and trust EP_CTX_STATE on other chips.
+ */
+ if (!(xhci->quirks & XHCI_NEC_HOST))
+ break;
+ fallthrough;
case EP_STATE_RUNNING:
/* Race, HW handled stop ep cmd before ep was running */
xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");
@@ -1335,6 +1370,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
struct xhci_td *td, *tmp_td;
+ bool deferred = false;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
@@ -1421,6 +1457,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
__func__, td->urb);
xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
+ } else if (td->cancel_status == TD_CLEARING_CACHE_DEFERRED) {
+ deferred = true;
} else {
xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
__func__, td->urb, td->cancel_status);
@@ -1430,8 +1468,17 @@ cleanup:
ep->ep_state &= ~SET_DEQ_PENDING;
ep->queued_deq_seg = NULL;
ep->queued_deq_ptr = NULL;
- /* Restart any rings with pending URBs */
- ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+
+ if (deferred) {
+ /* We have more streams to clear */
+ xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n",
+ __func__);
+ xhci_invalidate_cancelled_tds(ep);
+ } else {
+ /* Restart any rings with pending URBs */
+ xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__);
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ }
}
static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
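With streams, each stream ring needs its own Set TR Dequeue Pointer command, and only one such command may be pending per endpoint. The hunks above therefore claim one stream per invalidation pass, mark cancelled TDs on other streams TD_CLEARING_CACHE_DEFERRED, and have the Set Deq completion handler call xhci_invalidate_cancelled_tds() again until nothing is deferred, only then ringing the doorbell. A minimal sketch of that state machine, assuming a flat TD array and collapsing the kernel's per-ring details; the status names mirror the driver's, the rest is illustrative:

#include <stdbool.h>
#include <stdio.h>

enum cancel_status {
	TD_DIRTY,			/* cached by HW, not yet handled */
	TD_CLEARING_CACHE,		/* deq move issued for this TD's stream */
	TD_CLEARING_CACHE_DEFERRED,	/* waiting for another stream's command */
	TD_CLEARED,			/* done, URB can be given back */
};

struct td { unsigned int stream_id; enum cancel_status status; };

/* One invalidation pass: claim the first stream that still needs a
 * dequeue move and defer TDs sitting on other stream rings. Returns
 * true if a (simulated) Set TR Deq command was issued. */
static bool invalidate_pass(struct td *tds, int n)
{
	unsigned int claimed = 0;
	bool issued = false;
	int i;

	for (i = 0; i < n; i++) {
		if (tds[i].status == TD_CLEARED)
			continue;
		if (!issued) {
			claimed = tds[i].stream_id;
			issued = true;
		}
		tds[i].status = (tds[i].stream_id == claimed) ?
			TD_CLEARING_CACHE : TD_CLEARING_CACHE_DEFERRED;
	}
	if (issued)
		printf("Set TR Deq issued for stream %u\n", claimed);
	return issued;
}

int main(void)
{
	struct td tds[] = {
		{ .stream_id = 1 }, { .stream_id = 2 }, { .stream_id = 1 },
	};
	int i, n = sizeof(tds) / sizeof(tds[0]);
	bool pending = invalidate_pass(tds, n);

	/* Each loop iteration plays the Set TR Deq completion handler:
	 * retire TDs on the claimed stream, then either run another pass
	 * for deferred streams or declare the endpoint clear. */
	while (pending) {
		bool deferred = false;

		for (i = 0; i < n; i++) {
			if (tds[i].status == TD_CLEARING_CACHE)
				tds[i].status = TD_CLEARED;
			else if (tds[i].status == TD_CLEARING_CACHE_DEFERRED)
				deferred = true;
		}
		pending = deferred && invalidate_pass(tds, n);
	}
	printf("all cancelled TDs cleared, ring doorbell\n");
	return 0;
}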
@@ -1870,7 +1917,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
u32 port_id;
u32 portsc, cmd_reg;
int max_ports;
- int slot_id;
unsigned int hcd_portnum;
struct xhci_bus_state *bus_state;
bool bogus_port_status = false;
@@ -1922,9 +1968,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
if (hcd->speed >= HCD_USB3 &&
(portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
- slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
- if (slot_id && xhci->devs[slot_id])
- xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
+ if (port->slot_id && xhci->devs[port->slot_id])
+ xhci->devs[port->slot_id]->flags |= VDEV_PORT_ERROR;
}
if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
@@ -1982,9 +2027,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
* so the roothub behavior is consistent with external
* USB 3.0 hub behavior.
*/
- slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
- if (slot_id && xhci->devs[slot_id])
- xhci_ring_device(xhci, slot_id);
+ if (port->slot_id && xhci->devs[port->slot_id])
+ xhci_ring_device(xhci, port->slot_id);
if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
usb_wakeup_notification(hcd->self.root_hub,
@@ -2387,8 +2431,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
break;
if (remaining) {
frame->status = short_framestatus;
- if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
- sum_trbs_for_length = true;
+ sum_trbs_for_length = true;
break;
}
frame->status = 0;
@@ -2523,9 +2566,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
goto finish_td;
case COMP_STOPPED_LENGTH_INVALID:
/* stopped on ep trb with invalid length, exclude it */
- ep_trb_len = 0;
- remaining = 0;
- break;
+ td->urb->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb);
+ goto finish_td;
case COMP_USB_TRANSACTION_ERROR:
if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
(ep->err_count++ > MAX_SOFT_RETRY) ||
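For COMP_STOPPED_LENGTH_INVALID the stopped TRB's length field can't be trusted, so the hunk above now reports the bytes covered by the TRBs queued before it, as sum_trb_lengths() does, rather than zeroing remaining/ep_trb_len and falling through. A toy model of that computation, reducing a TD to an array of per-TRB lengths (the kernel helper walks ring segments instead):

#include <stdio.h>

struct trb { unsigned int len; };	/* a TD as per-TRB transfer lengths */

/* Simplified sum_trb_lengths(): total the data queued on the TRBs
 * preceding stop_idx, excluding the TRB the HC stopped on because its
 * length field is invalid. */
static unsigned int sum_trb_lengths(const struct trb *td, int stop_idx)
{
	unsigned int total = 0;
	int i;

	for (i = 0; i < stop_idx; i++)
		total += td[i].len;
	return total;
}

int main(void)
{
	struct trb td[] = { { 1024 }, { 1024 }, { 512 } };

	/* Stopped on td[2] with an invalid length: report only the bytes
	 * from the two earlier TRBs. */
	printf("actual_length = %u\n", sum_trb_lengths(td, 2));	/* 2048 */
	return 0;
}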
@@ -2638,15 +2680,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* transfer type
*/
case COMP_SUCCESS:
- if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
- break;
- if (xhci->quirks & XHCI_TRUST_TX_LENGTH ||
- ep_ring->last_td_was_short)
+ if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
trb_comp_code = COMP_SHORT_PACKET;
- else
- xhci_warn_ratelimited(xhci,
- "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n",
- slot_id, ep_index);
+ xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td short %d\n",
+ slot_id, ep_index, ep_ring->last_td_was_short);
+ }
break;
case COMP_SHORT_PACKET:
break;
@@ -2816,7 +2854,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
td_num--;
/* Is this a TRB in the currently executing TD? */
- ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
+ ep_seg = trb_in_td(xhci, td->start_seg, td->first_trb,
td->last_trb, ep_trb_dma, false);
/*
@@ -2884,9 +2922,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
"part of current TD ep_index %d "
"comp_code %u\n", ep_index,
trb_comp_code);
- trb_in_td(xhci, ep_ring->deq_seg,
- ep_ring->dequeue, td->last_trb,
- ep_trb_dma, true);
+ trb_in_td(xhci, td->start_seg, td->first_trb,
+ td->last_trb, ep_trb_dma, true);
return -ESHUTDOWN;
}
}
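Both trb_in_td() call sites above now search from the TD's own start (td->start_seg, td->first_trb) rather than the ring's software dequeue, so the event TRB is still located when the two have drifted apart. A single-segment simplification of the membership test (the real trb_in_td() walks segment links and handles wrap-around):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

/* Single-segment stand-in for trb_in_td(): the TD occupies the TRBs
 * from first_trb to last_trb; report whether the event's DMA address
 * lands on one of them. */
static bool trb_in_td(dma_addr_t first_trb, dma_addr_t last_trb,
		      dma_addr_t suspect_dma)
{
	return suspect_dma >= first_trb && suspect_dma <= last_trb;
}

int main(void)
{
	dma_addr_t first = 0x1000, last = 0x1020;	/* three 16-byte TRBs */

	/* Anchoring the search on the TD itself keeps this working even
	 * when the ring's dequeue no longer points into the TD. */
	printf("0x1010: %s\n", trb_in_td(first, last, 0x1010) ? "in TD" : "not in TD");
	printf("0x2000: %s\n", trb_in_td(first, last, 0x2000) ? "in TD" : "not in TD");
	return 0;
}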
@@ -2962,32 +2999,18 @@ err_out:
}
/*
- * This function handles all OS-owned events on the event ring. It may drop
+ * This function handles one OS-owned event on the event ring. It may drop
* xhci->lock between event processing (e.g. to pass up port status changes).
- * Returns >0 for "possibly more events to process" (caller should call again),
- * otherwise 0 if done. In future, <0 returns should indicate error code.
*/
-static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
+static int xhci_handle_event_trb(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
+ union xhci_trb *event)
{
- union xhci_trb *event;
u32 trb_type;
- /* Event ring hasn't been allocated yet. */
- if (!ir || !ir->event_ring || !ir->event_ring->dequeue) {
- xhci_err(xhci, "ERROR interrupter not ready\n");
- return -ENOMEM;
- }
-
- event = ir->event_ring->dequeue;
- /* Does the HC or OS own the TRB? */
- if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
- ir->event_ring->cycle_state)
- return 0;
-
trace_xhci_handle_event(ir->event_ring, &event->generic);
/*
- * Barrier between reading the TRB_CYCLE (valid) flag above and any
+ * Barrier between reading the TRB_CYCLE (valid) flag before, and any
* speculative reads of the event's flags/data below.
*/
rmb();
@@ -3017,15 +3040,11 @@ static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
* to make sure a watchdog timer didn't mark the host as non-responsive.
*/
if (xhci->xhc_state & XHCI_STATE_DYING) {
- xhci_dbg(xhci, "xHCI host dying, returning from "
- "event handler.\n");
- return 0;
+ xhci_dbg(xhci, "xHCI host dying, returning from event handler.\n");
+ return -ENODEV;
}
- /* Are there more items on the event ring? Caller will call us again to
- * check.
- */
- return 1;
+ return 0;
}
/*
@@ -3075,13 +3094,24 @@ static void xhci_clear_interrupt_pending(struct xhci_hcd *xhci,
}
}
+/*
+ * Handle all OS-owned events on an interrupter event ring. It may drop
+ * and reacquire xhci->lock between event processing.
+ */
static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
int event_loop = 0;
+ int err;
u64 temp;
xhci_clear_interrupt_pending(xhci, ir);
+ /* Event ring hasn't been allocated yet. */
+ if (!ir->event_ring || !ir->event_ring->dequeue) {
+ xhci_err(xhci, "ERROR interrupter event ring not ready\n");
+ return -ENOMEM;
+ }
+
if (xhci->xhc_state & XHCI_STATE_DYING ||
xhci->xhc_state & XHCI_STATE_HALTED) {
xhci_dbg(xhci, "xHCI dying, ignoring interrupt. Shouldn't IRQs be disabled?\n");
@@ -3092,7 +3122,10 @@ static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir
return -ENODEV;
}
- while (xhci_handle_event(xhci, ir) > 0) {
+ /* Process all OS owned event TRBs on this event ring */
+ while (unhandled_event_trb(ir->event_ring)) {
+ err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue);
+
/*
* If half a segment of events have been handled in one go then
* update ERDP, and force isoc trbs to interrupt more often
@@ -3108,6 +3141,9 @@ static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir
/* Update SW event ring dequeue pointer */
inc_deq(xhci, ir->event_ring);
+
+ if (err)
+ break;
}
xhci_update_erst_dequeue(xhci, ir, true);
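The rewritten loop above drains the ring one TRB at a time and, per its comment, refreshes the hardware dequeue pointer once half a segment of events has been handled in one go, so a long burst cannot leave ERDP stale until the final xhci_update_erst_dequeue() call. A rough model of that cadence; update_erdp() is a hypothetical stand-in for xhci_update_erst_dequeue(), and TRBS_PER_SEGMENT matches the kernel's 256-entry segments:

#include <stdio.h>

#define TRBS_PER_SEGMENT 256

/* Hypothetical stand-in for xhci_update_erst_dequeue(). */
static void update_erdp(int handled)
{
	printf("ERDP updated after %d events\n", handled);
}

int main(void)
{
	int pending = 300;	/* pretend 300 events sit on the ring */
	int event_loop = 0;
	int handled;

	for (handled = 1; handled <= pending; handled++) {
		/* ...handle one event TRB, then inc_deq()... */
		if (++event_loop >= TRBS_PER_SEGMENT / 2) {
			update_erdp(handled);	/* mid-burst refresh */
			event_loop = 0;
		}
	}
	update_erdp(pending);	/* final update once the ring is drained */
	return 0;
}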