From 9483b48fd7167358d76d86d48790df4301ec7b43 Mon Sep 17 00:00:00 2001 From: Mathias Nyman Date: Tue, 7 Jan 2020 16:12:17 +0200 Subject: [PATCH 2/2] xhci: fix halted endpoint at stop endpoint command completion xhci 4.6.9: A Busy endpoint may asynchronously transition from the Running to the Halted or Error state due to error conditions detected while processing TRBs. A possible race condition may occur if software, thinking an endpoint is in the Running state, issues a Stop Endpoint Command however at the same time the xHC asynchronously transitions the endpoint to the Halted or Error state. In this case, a Context State Error may be generated for the command completion. Software may verify that this case occurred by inspecting the EP State for Halted or Error when a Stop Endpoint Command results in a Context State Error. Fix this case by resetting the halted endpoint after cleaning up the cancelled trbs from the ring. If the TRB we halted on was cancelled then queue a new set TR dequeue pointer command as usual. If it wasn't cancelled then move past it with a set TR dequeue pointer and give it back with -EPIPE status as in a normal halted endpoint case. 
Signed-off-by: Mathias Nyman --- drivers/usb/host/xhci-ring.c | 56 ++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 5c223e92b8db..ceb3fac3f1c9 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -745,11 +745,13 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, struct xhci_ring *ep_ring; struct xhci_virt_ep *ep; struct xhci_td *cur_td = NULL; + struct xhci_td *halted_td = NULL; struct xhci_td *last_unlinked_td; struct xhci_ep_ctx *ep_ctx; struct xhci_virt_device *vdev; u64 hw_deq; struct xhci_dequeue_state deq_state; + u32 comp_code; if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) { if (!xhci->devs[slot_id]) @@ -764,9 +766,19 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, vdev = xhci->devs[slot_id]; ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index); + trace_xhci_handle_cmd_stop_ep(ep_ctx); ep = &xhci->devs[slot_id]->eps[ep_index]; + comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); + + if (comp_code == COMP_CONTEXT_STATE_ERROR) { + /* endpoint is halted and needs to be reset */ + if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED) { + ep->ep_state |= EP_HALTED; + } + } + last_unlinked_td = list_last_entry(&ep->cancelled_td_list, struct xhci_td, cancelled_td_list); @@ -833,16 +845,60 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, xhci_stop_watchdog_timer_in_irq(xhci, ep); + /* + * If stop endpoint command raced with a halting endpoint we need to + * reset the endpoint first. If the TD we halted on isn't cancelled we + * must give it back with -EPIPE status, and move ring dequeue past it. 
+ * If we can't find hw_deq, or the TD we halted on, do a soft reset + */ + /* FIXME, is there a risk EP_HALTED is set from other cases */ + if (ep->ep_state & EP_HALTED) { + enum xhci_ep_reset_type reset_type = EP_SOFT_RESET; + struct xhci_td *td; + + if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { + reset_type = EP_HARD_RESET; + } else if (ep->ep_state & EP_HAS_STREAMS) { + /* soft reset, nothing else */ + } else if (!list_empty(&ep->ring->td_list)) { + hw_deq = xhci_get_hw_deq(xhci, vdev, ep_index, 0); + hw_deq &= ~0xf; + td = list_first_entry(&ep->ring->td_list, + struct xhci_td, td_list); + if (trb_in_td(xhci, td->start_seg, td->first_trb, + td->last_trb, hw_deq, false)) { + halted_td = td; + reset_type = EP_HARD_RESET; + xhci_find_new_dequeue_state(xhci, slot_id, + ep_index, 0, td, + &deq_state); + } + } + xhci_reset_halted_ep(xhci, slot_id, ep_index, reset_type); + /* FIXME xhci_clear_hub_tt_buffer(xhci, td, ep); */ + } + /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { xhci_queue_new_dequeue_state(xhci, slot_id, ep_index, &deq_state); xhci_ring_cmd_db(xhci); + } else if (ep->ep_state & EP_HALTED) { + xhci_ring_cmd_db(xhci); /* for endpoint soft reset command */ } else { /* Otherwise ring the doorbell(s) to restart queued transfers */ ring_doorbell_for_active_rings(xhci, slot_id, ep_index); } + /* If TD we halted on wasn't cancelled give it back with -EPIPE */ + if (halted_td) { + xhci_unmap_td_bounce_buffer(xhci, ep->ring, halted_td); + list_del_init(&halted_td->td_list); + inc_td_cnt(halted_td->urb); + if (last_td_in_urb(halted_td)) + xhci_giveback_urb_in_irq(xhci, halted_td, -EPIPE); + } + /* * Drop the lock and complete the URBs in the cancelled TD list. * New TDs to be cancelled might be added to the end of the list before -- 2.17.1