Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
 drivers/usb/host/xhci-ring.c | 220
 1 file changed, 129 insertions(+), 91 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 74bf5c60a260..c50c902d009e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -323,7 +323,7 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
if (i_cmd->status != COMP_COMMAND_ABORTED)
continue;
- i_cmd->status = COMP_STOPPED;
+ i_cmd->status = COMP_COMMAND_RING_STOPPED;
xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
i_cmd->command_trb);
@@ -480,10 +480,34 @@ struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
return NULL;
}
+
+/*
+ * Get the hw dequeue pointer xHC stopped on, either directly from the
+ * endpoint context, or if streams are in use from the stream context.
+ * The returned hw_dequeue contains the lowest four bits with cycle state
+ * and possible stream context type.
+ */
+static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
+ unsigned int ep_index, unsigned int stream_id)
+{
+ struct xhci_ep_ctx *ep_ctx;
+ struct xhci_stream_ctx *st_ctx;
+ struct xhci_virt_ep *ep;
+
+ ep = &vdev->eps[ep_index];
+
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
+ return le64_to_cpu(st_ctx->stream_ring);
+ }
+ ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
+ return le64_to_cpu(ep_ctx->deq);
+}
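
A minimal consumer sketch for the new helper (illustrative only, using identifiers that appear in this patch): bit 0 of the returned value is the cycle state (DCS), and for stream contexts the remaining low bits carry the stream context type, which is why callers mask off the low four bits before comparing pointers.

	u64 hw_deq = xhci_get_hw_deq(xhci, vdev, ep_index, stream_id);
	u32 cycle_state = hw_deq & 0x1;		/* as in state->new_cycle_state below */
	dma_addr_t deq_dma = hw_deq & ~0xfULL;	/* as in hw_deq &= ~0xf below */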
+
/*
* Move the xHC's endpoint ring dequeue pointer past cur_td.
* Record the new state of the xHC's endpoint ring dequeue segment,
- * dequeue pointer, and new consumer cycle state in state.
+ * dequeue pointer, stream id, and new consumer cycle state in state.
* Update our internal representation of the ring's dequeue pointer.
*
* We do this in three jumps:
@@ -521,24 +545,15 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
stream_id);
return;
}
-
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Finding endpoint context");
- /* 4.6.9 the css flag is written to the stream context for streams */
- if (ep->ep_state & EP_HAS_STREAMS) {
- struct xhci_stream_ctx *ctx =
- &ep->stream_info->stream_ctx_array[stream_id];
- hw_dequeue = le64_to_cpu(ctx->stream_ring);
- } else {
- struct xhci_ep_ctx *ep_ctx
- = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
- hw_dequeue = le64_to_cpu(ep_ctx->deq);
- }
+ hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
new_seg = ep_ring->deq_seg;
new_deq = ep_ring->dequeue;
state->new_cycle_state = hw_dequeue & 0x1;
+ state->stream_id = stream_id;
/*
* We want to find the pointer, segment and cycle state of the new trb
@@ -641,8 +656,8 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
xhci_urb_free_priv(urb_priv);
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock(&xhci->lock);
- usb_hcd_giveback_urb(hcd, urb, status);
trace_xhci_urb_giveback(urb);
+ usb_hcd_giveback_urb(hcd, urb, status);
spin_lock(&xhci->lock);
}
@@ -691,7 +706,7 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
struct xhci_td *last_unlinked_td;
struct xhci_ep_ctx *ep_ctx;
struct xhci_virt_device *vdev;
-
+ u64 hw_deq;
struct xhci_dequeue_state deq_state;
if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
@@ -715,7 +730,6 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
if (list_empty(&ep->cancelled_td_list)) {
xhci_stop_watchdog_timer_in_irq(xhci, ep);
- ep->stopped_td = NULL;
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
return;
}
@@ -753,12 +767,19 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
* If we stopped on the TD we need to cancel, then we have to
* move the xHC endpoint ring dequeue pointer past this TD.
*/
- if (cur_td == ep->stopped_td)
+ hw_deq = xhci_get_hw_deq(xhci, vdev, ep_index,
+ cur_td->urb->stream_id);
+ hw_deq &= ~0xf;
+
+ if (trb_in_td(xhci, cur_td->start_seg, cur_td->first_trb,
+ cur_td->last_trb, hw_deq, false)) {
xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
- cur_td->urb->stream_id,
- cur_td, &deq_state);
- else
+ cur_td->urb->stream_id,
+ cur_td, &deq_state);
+ } else {
td_to_noop(xhci, ep_ring, cur_td, false);
+ }
+
remove_finished_td:
/*
* The event handler won't see a completion for this TD anymore,
@@ -773,15 +794,13 @@ remove_finished_td:
/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
- ep->stopped_td->urb->stream_id, &deq_state);
+ &deq_state);
xhci_ring_cmd_db(xhci);
} else {
/* Otherwise ring the doorbell(s) to restart queued transfers */
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
- ep->stopped_td = NULL;
-
/*
* Drop the lock and complete the URBs in the cancelled TD list.
* New TDs to be cancelled might be added to the end of the list before
@@ -1380,7 +1399,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
/* If CMD ring stopped we own the trbs between enqueue and dequeue */
- if (cmd_comp_code == COMP_STOPPED) {
+ if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
complete_all(&xhci->cmd_ring_stop_completion);
return;
}
@@ -1436,8 +1455,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
break;
case TRB_CMD_NOOP:
/* Is this an aborted command turned to NO-OP? */
- if (cmd->status == COMP_STOPPED)
- cmd_comp_code = COMP_STOPPED;
+ if (cmd->status == COMP_COMMAND_RING_STOPPED)
+ cmd_comp_code = COMP_COMMAND_RING_STOPPED;
break;
case TRB_RESET_EP:
WARN_ON(slot_id != TRB_TO_SLOT_ID(
@@ -1800,7 +1819,8 @@ struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
unsigned int stream_id,
- struct xhci_td *td, union xhci_trb *ep_trb)
+ struct xhci_td *td, union xhci_trb *ep_trb,
+ enum xhci_ep_reset_type reset_type)
{
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
struct xhci_command *command;
@@ -1809,12 +1829,11 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
return;
ep->ep_state |= EP_HALTED;
- ep->stopped_stream = stream_id;
- xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
- xhci_cleanup_stalled_ring(xhci, ep_index, td);
+ xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
- ep->stopped_stream = 0;
+ if (reset_type == EP_HARD_RESET)
+ xhci_cleanup_stalled_ring(xhci, ep_index, stream_id, td);
xhci_ring_cmd_db(xhci);
}
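
A hedged sketch contrasting the two reset flavors wired in here (both calls appear verbatim later in this patch): a hard reset follows a genuine stall and is paired with stalled-ring cleanup, while a soft reset only clears the host-side halt state.

	/* Hard reset: endpoint stalled on a known TD; ring cleanup follows */
	xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
			ep_ring->stream_id, td, ep_trb, EP_HARD_RESET);

	/* Soft reset: no valid TRB to point at, recover the host side only */
	xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, 0,
			NULL, NULL, EP_SOFT_RESET);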
@@ -1909,7 +1928,7 @@ static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
union xhci_trb *ep_trb, struct xhci_transfer_event *event,
- struct xhci_virt_ep *ep, int *status, bool skip)
+ struct xhci_virt_ep *ep, int *status)
{
struct xhci_virt_device *xdev;
struct xhci_ep_ctx *ep_ctx;
@@ -1925,9 +1944,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
- if (skip)
- goto td_cleanup;
-
if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
trb_comp_code == COMP_STOPPED ||
trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
@@ -1935,7 +1951,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
* stopped TDs. A stopped TD may be restarted, so don't update
* the ring dequeue pointer or take this TD off any lists yet.
*/
- ep->stopped_td = td;
return 0;
}
if (trb_comp_code == COMP_STALL_ERROR ||
@@ -1947,7 +1962,8 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
* The class driver clears the device side halt later.
*/
xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
- ep_ring->stream_id, td, ep_trb);
+ ep_ring->stream_id, td, ep_trb,
+ EP_HARD_RESET);
} else {
/* Update ring dequeue pointer */
while (ep_ring->dequeue != td->last_trb)
@@ -1955,7 +1971,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
inc_deq(xhci, ep_ring);
}
-td_cleanup:
return xhci_td_cleanup(xhci, td, ep_ring, status);
}
@@ -2075,7 +2090,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
td->urb->actual_length = requested;
finish_td:
- return finish_td(xhci, td, ep_trb, event, ep, status, false);
+ return finish_td(xhci, td, ep_trb, event, ep, status);
}
/*
@@ -2162,7 +2177,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
td->urb->actual_length += frame->actual_length;
- return finish_td(xhci, td, ep_trb, event, ep, status, false);
+ return finish_td(xhci, td, ep_trb, event, ep, status);
}
static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
@@ -2190,7 +2205,7 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
inc_deq(xhci, ep_ring);
inc_deq(xhci, ep_ring);
- return finish_td(xhci, td, NULL, event, ep, status, true);
+ return xhci_td_cleanup(xhci, td, ep_ring, status);
}
/*
@@ -2252,7 +2267,7 @@ finish_td:
remaining);
td->urb->actual_length = 0;
}
- return finish_td(xhci, td, ep_trb, event, ep, status, false);
+ return finish_td(xhci, td, ep_trb, event, ep, status);
}
/*
@@ -2280,39 +2295,46 @@ static int handle_tx_event(struct xhci_hcd *xhci,
bool handling_skipped_tds = false;
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+ ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+ ep_trb_dma = le64_to_cpu(event->buffer);
+
xdev = xhci->devs[slot_id];
if (!xdev) {
xhci_err(xhci, "ERROR Transfer event pointed to bad slot %u\n",
slot_id);
- xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
- (unsigned long long) xhci_trb_virt_to_dma(
- xhci->event_ring->deq_seg,
- xhci->event_ring->dequeue),
- lower_32_bits(le64_to_cpu(event->buffer)),
- upper_32_bits(le64_to_cpu(event->buffer)),
- le32_to_cpu(event->transfer_len),
- le32_to_cpu(event->flags));
- return -ENODEV;
- }
-
- /* Endpoint ID is 1 based, our index is zero based */
- ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+ goto err_out;
+ }
+
ep = &xdev->eps[ep_index];
- ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+ ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
- if (!ep_ring || GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
+
+ if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
xhci_err(xhci,
- "ERROR Transfer event for disabled endpoint slot %u ep %u or incorrect stream ring\n",
+ "ERROR Transfer event for disabled endpoint slot %u ep %u\n",
slot_id, ep_index);
- xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
- (unsigned long long) xhci_trb_virt_to_dma(
- xhci->event_ring->deq_seg,
- xhci->event_ring->dequeue),
- lower_32_bits(le64_to_cpu(event->buffer)),
- upper_32_bits(le64_to_cpu(event->buffer)),
- le32_to_cpu(event->transfer_len),
- le32_to_cpu(event->flags));
- return -ENODEV;
+ goto err_out;
+ }
+
+ /* Some transfer events don't always point to a trb, see xhci 4.17.4 */
+ if (!ep_ring) {
+ switch (trb_comp_code) {
+ case COMP_STALL_ERROR:
+ case COMP_USB_TRANSACTION_ERROR:
+ case COMP_INVALID_STREAM_TYPE_ERROR:
+ case COMP_INVALID_STREAM_ID_ERROR:
+ xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, 0,
+ NULL, NULL, EP_SOFT_RESET);
+ goto cleanup;
+ case COMP_RING_UNDERRUN:
+ case COMP_RING_OVERRUN:
+ goto cleanup;
+ default:
+ xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
+ slot_id, ep_index);
+ goto err_out;
+ }
}
/* Count current td numbers if ep->skip is set */
@@ -2321,8 +2343,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
td_num++;
}
- ep_trb_dma = le64_to_cpu(event->buffer);
- trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
/* Look for common error cases */
switch (trb_comp_code) {
/* Skip codes that require special handling depending on
@@ -2339,6 +2359,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
slot_id, ep_index);
case COMP_SHORT_PACKET:
break;
+ /* Completion codes for endpoint stopped state */
case COMP_STOPPED:
xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n",
slot_id, ep_index);
@@ -2353,18 +2374,13 @@ static int handle_tx_event(struct xhci_hcd *xhci,
"Stopped with short packet transfer detected for slot %u ep %u\n",
slot_id, ep_index);
break;
+ /* Completion codes for endpoint halted state */
case COMP_STALL_ERROR:
xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
ep_index);
ep->ep_state |= EP_HALTED;
status = -EPIPE;
break;
- case COMP_TRB_ERROR:
- xhci_warn(xhci,
- "WARN: TRB error for slot %u ep %u on endpoint\n",
- slot_id, ep_index);
- status = -EILSEQ;
- break;
case COMP_SPLIT_TRANSACTION_ERROR:
case COMP_USB_TRANSACTION_ERROR:
xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n",
@@ -2376,6 +2392,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
slot_id, ep_index);
status = -EOVERFLOW;
break;
+ /* Completion codes for endpoint error state */
+ case COMP_TRB_ERROR:
+ xhci_warn(xhci,
+ "WARN: TRB error for slot %u ep %u on endpoint\n",
+ slot_id, ep_index);
+ status = -EILSEQ;
+ break;
+ /* Completion codes not indicating endpoint state change */
case COMP_DATA_BUFFER_ERROR:
xhci_warn(xhci,
"WARN: HC couldn't access mem fast enough for slot %u ep %u\n",
@@ -2413,12 +2437,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
ep_index);
goto cleanup;
- case COMP_INCOMPATIBLE_DEVICE_ERROR:
- xhci_warn(xhci,
- "WARN: detect an incompatible device for slot %u ep %u",
- slot_id, ep_index);
- status = -EPROTO;
- break;
case COMP_MISSED_SERVICE_ERROR:
/*
* When encounter missed service error, one or more isoc tds
@@ -2437,6 +2455,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
"No Ping response error for slot %u ep %u, Skip one Isoc TD\n",
slot_id, ep_index);
goto cleanup;
+
+ case COMP_INCOMPATIBLE_DEVICE_ERROR:
+ /* needs disable slot command to recover */
+ xhci_warn(xhci,
+ "WARN: detect an incompatible device for slot %u ep %u",
+ slot_id, ep_index);
+ status = -EPROTO;
+ break;
default:
if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
status = 0;
@@ -2589,6 +2615,17 @@ cleanup:
} while (handling_skipped_tds);
return 0;
+
+err_out:
+ xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
+ (unsigned long long) xhci_trb_virt_to_dma(
+ xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue),
+ lower_32_bits(le64_to_cpu(event->buffer)),
+ upper_32_bits(le64_to_cpu(event->buffer)),
+ le32_to_cpu(event->transfer_len),
+ le32_to_cpu(event->flags));
+ return -ENODEV;
}
/*
@@ -2677,11 +2714,12 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
union xhci_trb *event_ring_deq;
irqreturn_t ret = IRQ_NONE;
+ unsigned long flags;
dma_addr_t deq;
u64 temp_64;
u32 status;
- spin_lock(&xhci->lock);
+ spin_lock_irqsave(&xhci->lock, flags);
/* Check if the xHC generated the interrupt, or the irq is shared */
status = readl(&xhci->op_regs->status);
if (status == ~(u32)0) {
@@ -2707,12 +2745,9 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
*/
status |= STS_EINT;
writel(status, &xhci->op_regs->status);
- /* FIXME when MSI-X is supported and there are multiple vectors */
- /* Clear the MSI-X event interrupt status */
- if (hcd->irq) {
+ if (!hcd->msi_enabled) {
u32 irq_pending;
- /* Acknowledge the PCI interrupt */
irq_pending = readl(&xhci->ir_set->irq_pending);
irq_pending |= IMAN_IP;
writel(irq_pending, &xhci->ir_set->irq_pending);
@@ -2757,7 +2792,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
ret = IRQ_HANDLED;
out:
- spin_unlock(&xhci->lock);
+ spin_unlock_irqrestore(&xhci->lock, flags);
return ret;
}
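
Illustrative only: the irqsave variant keeps the handler correct even when it is entered with interrupts enabled (for example when called directly rather than as a hardirq handler), following the usual pattern:

	unsigned long flags;

	spin_lock_irqsave(&xhci->lock, flags);
	/* ... check status, ack the interrupt, process the event ring ... */
	spin_unlock_irqrestore(&xhci->lock, flags);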
@@ -3965,13 +4000,12 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
/* Set Transfer Ring Dequeue Pointer command */
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
- unsigned int stream_id,
struct xhci_dequeue_state *deq_state)
{
dma_addr_t addr;
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
- u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
+ u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
u32 trb_sct = 0;
u32 type = TRB_TYPE(TRB_SET_DEQ);
struct xhci_virt_ep *ep;
@@ -4009,7 +4043,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
ep->queued_deq_seg = deq_state->new_deq_seg;
ep->queued_deq_ptr = deq_state->new_deq_ptr;
- if (stream_id)
+ if (deq_state->stream_id)
trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
ret = queue_command(xhci, cmd,
lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
@@ -4029,12 +4063,16 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
}
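
A sketch of the updated calling convention (mirroring the caller in xhci_handle_cmd_stop_ep() above): the stream id now travels inside struct xhci_dequeue_state, filled in by xhci_find_new_dequeue_state(), so it is no longer passed as a separate argument.

	struct xhci_dequeue_state deq_state = { };

	xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
			cur_td->urb->stream_id, cur_td, &deq_state);
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index, &deq_state);
		xhci_ring_cmd_db(xhci);
	}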
int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
- int slot_id, unsigned int ep_index)
+ int slot_id, unsigned int ep_index,
+ enum xhci_ep_reset_type reset_type)
{
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 type = TRB_TYPE(TRB_RESET_EP);
+ if (reset_type == EP_SOFT_RESET)
+ type |= TRB_TSP;
+
return queue_command(xhci, cmd, 0, 0, 0,
trb_slot_id | trb_ep_index | type, false);
}