Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--  drivers/usb/host/xhci-ring.c  146
1 file changed, 91 insertions, 55 deletions
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 0ed64eb68e48..5f926bea5ab1 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -546,9 +546,9 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
struct xhci_dequeue_state *state)
{
struct xhci_virt_device *dev = xhci->devs[slot_id];
+ struct xhci_virt_ep *ep = &dev->eps[ep_index];
struct xhci_ring *ep_ring;
struct xhci_generic_trb *trb;
- struct xhci_ep_ctx *ep_ctx;
dma_addr_t addr;
ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
@@ -573,8 +573,16 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Finding endpoint context");
- ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
- state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
+ /* 4.6.9: the dequeue cycle state (DCS) is written to the stream context for streams */
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ struct xhci_stream_ctx *ctx =
+ &ep->stream_info->stream_ctx_array[stream_id];
+ state->new_cycle_state = 0x1 & le64_to_cpu(ctx->stream_ring);
+ } else {
+ struct xhci_ep_ctx *ep_ctx
+ = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+ state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
+ }
state->new_deq_ptr = cur_td->last_trb;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
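
The hunk above is the core fix: per xHCI spec section 4.6.9, a Stop Endpoint command saves the dequeue cycle state (DCS) in bit 0 of the stream context's ring pointer when the endpoint uses streams, and in bit 0 of the endpoint context's deq field otherwise, so reading only ep_ctx->deq does not yield the stream ring's cycle state. A minimal sketch of the lookup as a standalone helper (hypothetical, not part of the patch, assuming the usual xhci.h definitions):

static u32 deq_cycle_state(struct xhci_hcd *xhci, struct xhci_virt_device *dev,
			   unsigned int ep_index, unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &dev->eps[ep_index];

	if (ep->ep_state & EP_HAS_STREAMS) {
		/* Streams: DCS is bit 0 of the stream context ring pointer */
		struct xhci_stream_ctx *ctx =
			&ep->stream_info->stream_ctx_array[stream_id];
		return 0x1 & le64_to_cpu(ctx->stream_ring);
	}
	/* No streams: DCS is bit 0 of the endpoint context deq pointer */
	return 0x1 & le64_to_cpu(xhci_get_ep_ctx(xhci, dev->out_ctx,
						 ep_index)->deq);
}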
@@ -892,6 +900,57 @@ remove_finished_td:
/* Return to the event handler with xhci->lock re-acquired */
}
+static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+ struct xhci_td *cur_td;
+
+ while (!list_empty(&ring->td_list)) {
+ cur_td = list_first_entry(&ring->td_list,
+ struct xhci_td, td_list);
+ list_del_init(&cur_td->td_list);
+ if (!list_empty(&cur_td->cancelled_td_list))
+ list_del_init(&cur_td->cancelled_td_list);
+ xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
+ }
+}
+
+static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
+ int slot_id, int ep_index)
+{
+ struct xhci_td *cur_td;
+ struct xhci_virt_ep *ep;
+ struct xhci_ring *ring;
+
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+ if ((ep->ep_state & EP_HAS_STREAMS) ||
+ (ep->ep_state & EP_GETTING_NO_STREAMS)) {
+ int stream_id;
+
+ for (stream_id = 0; stream_id < ep->stream_info->num_streams;
+ stream_id++) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Killing URBs for slot ID %u, ep index %u, stream %u",
+ slot_id, ep_index, stream_id + 1);
+ xhci_kill_ring_urbs(xhci,
+ ep->stream_info->stream_rings[stream_id]);
+ }
+ } else {
+ ring = ep->ring;
+ if (!ring)
+ return;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Killing URBs for slot ID %u, ep index %u",
+ slot_id, ep_index);
+ xhci_kill_ring_urbs(xhci, ring);
+ }
+ while (!list_empty(&ep->cancelled_td_list)) {
+ cur_td = list_first_entry(&ep->cancelled_td_list,
+ struct xhci_td, cancelled_td_list);
+ list_del_init(&cur_td->cancelled_td_list);
+ xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
+ }
+}
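
The two helpers above factor the URB-reaping loops out of the watchdog below: xhci_kill_ring_urbs() drains one transfer ring, and xhci_kill_endpoint_urbs() covers every ring the endpoint owns (one per stream when streams are in use, otherwise the single ep->ring) before flushing the endpoint's cancelled_td_list. A hedged usage sketch of the new entry point, assuming the caller already holds xhci->lock as the watchdog does (xhci_giveback_urb_in_irq() drops and re-takes the lock around each giveback):

/* Illustrative only, not part of the patch: reap every URB queued for
 * one slot, e.g. on a dead host controller. Caller must hold xhci->lock. */
static void kill_slot_urbs(struct xhci_hcd *xhci, int slot_id)
{
	int ep_index;

	if (!xhci->devs[slot_id])
		return;
	/* 31 endpoint contexts per device: EP0 plus 15 IN/OUT pairs */
	for (ep_index = 0; ep_index < 31; ep_index++)
		xhci_kill_endpoint_urbs(xhci, slot_id, ep_index);
}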
+
/* Watchdog timer function for when a stop endpoint command fails to complete.
* In this case, we assume the host controller is broken or dying or dead. The
* host may still be completing some other events, so we have to be careful to
@@ -915,9 +974,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
struct xhci_hcd *xhci;
struct xhci_virt_ep *ep;
- struct xhci_virt_ep *temp_ep;
- struct xhci_ring *ring;
- struct xhci_td *cur_td;
int ret, i, j;
unsigned long flags;
@@ -974,34 +1030,8 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
for (i = 0; i < MAX_HC_SLOTS; i++) {
if (!xhci->devs[i])
continue;
- for (j = 0; j < 31; j++) {
- temp_ep = &xhci->devs[i]->eps[j];
- ring = temp_ep->ring;
- if (!ring)
- continue;
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Killing URBs for slot ID %u, "
- "ep index %u", i, j);
- while (!list_empty(&ring->td_list)) {
- cur_td = list_first_entry(&ring->td_list,
- struct xhci_td,
- td_list);
- list_del_init(&cur_td->td_list);
- if (!list_empty(&cur_td->cancelled_td_list))
- list_del_init(&cur_td->cancelled_td_list);
- xhci_giveback_urb_in_irq(xhci, cur_td,
- -ESHUTDOWN);
- }
- while (!list_empty(&temp_ep->cancelled_td_list)) {
- cur_td = list_first_entry(
- &temp_ep->cancelled_td_list,
- struct xhci_td,
- cancelled_td_list);
- list_del_init(&cur_td->cancelled_td_list);
- xhci_giveback_urb_in_irq(xhci, cur_td,
- -ESHUTDOWN);
- }
- }
+ for (j = 0; j < 31; j++)
+ xhci_kill_endpoint_urbs(xhci, i, j);
}
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@@ -1073,17 +1103,18 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
unsigned int stream_id;
struct xhci_ring *ep_ring;
struct xhci_virt_device *dev;
+ struct xhci_virt_ep *ep;
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
dev = xhci->devs[slot_id];
+ ep = &dev->eps[ep_index];
ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
if (!ep_ring) {
- xhci_warn(xhci, "WARN Set TR deq ptr command for "
- "freed stream ID %u\n",
+ xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
stream_id);
/* XXX: Harmless??? */
dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
@@ -1099,12 +1130,10 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
switch (cmd_comp_code) {
case COMP_TRB_ERR:
- xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
- "of stream ID configuration\n");
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
break;
case COMP_CTX_STATE:
- xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
- "to incorrect slot or ep state.\n");
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
ep_state = le32_to_cpu(ep_ctx->ep_info);
ep_state &= EP_STATE_MASK;
slot_state = le32_to_cpu(slot_ctx->dev_state);
@@ -1114,13 +1143,12 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
slot_state, ep_state);
break;
case COMP_EBADSLT:
- xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
- "slot %u was not enabled.\n", slot_id);
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
+ slot_id);
break;
default:
- xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
- "completion code of %u.\n",
- cmd_comp_code);
+ xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
+ cmd_comp_code);
break;
}
/* OK what do we do now? The endpoint state is hosed, and we
@@ -1130,23 +1158,28 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
* cancelling URBs, which might not be an error...
*/
} else {
+ u64 deq;
+ /* 4.6.10 deq ptr is written to the stream ctx for streams */
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ struct xhci_stream_ctx *ctx =
+ &ep->stream_info->stream_ctx_array[stream_id];
+ deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
+ } else {
+ deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
+ }
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Successful Set TR Deq Ptr cmd, deq = @%08llx",
- le64_to_cpu(ep_ctx->deq));
- if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
- dev->eps[ep_index].queued_deq_ptr) ==
- (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
+ "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
+ if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
+ ep->queued_deq_ptr) == deq) {
/* Update the ring's dequeue segment and dequeue pointer
* to reflect the new position.
*/
update_ring_for_set_deq_completion(xhci, dev,
ep_ring, ep_index);
} else {
- xhci_warn(xhci, "Mismatch between completed Set TR Deq "
- "Ptr command & xHCI internal state.\n");
+ xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
- dev->eps[ep_index].queued_deq_seg,
- dev->eps[ep_index].queued_deq_ptr);
+ ep->queued_deq_seg, ep->queued_deq_ptr);
}
}
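
This hunk applies the same stream awareness to Set TR Dequeue Pointer completion: per xHCI spec section 4.6.10, the xHC reports the new dequeue pointer in the stream context for stream endpoints and in the endpoint context otherwise. Note the different masks: SCTX_DEQ_MASK clears the low bits of the stream context value (DCS plus the SCT field), while the endpoint context value only needs the cycle bit cleared. A sketch of the extraction, mirroring the code above (hypothetical helper, assuming the xhci.h masks):

static u64 completed_deq(struct xhci_virt_ep *ep, struct xhci_ep_ctx *ep_ctx,
			 unsigned int stream_id)
{
	if (ep->ep_state & EP_HAS_STREAMS) {
		/* Stream context: strip DCS (bit 0) and SCT (bits 3:1) */
		struct xhci_stream_ctx *ctx =
			&ep->stream_info->stream_ctx_array[stream_id];
		return le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
	}
	/* Endpoint context: only the cycle bit is not part of the pointer */
	return le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
}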
@@ -4070,6 +4103,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
+ u32 trb_sct = 0;
u32 type = TRB_TYPE(TRB_SET_DEQ);
struct xhci_virt_ep *ep;
@@ -4088,7 +4122,9 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
}
ep->queued_deq_seg = deq_seg;
ep->queued_deq_ptr = deq_ptr;
- return queue_command(xhci, lower_32_bits(addr) | cycle_state,
+ if (stream_id)
+ trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
+ return queue_command(xhci, lower_32_bits(addr) | trb_sct | cycle_state,
upper_32_bits(addr), trb_stream_id,
trb_slot_id | trb_ep_index | type, false);
}
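
Finally, the queued command itself must tell the controller which kind of ring the new pointer refers to: when a nonzero stream ID is given, the Set TR Dequeue Pointer TRB now carries a Stream Context Type of SCT_PRI_TR (primary transfer ring) in the low bits of the pointer field, alongside the dequeue cycle state. A short illustration of how field[0] of the command TRB is assembled (names as in the hunk above):

/* TRB field[0]: new dequeue pointer (low half), SCT for stream rings,
 * and the dequeue cycle state in bit 0. */
u32 field0 = lower_32_bits(addr)
	     | (stream_id ? SCT_FOR_TRB(SCT_PRI_TR) : 0)
	     | cycle_state;

Secondary streams are not addressed here; any nonzero stream ID is assumed to name a primary transfer ring.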