Diffstat (limited to 'drivers/usb/wusbcore')
-rw-r--r--  drivers/usb/wusbcore/wa-hc.h    | 15
-rw-r--r--  drivers/usb/wusbcore/wa-rpipe.c | 21
-rw-r--r--  drivers/usb/wusbcore/wa-xfer.c  | 84
3 files changed, 108 insertions(+), 12 deletions(-)
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
index d6bea3e0b54a..cf250c21e946 100644
--- a/drivers/usb/wusbcore/wa-hc.h
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -91,6 +91,7 @@
struct wusbhc;
struct wahc;
extern void wa_urb_enqueue_run(struct work_struct *ws);
+extern void wa_process_errored_transfers_run(struct work_struct *ws);
/**
* RPipe instance
@@ -190,8 +191,14 @@ struct wahc {
struct list_head xfer_list;
struct list_head xfer_delayed_list;
+ struct list_head xfer_errored_list;
+ /*
+ * lock for the above xfer lists. Can be taken while a xfer->lock is
+ * held but not in the reverse order.
+ */
spinlock_t xfer_list_lock;
- struct work_struct xfer_work;
+ struct work_struct xfer_enqueue_work;
+ struct work_struct xfer_error_work;
atomic_t xfer_id_count;
};
@@ -244,8 +251,10 @@ static inline void wa_init(struct wahc *wa)
edc_init(&wa->dti_edc);
INIT_LIST_HEAD(&wa->xfer_list);
INIT_LIST_HEAD(&wa->xfer_delayed_list);
+ INIT_LIST_HEAD(&wa->xfer_errored_list);
spin_lock_init(&wa->xfer_list_lock);
- INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
+ INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
+ INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
atomic_set(&wa->xfer_id_count, 1);
}
@@ -269,6 +278,8 @@ static inline void rpipe_put(struct wa_rpipe *rpipe)
}
extern void rpipe_ep_disable(struct wahc *, struct usb_host_endpoint *);
+extern void rpipe_clear_feature_stalled(struct wahc *,
+ struct usb_host_endpoint *);
extern int wa_rpipes_create(struct wahc *);
extern void wa_rpipes_destroy(struct wahc *);
static inline void rpipe_avail_dec(struct wa_rpipe *rpipe)
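
The comment added to struct wahc pins down a lock ordering: wa->xfer_list_lock may be taken while a xfer->lock is already held, never the other way around. Below is a minimal sketch of the permitted nesting, reusing the field names from this patch; the helper name is made up for illustration and is not part of the change.

/* Illustrative helper, not part of the patch: moves a transfer to the
 * errored list using the documented order (xfer->lock outer,
 * wa->xfer_list_lock inner). */
static void sketch_move_to_errored(struct wahc *wa, struct wa_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&xfer->lock, flags);	/* outer lock */
	spin_lock(&wa->xfer_list_lock);		/* inner lock: allowed nesting */
	list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
	spin_unlock(&wa->xfer_list_lock);
	spin_unlock_irqrestore(&xfer->lock, flags);
	/* Taking xfer->lock while wa->xfer_list_lock is held would invert
	 * the documented order and risk deadlock. */
}
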
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index 9a595c1ed867..fd4f1ce6256a 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -527,3 +527,24 @@ void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep)
mutex_unlock(&wa->rpipe_mutex);
}
EXPORT_SYMBOL_GPL(rpipe_ep_disable);
+
+/* Clear the stalled status of an RPIPE. */
+void rpipe_clear_feature_stalled(struct wahc *wa, struct usb_host_endpoint *ep)
+{
+ struct wa_rpipe *rpipe;
+
+ mutex_lock(&wa->rpipe_mutex);
+ rpipe = ep->hcpriv;
+ if (rpipe != NULL) {
+ u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
+
+ usb_control_msg(
+ wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
+ RPIPE_STALL, index, NULL, 0, 1000);
+ }
+ mutex_unlock(&wa->rpipe_mutex);
+}
+EXPORT_SYMBOL_GPL(rpipe_clear_feature_stalled);
+
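
rpipe_clear_feature_stalled() issues a class-specific CLEAR_FEATURE(RPIPE_STALL) request addressed to the RPipe, in the same shape as the other RPipe control requests in this file. The sketch below is an editorial variant only, not the patch's code: it checks the result, annotates the request fields, and uses usb_sndctrlpipe() for the zero-length host-to-device request; the helper name is invented.

/* Sketch of a checked variant, assuming the same wahc/wa_rpipe fields
 * the patch already uses. */
static int sketch_rpipe_clear_stall(struct wahc *wa, struct wa_rpipe *rpipe)
{
	u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
	int result;

	result = usb_control_msg(
		wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
		USB_REQ_CLEAR_FEATURE,				/* bRequest */
		USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,	/* bmRequestType */
		RPIPE_STALL,					/* wValue: feature selector */
		index,						/* wIndex: RPipe index */
		NULL, 0,					/* no data stage */
		1000 /* ms, as in the patch */);
	if (result < 0)
		dev_err(&wa->usb_dev->dev,
			"RPipe %u: CLEAR_FEATURE(RPIPE_STALL) failed: %d\n",
			index, result);
	return result;
}
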
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index f5c81afc6e96..d74fe1ae16ac 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -1100,7 +1100,7 @@ error_xfer_submit:
*/
void wa_urb_enqueue_run(struct work_struct *ws)
{
- struct wahc *wa = container_of(ws, struct wahc, xfer_work);
+ struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
struct wa_xfer *xfer, *next;
struct urb *urb;
LIST_HEAD(tmp_list);
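
The work item rename is tied to the container_of() call above: the handler recovers its wahc from the specific work_struct member it was queued on, so each handler must name the member it was bound to with INIT_WORK(). A standalone sketch of that pattern follows; the demo_* names are invented and stand in for the wahc fields.

#include <linux/workqueue.h>

struct demo_adapter {			/* stand-in for struct wahc */
	struct work_struct enqueue_work;
	struct work_struct error_work;
};

static void demo_enqueue_run(struct work_struct *ws)
{
	/* Recover the container from the member this handler is bound to. */
	struct demo_adapter *da = container_of(ws, struct demo_adapter,
					       enqueue_work);
	(void)da;	/* a real handler would use da here */
}

static void demo_error_run(struct work_struct *ws)
{
	struct demo_adapter *da = container_of(ws, struct demo_adapter,
					       error_work);
	(void)da;
}

static void demo_init(struct demo_adapter *da)
{
	/* Each work item gets its own handler, as in wa_init(). */
	INIT_WORK(&da->enqueue_work, demo_enqueue_run);
	INIT_WORK(&da->error_work, demo_error_run);
}
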
@@ -1126,6 +1126,49 @@ void wa_urb_enqueue_run(struct work_struct *ws)
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
/*
+ * Process the errored transfers on the Wire Adapter outside of interrupt context.
+ */
+void wa_process_errored_transfers_run(struct work_struct *ws)
+{
+ struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
+ struct wa_xfer *xfer, *next;
+ LIST_HEAD(tmp_list);
+
+ pr_info("%s: Run delayed STALL processing.\n", __func__);
+
+ /* Move the entries of wa->xfer_errored_list onto a temp list while holding the lock. */
+ spin_lock_irq(&wa->xfer_list_lock);
+ list_cut_position(&tmp_list, &wa->xfer_errored_list,
+ wa->xfer_errored_list.prev);
+ spin_unlock_irq(&wa->xfer_list_lock);
+
+ /*
+ * Run rpipe_clear_feature_stalled on each entry of the temp list
+ * without holding the list lock.
+ */
+ list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
+ struct usb_host_endpoint *ep;
+ unsigned long flags;
+ struct wa_rpipe *rpipe;
+
+ spin_lock_irqsave(&xfer->lock, flags);
+ ep = xfer->ep;
+ rpipe = ep->hcpriv;
+ spin_unlock_irqrestore(&xfer->lock, flags);
+
+ /* Clear the RPIPE STALL feature without holding a lock. */
+ rpipe_clear_feature_stalled(wa, ep);
+
+ /* complete the xfer. This removes it from the tmp list. */
+ wa_xfer_completion(xfer);
+
+ /* check for work. */
+ wa_xfer_delayed_run(rpipe);
+ }
+}
+EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
+
+/*
* Submit a transfer to the Wire Adapter in a delayed way
*
* The process of enqueuing involves possible sleeps() [see
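
wa_process_errored_transfers_run() above uses a drain-then-process idiom: list_cut_position() detaches every queued entry onto a private list in O(1) while the spinlock is held, and the slow RPipe CLEAR_FEATURE transfers then run with no list lock held. A self-contained sketch of the same idiom is shown here; the names are illustrative and not from the driver.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head node;
};

static LIST_HEAD(pending);		/* shared; protected by pending_lock */
static DEFINE_SPINLOCK(pending_lock);

static void process_item(struct item *it)
{
	/* May sleep: no spinlock is held when this runs. */
	list_del(&it->node);
	kfree(it);
}

static void drain_pending(void)
{
	struct item *it, *next;
	LIST_HEAD(tmp);

	/* Detach everything onto tmp in one step while holding the lock;
	 * pending is left empty (entries are moved, not copied). */
	spin_lock_irq(&pending_lock);
	list_cut_position(&tmp, &pending, pending.prev);
	spin_unlock_irq(&pending_lock);

	/* Walk the private list lock-free; the _safe variant is needed
	 * because processing removes entries from tmp. */
	list_for_each_entry_safe(it, next, &tmp, node)
		process_item(it);
}
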
@@ -1180,7 +1223,7 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
- queue_work(wusbd, &wa->xfer_work);
+ queue_work(wusbd, &wa->xfer_enqueue_work);
} else {
wa_urb_enqueue_b(xfer);
}
@@ -1222,7 +1265,8 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
xfer = urb->hcpriv;
if (xfer == NULL) {
- /* NOthing setup yet enqueue will see urb->status !=
+ /*
+ * Nothing setup yet; enqueue will see urb->status !=
* -EINPROGRESS (by hcd layer) and bail out with
* error, no need to do completion
*/
@@ -1360,7 +1404,7 @@ static int wa_xfer_status_to_errno(u8 status)
*
* inbound transfers: need to schedule a DTI read
*
- * FIXME: this functio needs to be broken up in parts
+ * FIXME: this function needs to be broken up in parts
*/
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
{
@@ -1482,17 +1526,37 @@ error_submit_buf_in:
seg->result = result;
kfree(wa->buf_in_urb->sg);
error_sg_alloc:
+ __wa_xfer_abort(xfer);
error_complete:
seg->status = WA_SEG_ERROR;
xfer->segs_done++;
rpipe_ready = rpipe_avail_inc(rpipe);
- __wa_xfer_abort(xfer);
done = __wa_xfer_is_done(xfer);
- spin_unlock_irqrestore(&xfer->lock, flags);
- if (done)
- wa_xfer_completion(xfer);
- if (rpipe_ready)
- wa_xfer_delayed_run(rpipe);
+ /*
+ * Queue a work item to clear the STALL for control endpoints.
+ * Otherwise, let endpoint_reset take care of it.
+ */
+ if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
+ usb_endpoint_xfer_control(&xfer->ep->desc) &&
+ done) {
+
+ dev_info(dev, "Control EP stall. Queue delayed work.\n");
+ spin_lock_irq(&wa->xfer_list_lock);
+ /* remove xfer from xfer_list. */
+ list_del(&xfer->list_node);
+ /* add xfer to xfer_errored_list. */
+ list_add_tail(&xfer->list_node, &wa->xfer_errored_list);
+ spin_unlock_irq(&wa->xfer_list_lock);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ queue_work(wusbd, &wa->xfer_error_work);
+ } else {
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ }
+
return;
error_bad_seg: