Diffstat (limited to 'drivers/usb/musb/musb_host.c')
-rw-r--r--  drivers/usb/musb/musb_host.c  536
1 file changed, 325 insertions, 211 deletions
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index c3d5fc9dfb5b..26c65e66cc0f 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -181,7 +181,7 @@ static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
/* NOTE: no locks here; caller should lock and select EP */
txcsr = musb_readw(ep->regs, MUSB_TXCSR);
txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
- if (is_cppi_enabled())
+ if (is_cppi_enabled(ep->musb))
txcsr |= MUSB_TXCSR_DMAMODE;
musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
@@ -294,7 +294,7 @@ start:
if (!hw_ep->tx_channel)
musb_h_tx_start(hw_ep);
- else if (is_cppi_enabled() || tusb_dma_omap())
+ else if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
musb_h_tx_dma_start(hw_ep);
}
}
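
The reason these helpers now take the musb instance is that the DMA type is decided per controller at runtime instead of being fixed at build time. The matching musb_dma.h change is not part of this file; a rough before/after sketch of the idea (the flag and field names below are placeholders, not the exact upstream definitions):

	/* before: one answer for the whole kernel build */
	#ifdef CONFIG_USB_TI_CPPI_DMA
	#define is_cppi_enabled()	1
	#else
	#define is_cppi_enabled()	0
	#endif

	/* after (sketch): test a per-controller DMA-type quirk flag */
	#define is_cppi_enabled(musb)	((musb)->io.quirks & MUSB_DMA_CPPI41)

With runtime checks like this, a multi-platform kernel can build in several musb glue layers and still pick the right DMA hooks for each controller.
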
@@ -555,8 +555,9 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
* the busy/not-empty tests are basically paranoia.
*/
static void
-musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
+musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
{
+ struct musb_hw_ep *ep = musb->endpoints + epnum;
u16 csr;
/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
@@ -594,10 +595,9 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
/* target addr and (for multipoint) hub addr/port */
if (musb->is_multipoint) {
- musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
- musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
- musb_write_rxhubport(ep->target_regs, qh->h_port_reg);
-
+ musb_write_rxfunaddr(musb, epnum, qh->addr_reg);
+ musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg);
+ musb_write_rxhubport(musb, epnum, qh->h_port_reg);
} else
musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
@@ -617,23 +617,22 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
ep->rx_reinit = 0;
}
-static bool musb_tx_dma_program(struct dma_controller *dma,
+static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
struct musb_hw_ep *hw_ep, struct musb_qh *qh,
- struct urb *urb, u32 offset, u32 length)
+ struct urb *urb, u32 offset,
+ u32 *length, u8 *mode)
{
struct dma_channel *channel = hw_ep->tx_channel;
void __iomem *epio = hw_ep->regs;
u16 pkt_size = qh->maxpacket;
u16 csr;
- u8 mode;
-#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
- if (length > channel->max_len)
- length = channel->max_len;
+ if (*length > channel->max_len)
+ *length = channel->max_len;
csr = musb_readw(epio, MUSB_TXCSR);
- if (length > pkt_size) {
- mode = 1;
+ if (*length > pkt_size) {
+ *mode = 1;
csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
/* autoset shouldn't be set in high bandwidth */
/*
@@ -649,15 +648,28 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
can_bulk_split(hw_ep->musb, qh->type)))
csr |= MUSB_TXCSR_AUTOSET;
} else {
- mode = 0;
+ *mode = 0;
csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
}
- channel->desired_mode = mode;
+ channel->desired_mode = *mode;
musb_writew(epio, MUSB_TXCSR, csr);
-#else
- if (!is_cppi_enabled() && !tusb_dma_omap())
- return false;
+
+ return 0;
+}
+
+static int musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
+ struct musb_hw_ep *hw_ep,
+ struct musb_qh *qh,
+ struct urb *urb,
+ u32 offset,
+ u32 *length,
+ u8 *mode)
+{
+ struct dma_channel *channel = hw_ep->tx_channel;
+
+ if (!is_cppi_enabled(hw_ep->musb) && !tusb_dma_omap(hw_ep->musb))
+ return -ENODEV;
channel->actual_len = 0;
@@ -665,8 +677,28 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
* TX uses "RNDIS" mode automatically but needs help
* to identify the zero-length-final-packet case.
*/
- mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
-#endif
+ *mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
+
+ return 0;
+}
+
+static bool musb_tx_dma_program(struct dma_controller *dma,
+ struct musb_hw_ep *hw_ep, struct musb_qh *qh,
+ struct urb *urb, u32 offset, u32 length)
+{
+ struct dma_channel *channel = hw_ep->tx_channel;
+ u16 pkt_size = qh->maxpacket;
+ u8 mode;
+ int res;
+
+ if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
+ res = musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb,
+ offset, &length, &mode);
+ else
+ res = musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb,
+ offset, &length, &mode);
+ if (res)
+ return false;
qh->segsize = length;
@@ -678,6 +710,9 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
if (!dma->channel_program(channel, pkt_size, mode,
urb->transfer_dma + offset, length)) {
+ void __iomem *epio = hw_ep->regs;
+ u16 csr;
+
dma->channel_release(channel);
hw_ep->tx_channel = NULL;
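
The two locals added above are used a few lines further down, outside this hunk's context window, to undo the TX DMA setup before falling back to PIO. A sketch of that failure path, assuming it matches the pre-existing cleanup code rather than quoting it:

	csr = musb_readw(epio, MUSB_TXCSR);
	csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
	musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
	return false;
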
@@ -801,9 +836,9 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
/* target addr and (for multipoint) hub addr/port */
if (musb->is_multipoint) {
- musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
- musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
- musb_write_txhubport(mbase, epnum, qh->h_port_reg);
+ musb_write_txfunaddr(musb, epnum, qh->addr_reg);
+ musb_write_txhubaddr(musb, epnum, qh->h_addr_reg);
+ musb_write_txhubport(musb, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
} else
musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
@@ -875,7 +910,7 @@ finish:
u16 csr;
if (hw_ep->rx_reinit) {
- musb_rx_reinit(musb, qh, hw_ep);
+ musb_rx_reinit(musb, qh, epnum);
/* init new state: toggle and NYET, maybe DMA later */
if (usb_gettoggle(urb->dev, qh->epnum, 0))
@@ -901,7 +936,7 @@ finish:
/* kick things off */
- if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
+ if ((is_cppi_enabled(musb) || tusb_dma_omap(musb)) && dma_channel) {
/* Candidate for DMA */
dma_channel->actual_len = 0L;
qh->segsize = len;
@@ -1441,7 +1476,7 @@ done:
} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
offset, length)) {
- if (is_cppi_enabled() || tusb_dma_omap())
+ if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
musb_h_tx_dma_start(hw_ep);
return;
}
@@ -1498,9 +1533,47 @@ done:
MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}
+#ifdef CONFIG_USB_TI_CPPI41_DMA
+/* Seems to set up ISO for cppi41 and not advance len. See commit c57c41d */
+static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
+ struct musb_hw_ep *hw_ep,
+ struct musb_qh *qh,
+ struct urb *urb,
+ size_t len)
+{
+ struct dma_channel *channel = hw_ep->rx_channel;
+ void __iomem *epio = hw_ep->regs;
+ dma_addr_t *buf;
+ u32 length, res;
+ u16 val;
-#ifdef CONFIG_USB_INVENTRA_DMA
+ buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
+ (u32)urb->transfer_dma;
+
+ length = urb->iso_frame_desc[qh->iso_idx].length;
+ val = musb_readw(epio, MUSB_RXCSR);
+ val |= MUSB_RXCSR_DMAENAB;
+ musb_writew(hw_ep->regs, MUSB_RXCSR, val);
+
+ res = dma->channel_program(channel, qh->maxpacket, 0,
+ (u32)buf, length);
+
+ return res;
+}
+#else
+static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
+ struct musb_hw_ep *hw_ep,
+ struct musb_qh *qh,
+ struct urb *urb,
+ size_t len)
+{
+ return false;
+}
+#endif
+
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
+ defined(CONFIG_USB_TI_CPPI41_DMA)
/* Host side RX (IN) using Mentor DMA works as follows:
submit_urb ->
- if queue was empty, ProgramEndpoint
@@ -1535,7 +1608,194 @@ done:
* thus be a great candidate for using mode 1 ... for all but the
* last packet of one URB's transfer.
*/
+static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
+ struct musb_hw_ep *hw_ep,
+ struct musb_qh *qh,
+ struct urb *urb,
+ size_t len)
+{
+ struct dma_channel *channel = hw_ep->rx_channel;
+ void __iomem *epio = hw_ep->regs;
+ u16 val;
+ int pipe;
+ bool done;
+
+ pipe = urb->pipe;
+
+ if (usb_pipeisoc(pipe)) {
+ struct usb_iso_packet_descriptor *d;
+
+ d = urb->iso_frame_desc + qh->iso_idx;
+ d->actual_length = len;
+
+ /* even if there was an error, we did the dma
+ * for iso_frame_desc->length
+ */
+ if (d->status != -EILSEQ && d->status != -EOVERFLOW)
+ d->status = 0;
+
+ if (++qh->iso_idx >= urb->number_of_packets) {
+ done = true;
+ } else {
+ /* REVISIT: Why ignore return value here? */
+ if (musb_dma_cppi41(hw_ep->musb))
+ done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
+ urb, len);
+ done = false;
+ }
+
+ } else {
+ /* done if urb buffer is full or short packet is recd */
+ done = (urb->actual_length + len >=
+ urb->transfer_buffer_length
+ || channel->actual_len < qh->maxpacket
+ || channel->rx_packet_done);
+ }
+
+ /* send IN token for next packet, without AUTOREQ */
+ if (!done) {
+ val = musb_readw(epio, MUSB_RXCSR);
+ val |= MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
+ }
+
+ return done;
+}
+
+/* Disadvantage of using mode 1:
+ * It's basically usable only for mass storage class; essentially all
+ * other protocols also terminate transfers on short packets.
+ *
+ * Details:
+ * An extra IN token is sent at the end of the transfer (due to AUTOREQ)
+ * If you try to use mode 1 for (transfer_buffer_length - 512), and try
+ * to use the extra IN token to grab the last packet using mode 0, then
+ * the problem is that you cannot be sure when the device will send the
+ * last packet and RxPktRdy set. Sometimes the packet is recd too soon
+ * such that it gets lost when RxCSR is re-set at the end of the mode 1
+ * transfer, while sometimes it is recd just a little late so that if you
+ * try to configure for mode 0 soon after the mode 1 transfer is
+ * completed, you will find rxcount 0. Okay, so you might think why not
+ * wait for an interrupt when the pkt is recd. Well, you won't get any!
+ */
+static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
+ struct musb_hw_ep *hw_ep,
+ struct musb_qh *qh,
+ struct urb *urb,
+ size_t len,
+ u8 iso_err)
+{
+ struct musb *musb = hw_ep->musb;
+ void __iomem *epio = hw_ep->regs;
+ struct dma_channel *channel = hw_ep->rx_channel;
+ u16 rx_count, val;
+ int length, pipe, done;
+ dma_addr_t buf;
+
+ rx_count = musb_readw(epio, MUSB_RXCOUNT);
+ pipe = urb->pipe;
+
+ if (usb_pipeisoc(pipe)) {
+ int d_status = 0;
+ struct usb_iso_packet_descriptor *d;
+
+ d = urb->iso_frame_desc + qh->iso_idx;
+ if (iso_err) {
+ d_status = -EILSEQ;
+ urb->error_count++;
+ }
+ if (rx_count > d->length) {
+ if (d_status == 0) {
+ d_status = -EOVERFLOW;
+ urb->error_count++;
+ }
+ dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",
+ rx_count, d->length);
+
+ length = d->length;
+ } else
+ length = rx_count;
+ d->status = d_status;
+ buf = urb->transfer_dma + d->offset;
+ } else {
+ length = rx_count;
+ buf = urb->transfer_dma + urb->actual_length;
+ }
+
+ channel->desired_mode = 0;
+#ifdef USE_MODE1
+ /* because of the issue below, mode 1 will
+ * only rarely behave with correct semantics.
+ */
+ if ((urb->transfer_flags & URB_SHORT_NOT_OK)
+ && (urb->transfer_buffer_length - urb->actual_length)
+ > qh->maxpacket)
+ channel->desired_mode = 1;
+ if (rx_count < hw_ep->max_packet_sz_rx) {
+ length = rx_count;
+ channel->desired_mode = 0;
+ } else {
+ length = urb->transfer_buffer_length;
+ }
+#endif
+
+ /* See comments above on disadvantages of using mode 1 */
+ val = musb_readw(epio, MUSB_RXCSR);
+ val &= ~MUSB_RXCSR_H_REQPKT;
+
+ if (channel->desired_mode == 0)
+ val &= ~MUSB_RXCSR_H_AUTOREQ;
+ else
+ val |= MUSB_RXCSR_H_AUTOREQ;
+ val |= MUSB_RXCSR_DMAENAB;
+
+ /* autoclear shouldn't be set in high bandwidth */
+ if (qh->hb_mult == 1)
+ val |= MUSB_RXCSR_AUTOCLEAR;
+
+ musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
+
+ /* REVISIT if when actual_length != 0,
+ * transfer_buffer_length needs to be
+ * adjusted first...
+ */
+ done = dma->channel_program(channel, qh->maxpacket,
+ channel->desired_mode,
+ buf, length);
+
+ if (!done) {
+ dma->channel_release(channel);
+ hw_ep->rx_channel = NULL;
+ channel = NULL;
+ val = musb_readw(epio, MUSB_RXCSR);
+ val &= ~(MUSB_RXCSR_DMAENAB
+ | MUSB_RXCSR_H_AUTOREQ
+ | MUSB_RXCSR_AUTOCLEAR);
+ musb_writew(epio, MUSB_RXCSR, val);
+ }
+
+ return done;
+}
+#else
+static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
+ struct musb_hw_ep *hw_ep,
+ struct musb_qh *qh,
+ struct urb *urb,
+ size_t len)
+{
+ return false;
+}
+
+static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
+ struct musb_hw_ep *hw_ep,
+ struct musb_qh *qh,
+ struct urb *urb,
+ size_t len,
+ u8 iso_err)
+{
+ return false;
+}
#endif
/*
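
For readers new to Mentor RX DMA, the two desired_mode values selected above differ mainly in who decides when the transfer ends. An illustrative contrast of the channel_program() calls, condensed from the code above rather than quoted verbatim:

	/* mode 0: one packet per programming, length taken from RXCOUNT;
	 * the driver re-issues REQPKT for each further packet
	 */
	c->channel_program(channel, qh->maxpacket, 0, buf, rx_count);

	/* mode 1 (with AUTOREQ and AUTOCLEAR): stream up to the full URB;
	 * the hardware only stops early on a short packet, which is why the
	 * long comment above restricts it to URB_SHORT_NOT_OK transfers
	 */
	c->channel_program(channel, qh->maxpacket, 1, buf,
			   urb->transfer_buffer_length);
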
@@ -1546,6 +1806,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
{
struct urb *urb;
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+ struct dma_controller *c = musb->dma_controller;
void __iomem *epio = hw_ep->regs;
struct musb_qh *qh = hw_ep->in_qh;
size_t xfer_len;
@@ -1661,9 +1922,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
*/
/* FIXME this is _way_ too much in-line logic for Mentor DMA */
-
-#if !defined(CONFIG_USB_INVENTRA_DMA) && !defined(CONFIG_USB_UX500_DMA)
- if (rx_csr & MUSB_RXCSR_H_REQPKT) {
+ if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) &&
+ (rx_csr & MUSB_RXCSR_H_REQPKT)) {
/* REVISIT this happened for a while on some short reads...
* the cleanup still needs investigation... looks bad...
* and also duplicates dma cleanup code above ... plus,
@@ -1684,7 +1944,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
musb_writew(epio, MUSB_RXCSR,
MUSB_RXCSR_H_WZC_BITS | rx_csr);
}
-#endif
+
if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
xfer_len = dma->actual_len;
@@ -1694,67 +1954,18 @@ void musb_host_rx(struct musb *musb, u8 epnum)
| MUSB_RXCSR_RXPKTRDY);
musb_writew(hw_ep->regs, MUSB_RXCSR, val);
-#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
- defined(CONFIG_USB_TI_CPPI41_DMA)
- if (usb_pipeisoc(pipe)) {
- struct usb_iso_packet_descriptor *d;
-
- d = urb->iso_frame_desc + qh->iso_idx;
- d->actual_length = xfer_len;
-
- /* even if there was an error, we did the dma
- * for iso_frame_desc->length
- */
- if (d->status != -EILSEQ && d->status != -EOVERFLOW)
- d->status = 0;
-
- if (++qh->iso_idx >= urb->number_of_packets) {
- done = true;
- } else {
-#if defined(CONFIG_USB_TI_CPPI41_DMA)
- struct dma_controller *c;
- dma_addr_t *buf;
- u32 length, ret;
-
- c = musb->dma_controller;
- buf = (void *)
- urb->iso_frame_desc[qh->iso_idx].offset
- + (u32)urb->transfer_dma;
-
- length =
- urb->iso_frame_desc[qh->iso_idx].length;
-
- val |= MUSB_RXCSR_DMAENAB;
- musb_writew(hw_ep->regs, MUSB_RXCSR, val);
-
- ret = c->channel_program(dma, qh->maxpacket,
- 0, (u32) buf, length);
-#endif
- done = false;
- }
-
- } else {
- /* done if urb buffer is full or short packet is recd */
- done = (urb->actual_length + xfer_len >=
- urb->transfer_buffer_length
- || dma->actual_len < qh->maxpacket
- || dma->rx_packet_done);
- }
-
- /* send IN token for next packet, without AUTOREQ */
- if (!done) {
- val |= MUSB_RXCSR_H_REQPKT;
- musb_writew(epio, MUSB_RXCSR,
- MUSB_RXCSR_H_WZC_BITS | val);
+ if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
+ musb_dma_cppi41(musb)) {
+ done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
+ dev_dbg(hw_ep->musb->controller,
+ "ep %d dma %s, rxcsr %04x, rxcount %d\n",
+ epnum, done ? "off" : "reset",
+ musb_readw(epio, MUSB_RXCSR),
+ musb_readw(epio, MUSB_RXCOUNT));
+ } else {
+ done = true;
}
- dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
- done ? "off" : "reset",
- musb_readw(epio, MUSB_RXCSR),
- musb_readw(epio, MUSB_RXCOUNT));
-#else
- done = true;
-#endif
} else if (urb->status == -EINPROGRESS) {
/* if no errors, be sure a packet is ready for unloading */
if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
@@ -1772,126 +1983,24 @@ void musb_host_rx(struct musb *musb, u8 epnum)
}
/* we are expecting IN packets */
-#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
- defined(CONFIG_USB_TI_CPPI41_DMA)
- if (dma) {
- struct dma_controller *c;
- u16 rx_count;
- int ret, length;
- dma_addr_t buf;
-
- rx_count = musb_readw(epio, MUSB_RXCOUNT);
-
- dev_dbg(musb->controller, "RX%d count %d, buffer 0x%llx len %d/%d\n",
- epnum, rx_count,
- (unsigned long long) urb->transfer_dma
- + urb->actual_length,
- qh->offset,
- urb->transfer_buffer_length);
-
- c = musb->dma_controller;
-
- if (usb_pipeisoc(pipe)) {
- int d_status = 0;
- struct usb_iso_packet_descriptor *d;
-
- d = urb->iso_frame_desc + qh->iso_idx;
-
- if (iso_err) {
- d_status = -EILSEQ;
- urb->error_count++;
- }
- if (rx_count > d->length) {
- if (d_status == 0) {
- d_status = -EOVERFLOW;
- urb->error_count++;
- }
- dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",\
- rx_count, d->length);
-
- length = d->length;
- } else
- length = rx_count;
- d->status = d_status;
- buf = urb->transfer_dma + d->offset;
- } else {
- length = rx_count;
- buf = urb->transfer_dma +
- urb->actual_length;
- }
-
- dma->desired_mode = 0;
-#ifdef USE_MODE1
- /* because of the issue below, mode 1 will
- * only rarely behave with correct semantics.
- */
- if ((urb->transfer_flags &
- URB_SHORT_NOT_OK)
- && (urb->transfer_buffer_length -
- urb->actual_length)
- > qh->maxpacket)
- dma->desired_mode = 1;
- if (rx_count < hw_ep->max_packet_sz_rx) {
- length = rx_count;
- dma->desired_mode = 0;
- } else {
- length = urb->transfer_buffer_length;
- }
-#endif
-
-/* Disadvantage of using mode 1:
- * It's basically usable only for mass storage class; essentially all
- * other protocols also terminate transfers on short packets.
- *
- * Details:
- * An extra IN token is sent at the end of the transfer (due to AUTOREQ)
- * If you try to use mode 1 for (transfer_buffer_length - 512), and try
- * to use the extra IN token to grab the last packet using mode 0, then
- * the problem is that you cannot be sure when the device will send the
- * last packet and RxPktRdy set. Sometimes the packet is recd too soon
- * such that it gets lost when RxCSR is re-set at the end of the mode 1
- * transfer, while sometimes it is recd just a little late so that if you
- * try to configure for mode 0 soon after the mode 1 transfer is
- * completed, you will find rxcount 0. Okay, so you might think why not
- * wait for an interrupt when the pkt is recd. Well, you won't get any!
- */
-
- val = musb_readw(epio, MUSB_RXCSR);
- val &= ~MUSB_RXCSR_H_REQPKT;
-
- if (dma->desired_mode == 0)
- val &= ~MUSB_RXCSR_H_AUTOREQ;
+ if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
+ musb_dma_cppi41(musb)) && dma) {
+ dev_dbg(hw_ep->musb->controller,
+ "RX%d count %d, buffer 0x%llx len %d/%d\n",
+ epnum, musb_readw(epio, MUSB_RXCOUNT),
+ (unsigned long long) urb->transfer_dma
+ + urb->actual_length,
+ qh->offset,
+ urb->transfer_buffer_length);
+
+ done = musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh,
+ urb, xfer_len,
+ iso_err);
+ if (done)
+ goto finish;
else
- val |= MUSB_RXCSR_H_AUTOREQ;
- val |= MUSB_RXCSR_DMAENAB;
-
- /* autoclear shouldn't be set in high bandwidth */
- if (qh->hb_mult == 1)
- val |= MUSB_RXCSR_AUTOCLEAR;
-
- musb_writew(epio, MUSB_RXCSR,
- MUSB_RXCSR_H_WZC_BITS | val);
-
- /* REVISIT if when actual_length != 0,
- * transfer_buffer_length needs to be
- * adjusted first...
- */
- ret = c->channel_program(
- dma, qh->maxpacket,
- dma->desired_mode, buf, length);
-
- if (!ret) {
- c->channel_release(dma);
- hw_ep->rx_channel = NULL;
- dma = NULL;
- val = musb_readw(epio, MUSB_RXCSR);
- val &= ~(MUSB_RXCSR_DMAENAB
- | MUSB_RXCSR_H_AUTOREQ
- | MUSB_RXCSR_AUTOCLEAR);
- musb_writew(epio, MUSB_RXCSR, val);
- }
+ dev_err(musb->controller, "error: rx_dma failed\n");
}
-#endif /* Mentor DMA */
if (!dma) {
unsigned int received_len;
@@ -2512,6 +2621,7 @@ static void musb_free_temp_buffer(struct urb *urb)
{
enum dma_data_direction dir;
struct musb_temp_buffer *temp;
+ size_t length;
if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
return;
@@ -2522,8 +2632,12 @@ static void musb_free_temp_buffer(struct urb *urb)
data);
if (dir == DMA_FROM_DEVICE) {
- memcpy(temp->old_xfer_buffer, temp->data,
- urb->transfer_buffer_length);
+ if (usb_pipeisoc(urb->pipe))
+ length = urb->transfer_buffer_length;
+ else
+ length = urb->actual_length;
+
+ memcpy(temp->old_xfer_buffer, temp->data, length);
}
urb->transfer_buffer = temp->old_xfer_buffer;
kfree(temp->kmalloc_ptr);
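
The copy-back change above is needed because urb->actual_length only describes data that is contiguous from the start of the buffer. For isochronous IN transfers each frame lands at its descriptor offset, so valid data can sit well past actual_length. A hypothetical helper (not part of the driver) using real struct urb fields to illustrate why:

	/* highest byte an ISO IN urb actually used; may exceed actual_length */
	static size_t iso_in_used_extent(const struct urb *urb)
	{
		size_t end = 0;
		int i;

		for (i = 0; i < urb->number_of_packets; i++)
			end = max_t(size_t, end,
				    urb->iso_frame_desc[i].offset +
				    urb->iso_frame_desc[i].actual_length);
		return end;
	}

For bulk, interrupt and control transfers the data is contiguous, so copying only actual_length bytes back from the temporary buffer is sufficient and avoids overwriting caller memory with bytes the device never sent.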