author     Jarkko Nikula <jarkko.nikula@linux.intel.com>  2017-01-17 13:57:25 +0200
committer  Vinod Koul <vinod.koul@intel.com>  2017-01-25 11:51:39 +0530
commit     a46a7634017ae5da1adf352c5bd0c66a772f20b5 (patch)
tree       4595bf91b11f853080af8215cd1774497549ab25 /drivers/dma
parent     dmaengine: dw: pci: remove LPE Audio DMA ID (diff)
dmaengine: dw: Fix data corruption in large device to memory transfers
When transferring more data than the maximum block size supported by the HW multiplied by the source width, the transfer is split into smaller chunks. Currently the code calculates the memory width, and thus the alignment, before splitting, for both memory-to-device and device-to-memory transfers.

For memory-to-device transfers this works fine, since the alignment is preserved through the splitting and the split blocks remain memory-width aligned. However, in device-to-memory transfers the alignment breaks when the maximum block size multiplied by the register width does not have the same alignment as the buffer. For instance, when transferring 4100 bytes (32-bit aligned) from an 8-bit register on a DW DMA that has a maximum block size of 4095 elements, an attempt to do such a transfer caused data corruption.

Fix this by calculating and setting the destination memory width after splitting, using the split block's alignment and length.

Signed-off-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
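To make the arithmetic concrete, here is a minimal, standalone C sketch (not kernel code) of the width selection described above. The buffer address, data_width value, block size and helper name are assumptions chosen for illustration; the real driver derives the width with __ffs(data_width | mem | len) and programs it via DWC_CTLL_DST_WIDTH(). The sketch shows why picking the destination width once, from the whole 4100-byte buffer, is unsafe after the 4095-byte first chunk, while recomputing it per chunk is not.

#include <stdio.h>
#include <strings.h>    /* ffs() */

/*
 * Width (as log2 of bytes) usable for a given address and length: the
 * least-aligned of the three operands wins, mirroring the driver's
 * __ffs(data_width | mem | len) calculation.
 */
static unsigned int mem_width(unsigned int data_width,
                              unsigned long mem, unsigned long len)
{
        return (unsigned int)ffs((int)(data_width | mem | len)) - 1;
}

int main(void)
{
        unsigned int data_width = 4;     /* controller maximum: 32-bit (assumed) */
        unsigned int reg_width = 0;      /* 8-bit device register -> log2(1 byte) */
        unsigned long block_size = 4095; /* max elements per hardware block */
        unsigned long mem = 0x1000;      /* 32-bit aligned buffer (example address) */
        unsigned long len = 4100;        /* 32-bit aligned total length */

        /* Old behaviour: destination width chosen once, before splitting. */
        unsigned int once = mem_width(data_width, mem, len);

        /* The first chunk is capped at block_size elements of reg_width bytes. */
        unsigned long dlen = (len >> reg_width) > block_size ?
                             block_size << reg_width : len;

        /* New behaviour: width recomputed per chunk, after splitting. */
        unsigned int w1 = mem_width(data_width, mem, dlen);
        unsigned int w2 = mem_width(data_width, mem + dlen, len - dlen);

        printf("width chosen before split: %u-byte writes\n", 1u << once);
        printf("chunk 1: %lu bytes, %u-byte writes\n", dlen, 1u << w1);
        printf("chunk 2: %lu bytes, %u-byte writes\n", len - dlen, 1u << w2);
        return 0;
}

Run as written, the sketch reports a 4-byte width for the unsplit buffer but 1-byte widths for both chunks: after the 4095-byte first block, the remaining address and length are only byte aligned, so a destination width fixed up front at 32 bits no longer matches the chunks the hardware is actually asked to write, which is the corruption the patch below avoids by moving the width calculation after the split.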
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/dw/core.c  20
1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index e5adf5d1c34f..45bb608a1b7c 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -789,17 +789,13 @@ slave_sg_todev_fill_desc:
lli_write(desc, sar, mem);
lli_write(desc, dar, reg);
- lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
if ((len >> mem_width) > dwc->block_size) {
dlen = dwc->block_size << mem_width;
- mem += dlen;
- len -= dlen;
} else {
dlen = len;
- len = 0;
}
-
lli_write(desc, ctlhi, dlen >> mem_width);
+ lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
desc->len = dlen;
if (!first) {
@@ -809,6 +805,9 @@ slave_sg_todev_fill_desc:
list_add_tail(&desc->desc_node, &first->tx_list);
}
prev = desc;
+
+ mem += dlen;
+ len -= dlen;
total_len += dlen;
if (len)
@@ -833,8 +832,6 @@ slave_sg_todev_fill_desc:
mem = sg_dma_address(sg);
len = sg_dma_len(sg);
- mem_width = __ffs(data_width | mem | len);
-
slave_sg_fromdev_fill_desc:
desc = dwc_desc_get(dwc);
if (!desc)
@@ -842,16 +839,14 @@ slave_sg_fromdev_fill_desc:
lli_write(desc, sar, reg);
lli_write(desc, dar, mem);
- lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
if ((len >> reg_width) > dwc->block_size) {
dlen = dwc->block_size << reg_width;
- mem += dlen;
- len -= dlen;
} else {
dlen = len;
- len = 0;
}
lli_write(desc, ctlhi, dlen >> reg_width);
+ mem_width = __ffs(data_width | mem | dlen);
+ lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
desc->len = dlen;
if (!first) {
@@ -861,6 +856,9 @@ slave_sg_fromdev_fill_desc:
list_add_tail(&desc->desc_node, &first->tx_list);
}
prev = desc;
+
+ mem += dlen;
+ len -= dlen;
total_len += dlen;
if (len)