Diffstat (limited to 'drivers/dma')
 drivers/dma/Kconfig                          |  10
 drivers/dma/Makefile                         |   1
 drivers/dma/acpi-dma.c                       |  12
 drivers/dma/bcm2835-dma.c                    |  42
 drivers/dma/dma-jz4740.c                     | 623
 drivers/dma/dma-jz4780.c                     |  19
 drivers/dma/dmatest.c                        |  35
 drivers/dma/dw-edma/dw-edma-core.h           |   2
 drivers/dma/dw-edma/dw-edma-pcie.c           |  18
 drivers/dma/dw-edma/dw-edma-v0-core.c        |  34
 drivers/dma/dw-edma/dw-edma-v0-debugfs.c     |  29
 drivers/dma/dw/Makefile                      |   4
 drivers/dma/dw/acpi.c                        |  53
 drivers/dma/dw/internal.h                    |  51
 drivers/dma/dw/of.c                          | 131
 drivers/dma/dw/pci.c                         |  62
 drivers/dma/dw/platform.c                    | 221
 drivers/dma/fsl-edma-common.c                |  20
 drivers/dma/fsl-edma-common.h                |   4
 drivers/dma/fsl-edma.c                       |  81
 drivers/dma/fsl-qdma.c                       |   9
 drivers/dma/fsldma.c                         |   1
 drivers/dma/imx-dma.c                        |   1
 drivers/dma/imx-sdma.c                       |   4
 drivers/dma/ioat/dca.c                       |   3
 drivers/dma/iop-adma.c                       |  24
 drivers/dma/iop-adma.h                       | 914
 drivers/dma/mediatek/mtk-uart-apdma.c        |   4
 drivers/dma/mv_xor_v2.c                      |  11
 drivers/dma/pl330.c                          |   9
 drivers/dma/qcom/hidma_ll.c                  |   2
 drivers/dma/qcom/hidma_mgmt.c                |   9
 drivers/dma/s3c24xx-dma.c                    |   5
 drivers/dma/sh/rcar-dmac.c                   |  32
 drivers/dma/sh/usb-dmac.c                    |   4
 drivers/dma/sprd-dma.c                       |  10
 drivers/dma/st_fdma.c                        |   4
 drivers/dma/ste_dma40.c                      |   4
 drivers/dma/stm32-dma.c                      |  18
 drivers/dma/stm32-dmamux.c                   |   3
 drivers/dma/stm32-mdma.c                     |   9
 drivers/dma/sun4i-dma.c                      |   4
 drivers/dma/sun6i-dma.c                      |   4
 drivers/dma/tegra20-apb-dma.c                |  75
 drivers/dma/tegra210-adma.c                  |  16
 drivers/dma/ti/dma-crossbar.c                |   4
 drivers/dma/ti/edma.c                        | 228
 drivers/dma/ti/omap-dma.c                    |  70
 drivers/dma/uniphier-mdmac.c                 |   5
 drivers/dma/xgene-dma.c                      |   8
 50 files changed, 1787 insertions(+), 1159 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 03fa0c58cef3..7af874b69ffb 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -137,12 +137,6 @@ config DMA_BCM2835
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
-config DMA_JZ4740
- tristate "JZ4740 DMA support"
- depends on MACH_JZ4740 || COMPILE_TEST
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
-
config DMA_JZ4780
tristate "JZ4780 DMA support"
depends on MIPS || COMPILE_TEST
@@ -294,8 +288,8 @@ config INTEL_IOATDMA
If unsure, say N.
config INTEL_IOP_ADMA
- tristate "Intel IOP ADMA support"
- depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
+ tristate "Intel IOP32x ADMA support"
+ depends on ARCH_IOP32X || COMPILE_TEST
select DMA_ENGINE
select ASYNC_TX_ENABLE_CHANNEL_SWITCH
help
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5bddf6f8790f..f5ce8665e944 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -22,7 +22,6 @@ obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
obj-$(CONFIG_BCM_SBA_RAID) += bcm-sba-raid.o
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
-obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o
obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 30243f5c0710..8a05db3343d3 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -10,6 +10,7 @@
*/
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -82,6 +83,12 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
if (si->base_request_line == 0 && si->num_handshake_signals == 0)
return 0;
+ /* Set up DMA mask based on value from CSRT */
+ ret = dma_coerce_mask_and_coherent(&adev->dev,
+ DMA_BIT_MASK(si->dma_address_width));
+ if (ret)
+ return 0;
+
adma->base_request_line = si->base_request_line;
adma->end_request_line = si->base_request_line +
si->num_handshake_signals - 1;
@@ -140,7 +147,7 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
* @dev: struct device of DMA controller
* @acpi_dma_xlate: translation function which converts a dma specifier
* into a dma_chan structure
- * @data pointer to controller specific data to be used by
+ * @data: pointer to controller specific data to be used by
* translation function
*
* Allocated memory should be freed with appropriate acpi_dma_controller_free()
@@ -224,7 +231,7 @@ static void devm_acpi_dma_release(struct device *dev, void *res)
* devm_acpi_dma_controller_register - resource managed acpi_dma_controller_register()
* @dev: device that is registering this DMA controller
* @acpi_dma_xlate: translation function
- * @data pointer to controller specific data
+ * @data: pointer to controller specific data
*
* Managed acpi_dma_controller_register(). DMA controller registered by this
* function are automatically freed on driver detach. See
@@ -257,6 +264,7 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register);
/**
* devm_acpi_dma_controller_free - resource managed acpi_dma_controller_free()
+ * @dev: device that is unregistering as DMA controller
*
* Unregister a DMA controller registered with
* devm_acpi_dma_controller_register(). Normally this function will not need to
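
Note: the acpi-dma.c hunk above sizes the device's DMA mask from the CSRT-reported address width, bailing out of request-line setup (rather than failing the probe) if the mask cannot be set. A minimal sketch of the same pattern; example_set_dma_width() is an illustrative stand-in for the CSRT parsing context:

    #include <linux/dma-mapping.h>

    /* Sketch: constrain a device to an n-bit DMA mask reported by
     * firmware; 'width' stands in for si->dma_address_width. */
    static int example_set_dma_width(struct device *dev, unsigned int width)
    {
            /* Also forces dev->dma_mask to point at coherent_dma_mask,
             * which some platform devices may lack. */
            return dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(width));
    }
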
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 8101ff2f05c1..e4c593f48575 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -37,10 +37,19 @@
#define BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED 14
#define BCM2835_DMA_CHAN_NAME_SIZE 8
+/**
+ * struct bcm2835_dmadev - BCM2835 DMA controller
+ * @ddev: DMA device
+ * @base: base address of register map
+ * @dma_parms: DMA parameters (to convey 1 GByte max segment size to clients)
+ * @zero_page: bus address of zero page (to detect transactions copying from
+ * zero page and avoid accessing memory if so)
+ */
struct bcm2835_dmadev {
struct dma_device ddev;
void __iomem *base;
struct device_dma_parameters dma_parms;
+ dma_addr_t zero_page;
};
struct bcm2835_dma_cb {
@@ -687,11 +696,12 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
size_t period_len, enum dma_transfer_direction direction,
unsigned long flags)
{
+ struct bcm2835_dmadev *od = to_bcm2835_dma_dev(chan->device);
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
struct bcm2835_desc *d;
dma_addr_t src, dst;
u32 info = BCM2835_DMA_WAIT_RESP;
- u32 extra = BCM2835_DMA_INT_EN;
+ u32 extra = 0;
size_t max_len = bcm2835_dma_max_frame_length(c);
size_t frames;
@@ -707,6 +717,11 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
return NULL;
}
+ if (flags & DMA_PREP_INTERRUPT)
+ extra |= BCM2835_DMA_INT_EN;
+ else
+ period_len = buf_len;
+
/*
* warn if buf_len is not a multiple of period_len - this may lead
* to unexpected latencies for interrupts and thus audible clicks
@@ -732,6 +747,10 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
dst = c->cfg.dst_addr;
src = buf_addr;
info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;
+
+ /* non-lite channels can write zeroes w/o accessing memory */
+ if (buf_addr == od->zero_page && !c->is_lite_channel)
+ info |= BCM2835_DMA_S_IGNORE;
}
/* calculate number of frames */
@@ -778,7 +797,10 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
/* stop DMA activity */
if (c->desc) {
- vchan_terminate_vdesc(&c->desc->vd);
+ if (c->desc->vd.tx.flags & DMA_PREP_INTERRUPT)
+ vchan_terminate_vdesc(&c->desc->vd);
+ else
+ vchan_vdesc_fini(&c->desc->vd);
c->desc = NULL;
bcm2835_dma_abort(c);
}
@@ -831,6 +853,9 @@ static void bcm2835_dma_free(struct bcm2835_dmadev *od)
list_del(&c->vc.chan.device_node);
tasklet_kill(&c->vc.task);
}
+
+ dma_unmap_page_attrs(od->ddev.dev, od->zero_page, PAGE_SIZE,
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}
static const struct of_device_id bcm2835_dma_of_match[] = {
@@ -871,8 +896,10 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (rc)
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to set DMA mask\n");
return rc;
+ }
od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
if (!od)
@@ -907,11 +934,20 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
BIT(DMA_MEM_TO_MEM);
od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ od->ddev.descriptor_reuse = true;
od->ddev.dev = &pdev->dev;
INIT_LIST_HEAD(&od->ddev.channels);
platform_set_drvdata(pdev, od);
+ od->zero_page = dma_map_page_attrs(od->ddev.dev, ZERO_PAGE(0), 0,
+ PAGE_SIZE, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(od->ddev.dev, od->zero_page)) {
+ dev_err(&pdev->dev, "Failed to map zero page\n");
+ return -ENOMEM;
+ }
+
/* Request DMA channel mask from device tree */
if (of_property_read_u32(pdev->dev.of_node,
"brcm,dma-channel-mask",
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
deleted file mode 100644
index 39c676c47082..000000000000
--- a/drivers/dma/dma-jz4740.c
+++ /dev/null
@@ -1,623 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
- * JZ4740 DMAC support
- */
-
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/irq.h>
-#include <linux/clk.h>
-
-#include "virt-dma.h"
-
-#define JZ_DMA_NR_CHANS 6
-
-#define JZ_REG_DMA_SRC_ADDR(x) (0x00 + (x) * 0x20)
-#define JZ_REG_DMA_DST_ADDR(x) (0x04 + (x) * 0x20)
-#define JZ_REG_DMA_TRANSFER_COUNT(x) (0x08 + (x) * 0x20)
-#define JZ_REG_DMA_REQ_TYPE(x) (0x0C + (x) * 0x20)
-#define JZ_REG_DMA_STATUS_CTRL(x) (0x10 + (x) * 0x20)
-#define JZ_REG_DMA_CMD(x) (0x14 + (x) * 0x20)
-#define JZ_REG_DMA_DESC_ADDR(x) (0x18 + (x) * 0x20)
-
-#define JZ_REG_DMA_CTRL 0x300
-#define JZ_REG_DMA_IRQ 0x304
-#define JZ_REG_DMA_DOORBELL 0x308
-#define JZ_REG_DMA_DOORBELL_SET 0x30C
-
-#define JZ_DMA_STATUS_CTRL_NO_DESC BIT(31)
-#define JZ_DMA_STATUS_CTRL_DESC_INV BIT(6)
-#define JZ_DMA_STATUS_CTRL_ADDR_ERR BIT(4)
-#define JZ_DMA_STATUS_CTRL_TRANSFER_DONE BIT(3)
-#define JZ_DMA_STATUS_CTRL_HALT BIT(2)
-#define JZ_DMA_STATUS_CTRL_COUNT_TERMINATE BIT(1)
-#define JZ_DMA_STATUS_CTRL_ENABLE BIT(0)
-
-#define JZ_DMA_CMD_SRC_INC BIT(23)
-#define JZ_DMA_CMD_DST_INC BIT(22)
-#define JZ_DMA_CMD_RDIL_MASK (0xf << 16)
-#define JZ_DMA_CMD_SRC_WIDTH_MASK (0x3 << 14)
-#define JZ_DMA_CMD_DST_WIDTH_MASK (0x3 << 12)
-#define JZ_DMA_CMD_INTERVAL_LENGTH_MASK (0x7 << 8)
-#define JZ_DMA_CMD_BLOCK_MODE BIT(7)
-#define JZ_DMA_CMD_DESC_VALID BIT(4)
-#define JZ_DMA_CMD_DESC_VALID_MODE BIT(3)
-#define JZ_DMA_CMD_VALID_IRQ_ENABLE BIT(2)
-#define JZ_DMA_CMD_TRANSFER_IRQ_ENABLE BIT(1)
-#define JZ_DMA_CMD_LINK_ENABLE BIT(0)
-
-#define JZ_DMA_CMD_FLAGS_OFFSET 22
-#define JZ_DMA_CMD_RDIL_OFFSET 16
-#define JZ_DMA_CMD_SRC_WIDTH_OFFSET 14
-#define JZ_DMA_CMD_DST_WIDTH_OFFSET 12
-#define JZ_DMA_CMD_TRANSFER_SIZE_OFFSET 8
-#define JZ_DMA_CMD_MODE_OFFSET 7
-
-#define JZ_DMA_CTRL_PRIORITY_MASK (0x3 << 8)
-#define JZ_DMA_CTRL_HALT BIT(3)
-#define JZ_DMA_CTRL_ADDRESS_ERROR BIT(2)
-#define JZ_DMA_CTRL_ENABLE BIT(0)
-
-enum jz4740_dma_width {
- JZ4740_DMA_WIDTH_32BIT = 0,
- JZ4740_DMA_WIDTH_8BIT = 1,
- JZ4740_DMA_WIDTH_16BIT = 2,
-};
-
-enum jz4740_dma_transfer_size {
- JZ4740_DMA_TRANSFER_SIZE_4BYTE = 0,
- JZ4740_DMA_TRANSFER_SIZE_1BYTE = 1,
- JZ4740_DMA_TRANSFER_SIZE_2BYTE = 2,
- JZ4740_DMA_TRANSFER_SIZE_16BYTE = 3,
- JZ4740_DMA_TRANSFER_SIZE_32BYTE = 4,
-};
-
-enum jz4740_dma_flags {
- JZ4740_DMA_SRC_AUTOINC = 0x2,
- JZ4740_DMA_DST_AUTOINC = 0x1,
-};
-
-enum jz4740_dma_mode {
- JZ4740_DMA_MODE_SINGLE = 0,
- JZ4740_DMA_MODE_BLOCK = 1,
-};
-
-struct jz4740_dma_sg {
- dma_addr_t addr;
- unsigned int len;
-};
-
-struct jz4740_dma_desc {
- struct virt_dma_desc vdesc;
-
- enum dma_transfer_direction direction;
- bool cyclic;
-
- unsigned int num_sgs;
- struct jz4740_dma_sg sg[];
-};
-
-struct jz4740_dmaengine_chan {
- struct virt_dma_chan vchan;
- unsigned int id;
- struct dma_slave_config config;
-
- dma_addr_t fifo_addr;
- unsigned int transfer_shift;
-
- struct jz4740_dma_desc *desc;
- unsigned int next_sg;
-};
-
-struct jz4740_dma_dev {
- struct dma_device ddev;
- void __iomem *base;
- struct clk *clk;
-
- struct jz4740_dmaengine_chan chan[JZ_DMA_NR_CHANS];
-};
-
-static struct jz4740_dma_dev *jz4740_dma_chan_get_dev(
- struct jz4740_dmaengine_chan *chan)
-{
- return container_of(chan->vchan.chan.device, struct jz4740_dma_dev,
- ddev);
-}
-
-static struct jz4740_dmaengine_chan *to_jz4740_dma_chan(struct dma_chan *c)
-{
- return container_of(c, struct jz4740_dmaengine_chan, vchan.chan);
-}
-
-static struct jz4740_dma_desc *to_jz4740_dma_desc(struct virt_dma_desc *vdesc)
-{
- return container_of(vdesc, struct jz4740_dma_desc, vdesc);
-}
-
-static inline uint32_t jz4740_dma_read(struct jz4740_dma_dev *dmadev,
- unsigned int reg)
-{
- return readl(dmadev->base + reg);
-}
-
-static inline void jz4740_dma_write(struct jz4740_dma_dev *dmadev,
- unsigned reg, uint32_t val)
-{
- writel(val, dmadev->base + reg);
-}
-
-static inline void jz4740_dma_write_mask(struct jz4740_dma_dev *dmadev,
- unsigned int reg, uint32_t val, uint32_t mask)
-{
- uint32_t tmp;
-
- tmp = jz4740_dma_read(dmadev, reg);
- tmp &= ~mask;
- tmp |= val;
- jz4740_dma_write(dmadev, reg, tmp);
-}
-
-static struct jz4740_dma_desc *jz4740_dma_alloc_desc(unsigned int num_sgs)
-{
- return kzalloc(sizeof(struct jz4740_dma_desc) +
- sizeof(struct jz4740_dma_sg) * num_sgs, GFP_ATOMIC);
-}
-
-static enum jz4740_dma_width jz4740_dma_width(enum dma_slave_buswidth width)
-{
- switch (width) {
- case DMA_SLAVE_BUSWIDTH_1_BYTE:
- return JZ4740_DMA_WIDTH_8BIT;
- case DMA_SLAVE_BUSWIDTH_2_BYTES:
- return JZ4740_DMA_WIDTH_16BIT;
- case DMA_SLAVE_BUSWIDTH_4_BYTES:
- return JZ4740_DMA_WIDTH_32BIT;
- default:
- return JZ4740_DMA_WIDTH_32BIT;
- }
-}
-
-static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst)
-{
- if (maxburst <= 1)
- return JZ4740_DMA_TRANSFER_SIZE_1BYTE;
- else if (maxburst <= 3)
- return JZ4740_DMA_TRANSFER_SIZE_2BYTE;
- else if (maxburst <= 15)
- return JZ4740_DMA_TRANSFER_SIZE_4BYTE;
- else if (maxburst <= 31)
- return JZ4740_DMA_TRANSFER_SIZE_16BYTE;
-
- return JZ4740_DMA_TRANSFER_SIZE_32BYTE;
-}
-
-static int jz4740_dma_slave_config_write(struct dma_chan *c,
- struct dma_slave_config *config,
- enum dma_transfer_direction direction)
-{
- struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
- struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
- enum jz4740_dma_width src_width;
- enum jz4740_dma_width dst_width;
- enum jz4740_dma_transfer_size transfer_size;
- enum jz4740_dma_flags flags;
- uint32_t cmd;
-
- switch (direction) {
- case DMA_MEM_TO_DEV:
- flags = JZ4740_DMA_SRC_AUTOINC;
- transfer_size = jz4740_dma_maxburst(config->dst_maxburst);
- chan->fifo_addr = config->dst_addr;
- break;
- case DMA_DEV_TO_MEM:
- flags = JZ4740_DMA_DST_AUTOINC;
- transfer_size = jz4740_dma_maxburst(config->src_maxburst);
- chan->fifo_addr = config->src_addr;
- break;
- default:
- return -EINVAL;
- }
-
- src_width = jz4740_dma_width(config->src_addr_width);
- dst_width = jz4740_dma_width(config->dst_addr_width);
-
- switch (transfer_size) {
- case JZ4740_DMA_TRANSFER_SIZE_2BYTE:
- chan->transfer_shift = 1;
- break;
- case JZ4740_DMA_TRANSFER_SIZE_4BYTE:
- chan->transfer_shift = 2;
- break;
- case JZ4740_DMA_TRANSFER_SIZE_16BYTE:
- chan->transfer_shift = 4;
- break;
- case JZ4740_DMA_TRANSFER_SIZE_32BYTE:
- chan->transfer_shift = 5;
- break;
- default:
- chan->transfer_shift = 0;
- break;
- }
-
- cmd = flags << JZ_DMA_CMD_FLAGS_OFFSET;
- cmd |= src_width << JZ_DMA_CMD_SRC_WIDTH_OFFSET;
- cmd |= dst_width << JZ_DMA_CMD_DST_WIDTH_OFFSET;
- cmd |= transfer_size << JZ_DMA_CMD_TRANSFER_SIZE_OFFSET;
- cmd |= JZ4740_DMA_MODE_SINGLE << JZ_DMA_CMD_MODE_OFFSET;
- cmd |= JZ_DMA_CMD_TRANSFER_IRQ_ENABLE;
-
- jz4740_dma_write(dmadev, JZ_REG_DMA_CMD(chan->id), cmd);
- jz4740_dma_write(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0);
- jz4740_dma_write(dmadev, JZ_REG_DMA_REQ_TYPE(chan->id),
- config->slave_id);
-
- return 0;
-}
-
-static int jz4740_dma_slave_config(struct dma_chan *c,
- struct dma_slave_config *config)
-{
- struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
-
- memcpy(&chan->config, config, sizeof(*config));
- return 0;
-}
-
-static int jz4740_dma_terminate_all(struct dma_chan *c)
-{
- struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
- struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
- unsigned long flags;
- LIST_HEAD(head);
-
- spin_lock_irqsave(&chan->vchan.lock, flags);
- jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0,
- JZ_DMA_STATUS_CTRL_ENABLE);
- chan->desc = NULL;
- vchan_get_all_descriptors(&chan->vchan, &head);
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
-
- vchan_dma_desc_free_list(&chan->vchan, &head);
-
- return 0;
-}
-
-static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan)
-{
- struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
- dma_addr_t src_addr, dst_addr;
- struct virt_dma_desc *vdesc;
- struct jz4740_dma_sg *sg;
-
- jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0,
- JZ_DMA_STATUS_CTRL_ENABLE);
-
- if (!chan->desc) {
- vdesc = vchan_next_desc(&chan->vchan);
- if (!vdesc)
- return 0;
- chan->desc = to_jz4740_dma_desc(vdesc);
- chan->next_sg = 0;
- }
-
- if (chan->next_sg == chan->desc->num_sgs)
- chan->next_sg = 0;
-
- sg = &chan->desc->sg[chan->next_sg];
-
- if (chan->desc->direction == DMA_MEM_TO_DEV) {
- src_addr = sg->addr;
- dst_addr = chan->fifo_addr;
- } else {
- src_addr = chan->fifo_addr;
- dst_addr = sg->addr;
- }
- jz4740_dma_write(dmadev, JZ_REG_DMA_SRC_ADDR(chan->id), src_addr);
- jz4740_dma_write(dmadev, JZ_REG_DMA_DST_ADDR(chan->id), dst_addr);
- jz4740_dma_write(dmadev, JZ_REG_DMA_TRANSFER_COUNT(chan->id),
- sg->len >> chan->transfer_shift);
-
- chan->next_sg++;
-
- jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id),
- JZ_DMA_STATUS_CTRL_NO_DESC | JZ_DMA_STATUS_CTRL_ENABLE,
- JZ_DMA_STATUS_CTRL_HALT | JZ_DMA_STATUS_CTRL_NO_DESC |
- JZ_DMA_STATUS_CTRL_ENABLE);
-
- jz4740_dma_write_mask(dmadev, JZ_REG_DMA_CTRL,
- JZ_DMA_CTRL_ENABLE,
- JZ_DMA_CTRL_HALT | JZ_DMA_CTRL_ENABLE);
-
- return 0;
-}
-
-static void jz4740_dma_chan_irq(struct jz4740_dmaengine_chan *chan)
-{
- spin_lock(&chan->vchan.lock);
- if (chan->desc) {
- if (chan->desc->cyclic) {
- vchan_cyclic_callback(&chan->desc->vdesc);
- } else {
- if (chan->next_sg == chan->desc->num_sgs) {
- list_del(&chan->desc->vdesc.node);
- vchan_cookie_complete(&chan->desc->vdesc);
- chan->desc = NULL;
- }
- }
- }
- jz4740_dma_start_transfer(chan);
- spin_unlock(&chan->vchan.lock);
-}
-
-static irqreturn_t jz4740_dma_irq(int irq, void *devid)
-{
- struct jz4740_dma_dev *dmadev = devid;
- uint32_t irq_status;
- unsigned int i;
-
- irq_status = readl(dmadev->base + JZ_REG_DMA_IRQ);
-
- for (i = 0; i < 6; ++i) {
- if (irq_status & (1 << i)) {
- jz4740_dma_write_mask(dmadev,
- JZ_REG_DMA_STATUS_CTRL(i), 0,
- JZ_DMA_STATUS_CTRL_ENABLE |
- JZ_DMA_STATUS_CTRL_TRANSFER_DONE);
-
- jz4740_dma_chan_irq(&dmadev->chan[i]);
- }
- }
-
- return IRQ_HANDLED;
-}
-
-static void jz4740_dma_issue_pending(struct dma_chan *c)
-{
- struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
- unsigned long flags;
-
- spin_lock_irqsave(&chan->vchan.lock, flags);
- if (vchan_issue_pending(&chan->vchan) && !chan->desc)
- jz4740_dma_start_transfer(chan);
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
-}
-
-static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg(
- struct dma_chan *c, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
-{
- struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
- struct jz4740_dma_desc *desc;
- struct scatterlist *sg;
- unsigned int i;
-
- desc = jz4740_dma_alloc_desc(sg_len);
- if (!desc)
- return NULL;
-
- for_each_sg(sgl, sg, sg_len, i) {
- desc->sg[i].addr = sg_dma_address(sg);
- desc->sg[i].len = sg_dma_len(sg);
- }
-
- desc->num_sgs = sg_len;
- desc->direction = direction;
- desc->cyclic = false;
-
- jz4740_dma_slave_config_write(c, &chan->config, direction);
-
- return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
-}
-
-static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic(
- struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags)
-{
- struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
- struct jz4740_dma_desc *desc;
- unsigned int num_periods, i;
-
- if (buf_len % period_len)
- return NULL;
-
- num_periods = buf_len / period_len;
-
- desc = jz4740_dma_alloc_desc(num_periods);
- if (!desc)
- return NULL;
-
- for (i = 0; i < num_periods; i++) {
- desc->sg[i].addr = buf_addr;
- desc->sg[i].len = period_len;
- buf_addr += period_len;
- }
-
- desc->num_sgs = num_periods;
- desc->direction = direction;
- desc->cyclic = true;
-
- jz4740_dma_slave_config_write(c, &chan->config, direction);
-
- return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
-}
-
-static size_t jz4740_dma_desc_residue(struct jz4740_dmaengine_chan *chan,
- struct jz4740_dma_desc *desc, unsigned int next_sg)
-{
- struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
- unsigned int residue, count;
- unsigned int i;
-
- residue = 0;
-
- for (i = next_sg; i < desc->num_sgs; i++)
- residue += desc->sg[i].len;
-
- if (next_sg != 0) {
- count = jz4740_dma_read(dmadev,
- JZ_REG_DMA_TRANSFER_COUNT(chan->id));
- residue += count << chan->transfer_shift;
- }
-
- return residue;
-}
-
-static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
- dma_cookie_t cookie, struct dma_tx_state *state)
-{
- struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
- struct virt_dma_desc *vdesc;
- enum dma_status status;
- unsigned long flags;
-
- status = dma_cookie_status(c, cookie, state);
- if (status == DMA_COMPLETE || !state)
- return status;
-
- spin_lock_irqsave(&chan->vchan.lock, flags);
- vdesc = vchan_find_desc(&chan->vchan, cookie);
- if (cookie == chan->desc->vdesc.tx.cookie) {
- state->residue = jz4740_dma_desc_residue(chan, chan->desc,
- chan->next_sg);
- } else if (vdesc) {
- state->residue = jz4740_dma_desc_residue(chan,
- to_jz4740_dma_desc(vdesc), 0);
- } else {
- state->residue = 0;
- }
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
-
- return status;
-}
-
-static void jz4740_dma_free_chan_resources(struct dma_chan *c)
-{
- vchan_free_chan_resources(to_virt_chan(c));
-}
-
-static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc)
-{
- kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc));
-}
-
-#define JZ4740_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
- BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
-
-static int jz4740_dma_probe(struct platform_device *pdev)
-{
- struct jz4740_dmaengine_chan *chan;
- struct jz4740_dma_dev *dmadev;
- struct dma_device *dd;
- unsigned int i;
- struct resource *res;
- int ret;
- int irq;
-
- dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
- if (!dmadev)
- return -EINVAL;
-
- dd = &dmadev->ddev;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmadev->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dmadev->base))
- return PTR_ERR(dmadev->base);
-
- dmadev->clk = clk_get(&pdev->dev, "dma");
- if (IS_ERR(dmadev->clk))
- return PTR_ERR(dmadev->clk);
-
- clk_prepare_enable(dmadev->clk);
-
- dma_cap_set(DMA_SLAVE, dd->cap_mask);
- dma_cap_set(DMA_CYCLIC, dd->cap_mask);
- dd->device_free_chan_resources = jz4740_dma_free_chan_resources;
- dd->device_tx_status = jz4740_dma_tx_status;
- dd->device_issue_pending = jz4740_dma_issue_pending;
- dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg;
- dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
- dd->device_config = jz4740_dma_slave_config;
- dd->device_terminate_all = jz4740_dma_terminate_all;
- dd->src_addr_widths = JZ4740_DMA_BUSWIDTHS;
- dd->dst_addr_widths = JZ4740_DMA_BUSWIDTHS;
- dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
- dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
- dd->dev = &pdev->dev;
- INIT_LIST_HEAD(&dd->channels);
-
- for (i = 0; i < JZ_DMA_NR_CHANS; i++) {
- chan = &dmadev->chan[i];
- chan->id = i;
- chan->vchan.desc_free = jz4740_dma_desc_free;
- vchan_init(&chan->vchan, dd);
- }
-
- ret = dma_async_device_register(dd);
- if (ret)
- goto err_clk;
-
- irq = platform_get_irq(pdev, 0);
- ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
- if (ret)
- goto err_unregister;
-
- platform_set_drvdata(pdev, dmadev);
-
- return 0;
-
-err_unregister:
- dma_async_device_unregister(dd);
-err_clk:
- clk_disable_unprepare(dmadev->clk);
- return ret;
-}
-
-static void jz4740_cleanup_vchan(struct dma_device *dmadev)
-{
- struct jz4740_dmaengine_chan *chan, *_chan;
-
- list_for_each_entry_safe(chan, _chan,
- &dmadev->channels, vchan.chan.device_node) {
- list_del(&chan->vchan.chan.device_node);
- tasklet_kill(&chan->vchan.task);
- }
-}
-
-
-static int jz4740_dma_remove(struct platform_device *pdev)
-{
- struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev);
- int irq = platform_get_irq(pdev, 0);
-
- free_irq(irq, dmadev);
-
- jz4740_cleanup_vchan(&dmadev->ddev);
- dma_async_device_unregister(&dmadev->ddev);
- clk_disable_unprepare(dmadev->clk);
-
- return 0;
-}
-
-static struct platform_driver jz4740_dma_driver = {
- .probe = jz4740_dma_probe,
- .remove = jz4740_dma_remove,
- .driver = {
- .name = "jz4740-dma",
- },
-};
-module_platform_driver(jz4740_dma_driver);
-
-MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
-MODULE_DESCRIPTION("JZ4740 DMA driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 7fe9309a876b..cafb1cc065bb 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -92,6 +92,7 @@
#define JZ_SOC_DATA_PROGRAMMABLE_DMA BIT(1)
#define JZ_SOC_DATA_PER_CHAN_PM BIT(2)
#define JZ_SOC_DATA_NO_DCKES_DCKEC BIT(3)
+#define JZ_SOC_DATA_BREAK_LINKS BIT(4)
/**
* struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
@@ -355,6 +356,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
void *context)
{
struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+ struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
struct jz4780_dma_desc *desc;
unsigned int i;
int err;
@@ -375,7 +377,8 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
desc->desc[i].dcm |= JZ_DMA_DCM_TIE;
- if (i != (sg_len - 1)) {
+ if (i != (sg_len - 1) &&
+ !(jzdma->soc_data->flags & JZ_SOC_DATA_BREAK_LINKS)) {
/* Automatically proceed to the next descriptor. */
desc->desc[i].dcm |= JZ_DMA_DCM_LINK;
@@ -664,6 +667,8 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
struct jz4780_dma_chan *jzchan)
{
+ const unsigned int soc_flags = jzdma->soc_data->flags;
+ struct jz4780_dma_desc *desc = jzchan->desc;
uint32_t dcs;
bool ack = true;
@@ -691,8 +696,11 @@ static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
jz4780_dma_begin(jzchan);
} else if (dcs & JZ_DMA_DCS_TT) {
- vchan_cookie_complete(&jzchan->desc->vdesc);
- jzchan->desc = NULL;
+ if (!(soc_flags & JZ_SOC_DATA_BREAK_LINKS) ||
+ (jzchan->curr_hwdesc + 1 == desc->count)) {
+ vchan_cookie_complete(&desc->vdesc);
+ jzchan->desc = NULL;
+ }
jz4780_dma_begin(jzchan);
} else {
@@ -878,10 +886,8 @@ static int jz4780_dma_probe(struct platform_device *pdev)
}
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "failed to get IRQ: %d\n", ret);
+ if (ret < 0)
return ret;
- }
jzdma->irq = ret;
@@ -992,6 +998,7 @@ static int jz4780_dma_remove(struct platform_device *pdev)
static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
.nb_channels = 6,
.transfer_ord_max = 5,
+ .flags = JZ_SOC_DATA_BREAK_LINKS,
};
static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
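
Note: the new JZ_SOC_DATA_BREAK_LINKS flag is what lets dma-jz4780.c absorb the deleted dma-jz4740.c driver: JZ4740-class controllers cannot chain hardware descriptors, so each one completes separately instead of setting JZ_DMA_DCM_LINK. As a sketch, the gating reduces to a bitmask test (names mirror the driver; the helper itself is illustrative):

    #include <linux/bits.h>

    #define EXAMPLE_BREAK_LINKS  BIT(4) /* stand-in for JZ_SOC_DATA_BREAK_LINKS */

    /* Sketch: chain descriptor i to i+1 only when the SoC supports
     * automatic linking; JZ4740 parts take an interrupt per descriptor. */
    static bool example_may_link(unsigned int soc_flags,
                                 unsigned int i, unsigned int sg_len)
    {
            return i != (sg_len - 1) && !(soc_flags & EXAMPLE_BREAK_LINKS);
    }
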
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 3d22ae8dca72..a2cadfa2e6d7 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -72,6 +72,10 @@ static bool norandom;
module_param(norandom, bool, 0644);
MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");
+static bool polled;
+module_param(polled, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
+
static bool verbose;
module_param(verbose, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
@@ -110,6 +114,7 @@ struct dmatest_params {
bool norandom;
int alignment;
unsigned int transfer_size;
+ bool polled;
};
/**
@@ -651,7 +656,10 @@ static int dmatest_func(void *data)
/*
* src and dst buffers are freed by ourselves below
*/
- flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ if (params->polled)
+ flags = DMA_CTRL_ACK;
+ else
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
ktime = ktime_get();
while (!kthread_should_stop()
@@ -780,8 +788,10 @@ static int dmatest_func(void *data)
}
done->done = false;
- tx->callback = dmatest_callback;
- tx->callback_param = done;
+ if (!params->polled) {
+ tx->callback = dmatest_callback;
+ tx->callback_param = done;
+ }
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
@@ -790,12 +800,22 @@ static int dmatest_func(void *data)
msleep(100);
goto error_unmap_continue;
}
- dma_async_issue_pending(chan);
- wait_event_freezable_timeout(thread->done_wait, done->done,
- msecs_to_jiffies(params->timeout));
+ if (params->polled) {
+ status = dma_sync_wait(chan, cookie);
+ dmaengine_terminate_sync(chan);
+ if (status == DMA_COMPLETE)
+ done->done = true;
+ } else {
+ dma_async_issue_pending(chan);
+
+ wait_event_freezable_timeout(thread->done_wait,
+ done->done,
+ msecs_to_jiffies(params->timeout));
- status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+ status = dma_async_is_tx_complete(chan, cookie, NULL,
+ NULL);
+ }
if (!done->done) {
result("test timed out", total_tests, src->off, dst->off,
@@ -1065,6 +1085,7 @@ static void add_threaded_test(struct dmatest_info *info)
params->norandom = norandom;
params->alignment = alignment;
params->transfer_size = transfer_size;
+ params->polled = polled;
request_channels(info, DMA_MEMCPY);
request_channels(info, DMA_MEMSET);
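
Note: the new 'polled' parameter exercises drivers without interrupt completion: no callback is installed, DMA_PREP_INTERRUPT is dropped from the flags, and the test busy-waits on the cookie. A condensed sketch of the two completion paths (the dmaengine calls are the real API; descriptor setup is elided):

    #include <linux/dmaengine.h>

    /* Sketch: returns true when the transfer for 'cookie' completed. */
    static bool example_wait(struct dma_chan *chan, dma_cookie_t cookie,
                             bool polled)
    {
            if (polled) {
                    /* No callback installed, so poll the cookie state. */
                    enum dma_status status = dma_sync_wait(chan, cookie);

                    dmaengine_terminate_sync(chan);
                    return status == DMA_COMPLETE;
            }

            dma_async_issue_pending(chan);
            /* ...sleep until the completion callback fires... */
            return dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
                   DMA_COMPLETE;
    }
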
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index b6cc90cbc9dc..4e5f9f6e901b 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -50,7 +50,7 @@ struct dw_edma_burst {
struct dw_edma_region {
phys_addr_t paddr;
- dma_addr_t vaddr;
+ void __iomem *vaddr;
size_t sz;
};
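
Note: retyping vaddr from dma_addr_t to void __iomem * is more than cosmetic; it lets sparse verify that the remote linked-list region is only accessed through MMIO accessors, and it stops reusing a DMA-address type for a CPU-side mapping. The rule being enforced, as a minimal sketch:

    #include <linux/io.h>

    /* Sketch: __iomem pointers go through readl()/writel(); a direct
     * dereference would be flagged by sparse and is not portable. */
    static u32 example_peek(void __iomem *base, unsigned int off)
    {
            return readl(base + off);
    }

    static void example_poke(void __iomem *base, unsigned int off, u32 val)
    {
            writel(val, base + off);
    }
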
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 4c96e1c948f2..dc85f55e1bb8 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -130,19 +130,19 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
chip->id = pdev->devfn;
chip->irq = pdev->irq;
- dw->rg_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->rg_bar];
+ dw->rg_region.vaddr = pcim_iomap_table(pdev)[pdata->rg_bar];
dw->rg_region.vaddr += pdata->rg_off;
dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start;
dw->rg_region.paddr += pdata->rg_off;
dw->rg_region.sz = pdata->rg_sz;
- dw->ll_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->ll_bar];
+ dw->ll_region.vaddr = pcim_iomap_table(pdev)[pdata->ll_bar];
dw->ll_region.vaddr += pdata->ll_off;
dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start;
dw->ll_region.paddr += pdata->ll_off;
dw->ll_region.sz = pdata->ll_sz;
- dw->dt_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->dt_bar];
+ dw->dt_region.vaddr = pcim_iomap_table(pdev)[pdata->dt_bar];
dw->dt_region.vaddr += pdata->dt_off;
dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start;
dw->dt_region.paddr += pdata->dt_off;
@@ -158,17 +158,17 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
pci_dbg(pdev, "Mode:\t%s\n",
dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll");
- pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+ pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
pdata->rg_bar, pdata->rg_off, pdata->rg_sz,
- &dw->rg_region.vaddr, &dw->rg_region.paddr);
+ dw->rg_region.vaddr, &dw->rg_region.paddr);
- pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+ pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
pdata->ll_bar, pdata->ll_off, pdata->ll_sz,
- &dw->ll_region.vaddr, &dw->ll_region.paddr);
+ dw->ll_region.vaddr, &dw->ll_region.paddr);
- pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+ pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
pdata->dt_bar, pdata->dt_off, pdata->dt_sz,
- &dw->dt_region.vaddr, &dw->dt_region.paddr);
+ dw->dt_region.vaddr, &dw->dt_region.paddr);
pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 8a3180ed49a6..692de47b1670 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -25,7 +25,7 @@ enum dw_edma_control {
static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
- return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr;
+ return dw->rg_region.vaddr;
}
#define SET(dw, name, value) \
@@ -192,13 +192,12 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *child;
- struct dw_edma_v0_lli *lli;
- struct dw_edma_v0_llp *llp;
+ struct dw_edma_v0_lli __iomem *lli;
+ struct dw_edma_v0_llp __iomem *llp;
u32 control = 0, i = 0;
- u64 sar, dar, addr;
int j;
- lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr;
+ lli = chunk->ll_region.vaddr;
if (chunk->cb)
control = DW_EDMA_V0_CB;
@@ -214,17 +213,15 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
/* Transfer size */
SET_LL(&lli[i].transfer_size, child->sz);
/* SAR - low, high */
- sar = cpu_to_le64(child->sar);
- SET_LL(&lli[i].sar_low, lower_32_bits(sar));
- SET_LL(&lli[i].sar_high, upper_32_bits(sar));
+ SET_LL(&lli[i].sar_low, lower_32_bits(child->sar));
+ SET_LL(&lli[i].sar_high, upper_32_bits(child->sar));
/* DAR - low, high */
- dar = cpu_to_le64(child->dar);
- SET_LL(&lli[i].dar_low, lower_32_bits(dar));
- SET_LL(&lli[i].dar_high, upper_32_bits(dar));
+ SET_LL(&lli[i].dar_low, lower_32_bits(child->dar));
+ SET_LL(&lli[i].dar_high, upper_32_bits(child->dar));
i++;
}
- llp = (struct dw_edma_v0_llp *)&lli[i];
+ llp = (void __iomem *)&lli[i];
control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
if (!chunk->cb)
control |= DW_EDMA_V0_CB;
@@ -232,9 +229,8 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
/* Channel control */
SET_LL(&llp->control, control);
/* Linked list - low, high */
- addr = cpu_to_le64(chunk->ll_region.paddr);
- SET_LL(&llp->llp_low, lower_32_bits(addr));
- SET_LL(&llp->llp_high, upper_32_bits(addr));
+ SET_LL(&llp->llp_low, lower_32_bits(chunk->ll_region.paddr));
+ SET_LL(&llp->llp_high, upper_32_bits(chunk->ll_region.paddr));
}
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
@@ -242,7 +238,6 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
struct dw_edma_chan *chan = chunk->chan;
struct dw_edma *dw = chan->chip->dw;
u32 tmp;
- u64 llp;
dw_edma_v0_core_write_chunk(chunk);
@@ -262,9 +257,10 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
SET_CH(dw, chan->dir, chan->id, ch_control1,
(DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
/* Linked list - low, high */
- llp = cpu_to_le64(chunk->ll_region.paddr);
- SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp));
- SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp));
+ SET_CH(dw, chan->dir, chan->id, llp_low,
+ lower_32_bits(chunk->ll_region.paddr));
+ SET_CH(dw, chan->dir, chan->id, llp_high,
+ upper_32_bits(chunk->ll_region.paddr));
}
/* Doorbell */
SET_RW(dw, chan->dir, doorbell,
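
Note: dropping the cpu_to_le64() calls fixes a big-endian bug: byte-swapping before lower_32_bits()/upper_32_bits() puts the halves in the wrong registers, and writel() already performs the CPU-to-little-endian conversion on the final store. A sketch of the corrected split (the register pointers are placeholders for the driver's SET_LL()/SET_CH() targets):

    #include <linux/io.h>
    #include <linux/kernel.h>

    /* Sketch: program a 64-bit bus address into a pair of LE32
     * registers, keeping the value in CPU byte order until writel(). */
    static void example_write_llp(void __iomem *lo, void __iomem *hi,
                                  u64 paddr)
    {
            writel(lower_32_bits(paddr), lo);
            writel(upper_32_bits(paddr), hi);
    }
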
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
index 3226f528cc11..42739508c0d8 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
@@ -14,7 +14,7 @@
#include "dw-edma-core.h"
#define REGS_ADDR(name) \
- ((dma_addr_t *)&regs->name)
+ ((void __force *)&regs->name)
#define REGISTER(name) \
{ #name, REGS_ADDR(name) }
@@ -40,36 +40,37 @@
static struct dentry *base_dir;
static struct dw_edma *dw;
-static struct dw_edma_v0_regs *regs;
+static struct dw_edma_v0_regs __iomem *regs;
static struct {
- void *start;
- void *end;
+ void __iomem *start;
+ void __iomem *end;
} lim[2][EDMA_V0_MAX_NR_CH];
struct debugfs_entries {
- char name[24];
+ const char *name;
dma_addr_t *reg;
};
static int dw_edma_debugfs_u32_get(void *data, u64 *val)
{
+ void __iomem *reg = (void __force __iomem *)data;
if (dw->mode == EDMA_MODE_LEGACY &&
- data >= (void *)&regs->type.legacy.ch) {
- void *ptr = (void *)&regs->type.legacy.ch;
+ reg >= (void __iomem *)&regs->type.legacy.ch) {
+ void __iomem *ptr = &regs->type.legacy.ch;
u32 viewport_sel = 0;
unsigned long flags;
u16 ch;
for (ch = 0; ch < dw->wr_ch_cnt; ch++)
- if (lim[0][ch].start >= data && data < lim[0][ch].end) {
- ptr += (data - lim[0][ch].start);
+ if (lim[0][ch].start >= reg && reg < lim[0][ch].end) {
+ ptr += (reg - lim[0][ch].start);
goto legacy_sel_wr;
}
for (ch = 0; ch < dw->rd_ch_cnt; ch++)
- if (lim[1][ch].start >= data && data < lim[1][ch].end) {
- ptr += (data - lim[1][ch].start);
+ if (lim[1][ch].start >= reg && reg < lim[1][ch].end) {
+ ptr += (reg - lim[1][ch].start);
goto legacy_sel_rd;
}
@@ -86,7 +87,7 @@ legacy_sel_wr:
raw_spin_unlock_irqrestore(&dw->lock, flags);
} else {
- *val = readl(data);
+ *val = readl(reg);
}
return 0;
@@ -105,7 +106,7 @@ static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
}
}
-static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs *regs,
+static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
struct dentry *dir)
{
int nr_entries;
@@ -288,7 +289,7 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
if (!dw)
return;
- regs = (struct dw_edma_v0_regs *)dw->rg_region.vaddr;
+ regs = dw->rg_region.vaddr;
if (!regs)
return;
diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile
index 63ed895c09aa..b6f06699e91a 100644
--- a/drivers/dma/dw/Makefile
+++ b/drivers/dma/dw/Makefile
@@ -3,7 +3,9 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw_dmac_core.o
dw_dmac_core-objs := core.o dw.o idma32.o
obj-$(CONFIG_DW_DMAC) += dw_dmac.o
-dw_dmac-objs := platform.o
+dw_dmac-y := platform.o
+dw_dmac-$(CONFIG_ACPI) += acpi.o
+dw_dmac-$(CONFIG_OF) += of.o
obj-$(CONFIG_DW_DMAC_PCI) += dw_dmac_pci.o
dw_dmac_pci-objs := pci.o
diff --git a/drivers/dma/dw/acpi.c b/drivers/dma/dw/acpi.c
new file mode 100644
index 000000000000..f6e8d55b4f6e
--- /dev/null
+++ b/drivers/dma/dw/acpi.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013,2019 Intel Corporation
+
+#include <linux/acpi.h>
+#include <linux/acpi_dma.h>
+
+#include "internal.h"
+
+static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
+{
+ struct acpi_dma_spec *dma_spec = param;
+ struct dw_dma_slave slave = {
+ .dma_dev = dma_spec->dev,
+ .src_id = dma_spec->slave_id,
+ .dst_id = dma_spec->slave_id,
+ .m_master = 0,
+ .p_master = 1,
+ };
+
+ return dw_dma_filter(chan, &slave);
+}
+
+void dw_dma_acpi_controller_register(struct dw_dma *dw)
+{
+ struct device *dev = dw->dma.dev;
+ struct acpi_dma_filter_info *info;
+ int ret;
+
+ if (!has_acpi_companion(dev))
+ return;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return;
+
+ dma_cap_zero(info->dma_cap);
+ dma_cap_set(DMA_SLAVE, info->dma_cap);
+ info->filter_fn = dw_dma_acpi_filter;
+
+ ret = acpi_dma_controller_register(dev, acpi_dma_simple_xlate, info);
+ if (ret)
+ dev_err(dev, "could not register acpi_dma_controller\n");
+}
+
+void dw_dma_acpi_controller_free(struct dw_dma *dw)
+{
+ struct device *dev = dw->dma.dev;
+
+ if (!has_acpi_companion(dev))
+ return;
+
+ acpi_dma_controller_free(dev);
+}
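
Note: unlike the devm_acpi_dma_controller_register() call this replaces in platform.c, the plain registration here needs an explicit unregister on the remove path, which dw_remove() now provides. A sketch of the pairing, assuming the helpers from this patch:

    /* Sketch: non-devm registration paired with an explicit free. */
    static void example_teardown(struct dw_dma *dw)
    {
            dw_dma_acpi_controller_free(dw);  /* undoes ..._register() */
            dw_dma_of_controller_free(dw);
    }
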
diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h
index 1dd7a4e6dd23..2e1c52eefdeb 100644
--- a/drivers/dma/dw/internal.h
+++ b/drivers/dma/dw/internal.h
@@ -23,4 +23,55 @@ int do_dw_dma_enable(struct dw_dma_chip *chip);
extern bool dw_dma_filter(struct dma_chan *chan, void *param);
+#ifdef CONFIG_ACPI
+void dw_dma_acpi_controller_register(struct dw_dma *dw);
+void dw_dma_acpi_controller_free(struct dw_dma *dw);
+#else /* !CONFIG_ACPI */
+static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
+static inline void dw_dma_acpi_controller_free(struct dw_dma *dw) {}
+#endif /* !CONFIG_ACPI */
+
+struct platform_device;
+
+#ifdef CONFIG_OF
+struct dw_dma_platform_data *dw_dma_parse_dt(struct platform_device *pdev);
+void dw_dma_of_controller_register(struct dw_dma *dw);
+void dw_dma_of_controller_free(struct dw_dma *dw);
+#else
+static inline struct dw_dma_platform_data *dw_dma_parse_dt(struct platform_device *pdev)
+{
+ return NULL;
+}
+static inline void dw_dma_of_controller_register(struct dw_dma *dw) {}
+static inline void dw_dma_of_controller_free(struct dw_dma *dw) {}
+#endif
+
+struct dw_dma_chip_pdata {
+ const struct dw_dma_platform_data *pdata;
+ int (*probe)(struct dw_dma_chip *chip);
+ int (*remove)(struct dw_dma_chip *chip);
+ struct dw_dma_chip *chip;
+};
+
+static __maybe_unused const struct dw_dma_chip_pdata dw_dma_chip_pdata = {
+ .probe = dw_dma_probe,
+ .remove = dw_dma_remove,
+};
+
+static const struct dw_dma_platform_data idma32_pdata = {
+ .nr_channels = 8,
+ .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
+ .chan_priority = CHAN_PRIORITY_ASCENDING,
+ .block_size = 131071,
+ .nr_masters = 1,
+ .data_width = {4},
+ .multi_block = {1, 1, 1, 1, 1, 1, 1, 1},
+};
+
+static __maybe_unused const struct dw_dma_chip_pdata idma32_chip_pdata = {
+ .pdata = &idma32_pdata,
+ .probe = idma32_dma_probe,
+ .remove = idma32_dma_remove,
+};
+
#endif /* _DMA_DW_INTERNAL_H */
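
Note: hoisting dw_dma_chip_pdata into internal.h gives the PCI, ACPI, and platform glue one shared probe/remove indirection for picking the DW DMAC core versus the iDMA 32-bit core. A sketch of how bus glue consumes it (match-table lookup elided; names beyond the struct are illustrative):

    /* Sketch: dispatch through the function pointers carried in
     * dw_dma_chip_pdata instead of hard-coding dw_dma_probe(). */
    static int example_bus_probe(struct dw_dma_chip *chip,
                                 struct dw_dma_chip_pdata *data)
    {
            int ret;

            chip->pdata = data->pdata;      /* NULL means autodetect */

            ret = data->probe(chip);        /* dw_dma_probe() or
                                               idma32_dma_probe() */
            if (ret)
                    return ret;

            data->chip = chip;              /* kept for remove/suspend */
            return 0;
    }
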
diff --git a/drivers/dma/dw/of.c b/drivers/dma/dw/of.c
new file mode 100644
index 000000000000..9e27831dee32
--- /dev/null
+++ b/drivers/dma/dw/of.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Platform driver for the Synopsys DesignWare DMA Controller
+ *
+ * Copyright (C) 2007-2008 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
+ * Copyright (C) 2013 Intel Corporation
+ */
+
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+
+#include "internal.h"
+
+static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct dw_dma *dw = ofdma->of_dma_data;
+ struct dw_dma_slave slave = {
+ .dma_dev = dw->dma.dev,
+ };
+ dma_cap_mask_t cap;
+
+ if (dma_spec->args_count != 3)
+ return NULL;
+
+ slave.src_id = dma_spec->args[0];
+ slave.dst_id = dma_spec->args[0];
+ slave.m_master = dma_spec->args[1];
+ slave.p_master = dma_spec->args[2];
+
+ if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
+ slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
+ slave.m_master >= dw->pdata->nr_masters ||
+ slave.p_master >= dw->pdata->nr_masters))
+ return NULL;
+
+ dma_cap_zero(cap);
+ dma_cap_set(DMA_SLAVE, cap);
+
+ /* TODO: there should be a simpler way to do this */
+ return dma_request_channel(cap, dw_dma_filter, &slave);
+}
+
+struct dw_dma_platform_data *dw_dma_parse_dt(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct dw_dma_platform_data *pdata;
+ u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
+ u32 nr_masters;
+ u32 nr_channels;
+
+ if (!np) {
+ dev_err(&pdev->dev, "Missing DT data\n");
+ return NULL;
+ }
+
+ if (of_property_read_u32(np, "dma-masters", &nr_masters))
+ return NULL;
+ if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
+ return NULL;
+
+ if (of_property_read_u32(np, "dma-channels", &nr_channels))
+ return NULL;
+ if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
+ return NULL;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ pdata->nr_masters = nr_masters;
+ pdata->nr_channels = nr_channels;
+
+ if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
+ pdata->chan_allocation_order = (unsigned char)tmp;
+
+ if (!of_property_read_u32(np, "chan_priority", &tmp))
+ pdata->chan_priority = tmp;
+
+ if (!of_property_read_u32(np, "block_size", &tmp))
+ pdata->block_size = tmp;
+
+ if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
+ for (tmp = 0; tmp < nr_masters; tmp++)
+ pdata->data_width[tmp] = arr[tmp];
+ } else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
+ for (tmp = 0; tmp < nr_masters; tmp++)
+ pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
+ }
+
+ if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
+ for (tmp = 0; tmp < nr_channels; tmp++)
+ pdata->multi_block[tmp] = mb[tmp];
+ } else {
+ for (tmp = 0; tmp < nr_channels; tmp++)
+ pdata->multi_block[tmp] = 1;
+ }
+
+ if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) {
+ if (tmp > CHAN_PROTCTL_MASK)
+ return NULL;
+ pdata->protctl = tmp;
+ }
+
+ return pdata;
+}
+
+void dw_dma_of_controller_register(struct dw_dma *dw)
+{
+ struct device *dev = dw->dma.dev;
+ int ret;
+
+ if (!dev->of_node)
+ return;
+
+ ret = of_dma_controller_register(dev->of_node, dw_dma_of_xlate, dw);
+ if (ret)
+ dev_err(dev, "could not register of_dma_controller\n");
+}
+
+void dw_dma_of_controller_free(struct dw_dma *dw)
+{
+ struct device *dev = dw->dma.dev;
+
+ if (!dev->of_node)
+ return;
+
+ of_dma_controller_free(dev->of_node);
+}
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 8de87b15a988..cf6e8ec4c0ff 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -12,38 +12,10 @@
#include "internal.h"
-struct dw_dma_pci_data {
- const struct dw_dma_platform_data *pdata;
- int (*probe)(struct dw_dma_chip *chip);
- int (*remove)(struct dw_dma_chip *chip);
- struct dw_dma_chip *chip;
-};
-
-static const struct dw_dma_pci_data dw_pci_data = {
- .probe = dw_dma_probe,
- .remove = dw_dma_remove,
-};
-
-static const struct dw_dma_platform_data idma32_pdata = {
- .nr_channels = 8,
- .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
- .chan_priority = CHAN_PRIORITY_ASCENDING,
- .block_size = 131071,
- .nr_masters = 1,
- .data_width = {4},
- .multi_block = {1, 1, 1, 1, 1, 1, 1, 1},
-};
-
-static const struct dw_dma_pci_data idma32_pci_data = {
- .pdata = &idma32_pdata,
- .probe = idma32_dma_probe,
- .remove = idma32_dma_remove,
-};
-
static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
- const struct dw_dma_pci_data *drv_data = (void *)pid->driver_data;
- struct dw_dma_pci_data *data;
+ const struct dw_dma_chip_pdata *drv_data = (void *)pid->driver_data;
+ struct dw_dma_chip_pdata *data;
struct dw_dma_chip *chip;
int ret;
@@ -95,7 +67,7 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
static void dw_pci_remove(struct pci_dev *pdev)
{
- struct dw_dma_pci_data *data = pci_get_drvdata(pdev);
+ struct dw_dma_chip_pdata *data = pci_get_drvdata(pdev);
struct dw_dma_chip *chip = data->chip;
int ret;
@@ -108,7 +80,7 @@ static void dw_pci_remove(struct pci_dev *pdev)
static int dw_pci_suspend_late(struct device *dev)
{
- struct dw_dma_pci_data *data = dev_get_drvdata(dev);
+ struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
struct dw_dma_chip *chip = data->chip;
return do_dw_dma_disable(chip);
@@ -116,7 +88,7 @@ static int dw_pci_suspend_late(struct device *dev)
static int dw_pci_resume_early(struct device *dev)
{
- struct dw_dma_pci_data *data = dev_get_drvdata(dev);
+ struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
struct dw_dma_chip *chip = data->chip;
return do_dw_dma_enable(chip);
@@ -130,29 +102,29 @@ static const struct dev_pm_ops dw_pci_dev_pm_ops = {
static const struct pci_device_id dw_pci_id_table[] = {
/* Medfield (GPDMA) */
- { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_data },
+ { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_dma_chip_pdata },
/* BayTrail */
- { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_data },
- { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_data },
+ { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_dma_chip_pdata },
+ { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_dma_chip_pdata },
/* Merrifield */
- { PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&idma32_pci_data },
+ { PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&idma32_chip_pdata },
/* Braswell */
- { PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_data },
- { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_data },
+ { PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_dma_chip_pdata },
+ { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_dma_chip_pdata },
- /* Elkhart Lake iDMA 32-bit (OSE DMA) */
- { PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&idma32_pci_data },
- { PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&idma32_pci_data },
- { PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&idma32_pci_data },
+ /* Elkhart Lake iDMA 32-bit (PSE DMA) */
+ { PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&idma32_chip_pdata },
+ { PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&idma32_chip_pdata },
+ { PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&idma32_chip_pdata },
/* Haswell */
- { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_data },
+ { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_dma_chip_pdata },
/* Broadwell */
- { PCI_VDEVICE(INTEL, 0x9ce0), (kernel_ulong_t)&dw_pci_data },
+ { PCI_VDEVICE(INTEL, 0x9ce0), (kernel_ulong_t)&dw_dma_chip_pdata },
{ }
};
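
Note: the PCI IDs carry a pointer to a const dw_dma_chip_pdata template through kernel_ulong_t driver_data; probe then duplicates it so the per-device copy is writable. A hedged sketch of that retrieval:

    #include <linux/device.h>
    #include <linux/pci.h>

    /* Sketch: recover the per-ID template and take a writable,
     * device-lifetime copy. */
    static struct dw_dma_chip_pdata *
    example_dup_pdata(struct pci_dev *pdev, const struct pci_device_id *pid)
    {
            const struct dw_dma_chip_pdata *drv_data =
                    (const void *)pid->driver_data;

            return devm_kmemdup(&pdev->dev, drv_data, sizeof(*drv_data),
                                GFP_KERNEL);
    }
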
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 382dfd9e9600..c90c798e5ec3 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -17,163 +17,28 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
-#include <linux/of_dma.h>
#include <linux/acpi.h>
-#include <linux/acpi_dma.h>
#include "internal.h"
#define DRV_NAME "dw_dmac"
-static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
- struct of_dma *ofdma)
-{
- struct dw_dma *dw = ofdma->of_dma_data;
- struct dw_dma_slave slave = {
- .dma_dev = dw->dma.dev,
- };
- dma_cap_mask_t cap;
-
- if (dma_spec->args_count != 3)
- return NULL;
-
- slave.src_id = dma_spec->args[0];
- slave.dst_id = dma_spec->args[0];
- slave.m_master = dma_spec->args[1];
- slave.p_master = dma_spec->args[2];
-
- if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
- slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
- slave.m_master >= dw->pdata->nr_masters ||
- slave.p_master >= dw->pdata->nr_masters))
- return NULL;
-
- dma_cap_zero(cap);
- dma_cap_set(DMA_SLAVE, cap);
-
- /* TODO: there should be a simpler way to do this */
- return dma_request_channel(cap, dw_dma_filter, &slave);
-}
-
-#ifdef CONFIG_ACPI
-static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
-{
- struct acpi_dma_spec *dma_spec = param;
- struct dw_dma_slave slave = {
- .dma_dev = dma_spec->dev,
- .src_id = dma_spec->slave_id,
- .dst_id = dma_spec->slave_id,
- .m_master = 0,
- .p_master = 1,
- };
-
- return dw_dma_filter(chan, &slave);
-}
-
-static void dw_dma_acpi_controller_register(struct dw_dma *dw)
-{
- struct device *dev = dw->dma.dev;
- struct acpi_dma_filter_info *info;
- int ret;
-
- info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
- if (!info)
- return;
-
- dma_cap_zero(info->dma_cap);
- dma_cap_set(DMA_SLAVE, info->dma_cap);
- info->filter_fn = dw_dma_acpi_filter;
-
- ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
- info);
- if (ret)
- dev_err(dev, "could not register acpi_dma_controller\n");
-}
-#else /* !CONFIG_ACPI */
-static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
-#endif /* !CONFIG_ACPI */
-
-#ifdef CONFIG_OF
-static struct dw_dma_platform_data *
-dw_dma_parse_dt(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct dw_dma_platform_data *pdata;
- u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
- u32 nr_masters;
- u32 nr_channels;
-
- if (!np) {
- dev_err(&pdev->dev, "Missing DT data\n");
- return NULL;
- }
-
- if (of_property_read_u32(np, "dma-masters", &nr_masters))
- return NULL;
- if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
- return NULL;
-
- if (of_property_read_u32(np, "dma-channels", &nr_channels))
- return NULL;
- if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
- return NULL;
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return NULL;
-
- pdata->nr_masters = nr_masters;
- pdata->nr_channels = nr_channels;
-
- if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
- pdata->chan_allocation_order = (unsigned char)tmp;
-
- if (!of_property_read_u32(np, "chan_priority", &tmp))
- pdata->chan_priority = tmp;
-
- if (!of_property_read_u32(np, "block_size", &tmp))
- pdata->block_size = tmp;
-
- if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
- for (tmp = 0; tmp < nr_masters; tmp++)
- pdata->data_width[tmp] = arr[tmp];
- } else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
- for (tmp = 0; tmp < nr_masters; tmp++)
- pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
- }
-
- if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
- for (tmp = 0; tmp < nr_channels; tmp++)
- pdata->multi_block[tmp] = mb[tmp];
- } else {
- for (tmp = 0; tmp < nr_channels; tmp++)
- pdata->multi_block[tmp] = 1;
- }
-
- if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) {
- if (tmp > CHAN_PROTCTL_MASK)
- return NULL;
- pdata->protctl = tmp;
- }
-
- return pdata;
-}
-#else
-static inline struct dw_dma_platform_data *
-dw_dma_parse_dt(struct platform_device *pdev)
-{
- return NULL;
-}
-#endif
-
static int dw_probe(struct platform_device *pdev)
{
+ const struct dw_dma_chip_pdata *match;
+ struct dw_dma_chip_pdata *data;
struct dw_dma_chip *chip;
struct device *dev = &pdev->dev;
- struct resource *mem;
- const struct dw_dma_platform_data *pdata;
int err;
+ match = device_get_match_data(dev);
+ if (!match)
+ return -ENODEV;
+
+ data = devm_kmemdup(&pdev->dev, match, sizeof(*match), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
@@ -182,8 +47,7 @@ static int dw_probe(struct platform_device *pdev)
if (chip->irq < 0)
return chip->irq;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->regs = devm_ioremap_resource(dev, mem);
+ chip->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
@@ -191,13 +55,16 @@ static int dw_probe(struct platform_device *pdev)
if (err)
return err;
- pdata = dev_get_platdata(dev);
- if (!pdata)
- pdata = dw_dma_parse_dt(pdev);
+ if (!data->pdata)
+ data->pdata = dev_get_platdata(dev);
+ if (!data->pdata)
+ data->pdata = dw_dma_parse_dt(pdev);
chip->dev = dev;
chip->id = pdev->id;
- chip->pdata = pdata;
+ chip->pdata = data->pdata;
+
+ data->chip = chip;
chip->clk = devm_clk_get(chip->dev, "hclk");
if (IS_ERR(chip->clk))
@@ -208,22 +75,15 @@ static int dw_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
- err = dw_dma_probe(chip);
+ err = data->probe(chip);
if (err)
goto err_dw_dma_probe;
- platform_set_drvdata(pdev, chip);
+ platform_set_drvdata(pdev, data);
- if (pdev->dev.of_node) {
- err = of_dma_controller_register(pdev->dev.of_node,
- dw_dma_of_xlate, chip->dw);
- if (err)
- dev_err(&pdev->dev,
- "could not register of_dma_controller\n");
- }
+ dw_dma_of_controller_register(chip->dw);
- if (ACPI_HANDLE(&pdev->dev))
- dw_dma_acpi_controller_register(chip->dw);
+ dw_dma_acpi_controller_register(chip->dw);
return 0;
@@ -235,12 +95,18 @@ err_dw_dma_probe:
static int dw_remove(struct platform_device *pdev)
{
- struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+ struct dw_dma_chip_pdata *data = platform_get_drvdata(pdev);
+ struct dw_dma_chip *chip = data->chip;
+ int ret;
+
+ dw_dma_acpi_controller_free(chip->dw);
- if (pdev->dev.of_node)
- of_dma_controller_free(pdev->dev.of_node);
+ dw_dma_of_controller_free(chip->dw);
+
+ ret = data->remove(chip);
+ if (ret)
+ dev_warn(chip->dev, "can't remove device properly: %d\n", ret);
- dw_dma_remove(chip);
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(chip->clk);
@@ -249,7 +115,8 @@ static int dw_remove(struct platform_device *pdev)
static void dw_shutdown(struct platform_device *pdev)
{
- struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+ struct dw_dma_chip_pdata *data = platform_get_drvdata(pdev);
+ struct dw_dma_chip *chip = data->chip;
/*
* We have to call do_dw_dma_disable() to stop any ongoing transfer. On
@@ -269,7 +136,7 @@ static void dw_shutdown(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
- { .compatible = "snps,dma-spear1340" },
+ { .compatible = "snps,dma-spear1340", .data = &dw_dma_chip_pdata },
{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
@@ -277,9 +144,15 @@ MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_dma_acpi_id_table[] = {
- { "INTL9C60", 0 },
- { "80862286", 0 },
- { "808622C0", 0 },
+ { "INTL9C60", (kernel_ulong_t)&dw_dma_chip_pdata },
+ { "80862286", (kernel_ulong_t)&dw_dma_chip_pdata },
+ { "808622C0", (kernel_ulong_t)&dw_dma_chip_pdata },
+
+ /* Elkhart Lake iDMA 32-bit (PSE DMA) */
+ { "80864BB4", (kernel_ulong_t)&idma32_chip_pdata },
+ { "80864BB5", (kernel_ulong_t)&idma32_chip_pdata },
+ { "80864BB6", (kernel_ulong_t)&idma32_chip_pdata },
+
{ }
};
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
@@ -289,7 +162,8 @@ MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
static int dw_suspend_late(struct device *dev)
{
- struct dw_dma_chip *chip = dev_get_drvdata(dev);
+ struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
+ struct dw_dma_chip *chip = data->chip;
do_dw_dma_disable(chip);
clk_disable_unprepare(chip->clk);
@@ -299,7 +173,8 @@ static int dw_suspend_late(struct device *dev)
static int dw_resume_early(struct device *dev)
{
- struct dw_dma_chip *chip = dev_get_drvdata(dev);
+ struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
+ struct dw_dma_chip *chip = data->chip;
int ret;
ret = clk_prepare_enable(chip->clk);
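The probe path above hangs everything off a per-variant struct dw_dma_chip_pdata resolved via device_get_match_data(), so the DW DMA and iDMA 32-bit variants can share one platform driver. A minimal sketch of the pattern, assuming a simplified descriptor layout (the real one lives in drivers/dma/dw/internal.h; field names here are approximations):

	/* Illustrative only -- not the driver's exact struct. */
	struct dw_dma_chip_pdata_sketch {
		const struct dw_dma_platform_data *pdata;
		int (*probe)(struct dw_dma_chip *chip);
		int (*remove)(struct dw_dma_chip *chip);
		struct dw_dma_chip *chip;	/* runtime state */
	};

	static int sketch_probe(struct platform_device *pdev)
	{
		const struct dw_dma_chip_pdata_sketch *match;
		struct dw_dma_chip_pdata_sketch *data;
		struct dw_dma_chip *chip;

		match = device_get_match_data(&pdev->dev);
		if (!match)
			return -ENODEV;

		/* Copy the const match data so runtime fields can be set. */
		data = devm_kmemdup(&pdev->dev, match, sizeof(*match),
				    GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		data->chip = chip;
		platform_set_drvdata(pdev, data);
		return data->probe(chip);	/* DW core or iDMA32 variant */
	}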
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 44d92c34dec3..b1a7ca91701a 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -90,8 +90,21 @@ static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
iowrite8(val8, addr + off);
}
+static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
+ u32 off, u32 slot, bool enable)
+{
+ u32 val;
+
+ if (enable)
+ val = EDMAMUX_CHCFG_ENBL << 24 | slot;
+ else
+ val = EDMAMUX_CHCFG_DIS;
+
+ iowrite32(val, addr + off * 4);
+}
+
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
- unsigned int slot, bool enable)
+ unsigned int slot, bool enable)
{
u32 ch = fsl_chan->vchan.chan.chan_id;
void __iomem *muxaddr;
@@ -103,7 +116,10 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
slot = EDMAMUX_CHCFG_SOURCE(slot);
- mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
+ if (fsl_chan->edma->drvdata->version == v3)
+ mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
+ else
+ mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
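On the v3 (i.MX7ULP) eDMA the DMAMUX channel configuration registers are 32 bits wide rather than 8, with the enable bit in the top byte; hence the << 24 and the 4-byte stride in mux_configure32() above. A sketch of the encoding as implied by that code (the exact reserved-bit layout is an assumption):

	/*
	 *  31        24 23                        0
	 * +------------+--------------------------+
	 * | ENBL  ...  |          SOURCE          |
	 * +------------+--------------------------+
	 */
	u32 val = EDMAMUX_CHCFG_ENBL << 24 | slot;	/* route request 'slot' */
	iowrite32(val, muxaddr + ch_off * 4);		/* 32-bit regs, stride 4 */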
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 4e175560292c..5eaa2902ed39 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -125,6 +125,7 @@ struct fsl_edma_chan {
dma_addr_t dma_dev_addr;
u32 dma_dev_size;
enum dma_data_direction dma_dir;
+ char chan_name[16];
};
struct fsl_edma_desc {
@@ -139,11 +140,13 @@ struct fsl_edma_desc {
enum edma_version {
v1, /* 32ch, Vybrid, mpc57x, etc */
v2, /* 64ch Coldfire */
+ v3, /* 32ch, i.mx7ulp */
};
struct fsl_edma_drvdata {
enum edma_version version;
u32 dmamuxs;
+ bool has_dmaclk;
int (*setup_irq)(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma);
};
@@ -153,6 +156,7 @@ struct fsl_edma_engine {
void __iomem *membase;
void __iomem *muxbase[DMAMUX_NR];
struct clk *muxclk[DMAMUX_NR];
+ struct clk *dmaclk;
struct mutex fsl_edma_mutex;
const struct fsl_edma_drvdata *drvdata;
u32 n_chans;
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index fcbad6ae954a..b626c06ac2e0 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -20,6 +20,13 @@
#include "fsl-edma-common.h"
+static void fsl_edma_synchronize(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+
+ vchan_synchronize(&fsl_chan->vchan);
+}
+
static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
{
struct fsl_edma_engine *fsl_edma = dev_id;
@@ -125,16 +132,12 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
int ret;
fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
- if (fsl_edma->txirq < 0) {
- dev_err(&pdev->dev, "Can't get edma-tx irq.\n");
+ if (fsl_edma->txirq < 0)
return fsl_edma->txirq;
- }
fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
- if (fsl_edma->errirq < 0) {
- dev_err(&pdev->dev, "Can't get edma-err irq.\n");
+ if (fsl_edma->errirq < 0)
return fsl_edma->errirq;
- }
if (fsl_edma->txirq == fsl_edma->errirq) {
ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
@@ -162,6 +165,49 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
return 0;
}
+static int
+fsl_edma2_irq_init(struct platform_device *pdev,
+ struct fsl_edma_engine *fsl_edma)
+{
+ int i, ret, irq;
+ int count;
+
+ count = platform_irq_count(pdev);
+ dev_dbg(&pdev->dev, "%s Found %d interrupts\n", __func__, count);
+ if (count <= 2) {
+ dev_err(&pdev->dev, "Invalid number of interrupts in DT.\n");
+ return -EINVAL;
+ }
+ /*
+ * 16 independent channel interrupts + 1 error interrupt on i.mx7ulp.
+ * Two channels share one interrupt, for example ch0/ch16, ch1/ch17...
+ * For now, simply request the irq without the IRQF_SHARED flag, since
+ * 16 channels are enough on i.mx7ulp, whose M4 domain owns some
+ * peripherals.
+ */
+ for (i = 0; i < count; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ return -ENXIO;
+
+ sprintf(fsl_edma->chans[i].chan_name, "eDMA2-CH%02d", i);
+
+ /* The last IRQ is for eDMA err */
+ if (i == count - 1)
+ ret = devm_request_irq(&pdev->dev, irq,
+ fsl_edma_err_handler,
+ 0, "eDMA2-ERR", fsl_edma);
+ else
+ ret = devm_request_irq(&pdev->dev, irq,
+ fsl_edma_tx_handler, 0,
+ fsl_edma->chans[i].chan_name,
+ fsl_edma);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static void fsl_edma_irq_exit(
struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
@@ -187,8 +233,16 @@ static struct fsl_edma_drvdata vf610_data = {
.setup_irq = fsl_edma_irq_init,
};
+static struct fsl_edma_drvdata imx7ulp_data = {
+ .version = v3,
+ .dmamuxs = 1,
+ .has_dmaclk = true,
+ .setup_irq = fsl_edma2_irq_init,
+};
+
static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
+ { .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
@@ -236,6 +290,20 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_edma_setup_regs(fsl_edma);
regs = &fsl_edma->regs;
+ if (drvdata->has_dmaclk) {
+ fsl_edma->dmaclk = devm_clk_get(&pdev->dev, "dma");
+ if (IS_ERR(fsl_edma->dmaclk)) {
+ dev_err(&pdev->dev, "Missing DMA block clock.\n");
+ return PTR_ERR(fsl_edma->dmaclk);
+ }
+
+ ret = clk_prepare_enable(fsl_edma->dmaclk);
+ if (ret) {
+ dev_err(&pdev->dev, "DMA clk block failed.\n");
+ return ret;
+ }
+ }
+
for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
char clkname[32];
@@ -302,6 +370,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_edma->dma_dev.device_pause = fsl_edma_pause;
fsl_edma->dma_dev.device_resume = fsl_edma_resume;
fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
+ fsl_edma->dma_dev.device_synchronize = fsl_edma_synchronize;
fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
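Wiring up device_synchronize gives dmaengine clients a way to wait out any in-flight completion callbacks after terminating a channel. A minimal consumer-side sketch using the generic dmaengine API (the surrounding driver code is hypothetical):

	#include <linux/dmaengine.h>

	static void client_teardown(struct dma_chan *chan)
	{
		/* Abort outstanding descriptors without sleeping... */
		dmaengine_terminate_async(chan);

		/* ...then block until all callbacks have finished; for this
		 * driver that lands in fsl_edma_synchronize() ->
		 * vchan_synchronize().
		 */
		dmaengine_synchronize(chan);
	}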
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 8e341c0c13bc..06664fbd2d91 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -758,10 +758,8 @@ fsl_qdma_irq_init(struct platform_device *pdev,
fsl_qdma->error_irq =
platform_get_irq_byname(pdev, "qdma-error");
- if (fsl_qdma->error_irq < 0) {
- dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
+ if (fsl_qdma->error_irq < 0)
return fsl_qdma->error_irq;
- }
ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
fsl_qdma_error_handler, 0,
@@ -776,11 +774,8 @@ fsl_qdma_irq_init(struct platform_device *pdev,
fsl_qdma->queue_irq[i] =
platform_get_irq_byname(pdev, irq_name);
- if (fsl_qdma->queue_irq[i] < 0) {
- dev_err(&pdev->dev,
- "Can't get qdma queue %d irq.\n", i);
+ if (fsl_qdma->queue_irq[i] < 0)
return fsl_qdma->queue_irq[i];
- }
ret = devm_request_irq(&pdev->dev,
fsl_qdma->queue_irq[i],
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 23e0a356f167..ad72b3f42ffa 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1163,6 +1163,7 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
switch (chan->feature & FSL_DMA_IP_MASK) {
case FSL_DMA_IP_85XX:
chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
+ /* Fall through */
case FSL_DMA_IP_83XX:
chan->toggle_ext_start = fsl_chan_toggle_ext_start;
chan->set_src_loop_size = fsl_chan_set_src_loop_size;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 00a089e24150..5c0fb3134825 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -556,6 +556,7 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
* We fall-through here intentionally, since a 2D transfer is
* similar to MEMCPY just adding the 2D slot configuration.
*/
+ /* Fall through */
case IMXDMA_DESC_MEMCPY:
imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index a01f4b5d793c..9ba74ab7e912 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1886,10 +1886,6 @@ static int sdma_init(struct sdma_engine *sdma)
sdma->context_phys = ccb_phys +
MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
- /* Zero-out the CCB structures array just allocated */
- memset(sdma->channel_control, 0,
- MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
-
/* disable all channels */
for (i = 0; i < sdma->drvdata->num_events; i++)
writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
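The memset could be dropped because dma_alloc_coherent() returns zeroed memory, so clearing the CCB array again was pure overhead. Assuming the allocation a few lines earlier in sdma_init() looks roughly like this, the zeroing comes for free:

	/* dma_alloc_coherent() zero-fills the buffer, so the channel
	 * control blocks start out cleared without an explicit memset. */
	sdma->channel_control = dma_alloc_coherent(sdma->dev,
			MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) +
			sizeof(struct sdma_context_data),
			&ccb_phys, GFP_KERNEL);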
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 70fd8454d002..be61c32a876f 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -286,8 +286,7 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
return NULL;
dca = alloc_dca_provider(&ioat_dca_ops,
- sizeof(*ioatdca)
- + (sizeof(struct ioat_dca_slot) * slots));
+ struct_size(ioatdca, req_slots, slots));
if (!dca)
return NULL;
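struct_size() from <linux/overflow.h> computes the allocation size for a structure with a trailing flexible-array member, saturating instead of wrapping if the multiplication overflows. A self-contained sketch of the idiom (the struct here is illustrative, not ioat's):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct provider {
		int nslots;
		struct slot {
			u32 tag;
		} req_slots[];		/* flexible array member */
	};

	static struct provider *provider_alloc(int slots)
	{
		/* Same as sizeof(*p) + slots * sizeof(p->req_slots[0]),
		 * but saturates to SIZE_MAX on overflow, so the kzalloc
		 * fails instead of silently under-allocating. */
		struct provider *p = kzalloc(struct_size(p, req_slots, slots),
					     GFP_KERNEL);

		if (p)
			p->nslots = slots;
		return p;
	}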
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index c6c0143670d9..a3f942a6a946 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -16,13 +16,13 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/prefetch.h>
#include <linux/memory.h>
#include <linux/ioport.h>
#include <linux/raid/pq.h>
#include <linux/slab.h>
-#include <mach/adma.h>
-
+#include "iop-adma.h"
#include "dmaengine.h"
#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
@@ -116,9 +116,9 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
chain_node) {
pr_debug("\tcookie: %d slot: %d busy: %d "
- "this_desc: %#x next_desc: %#x ack: %d\n",
+ "this_desc: %pad next_desc: %#llx ack: %d\n",
iter->async_tx.cookie, iter->idx, busy,
- iter->async_tx.phys, iop_desc_get_next_desc(iter),
+ &iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter),
async_tx_test_ack(&iter->async_tx));
prefetch(_iter);
prefetch(&_iter->async_tx);
@@ -306,9 +306,9 @@ retry:
int i;
dev_dbg(iop_chan->device->common.dev,
"allocated slot: %d "
- "(desc %p phys: %#x) slots_per_op %d\n",
+ "(desc %p phys: %#llx) slots_per_op %d\n",
iter->idx, iter->hw_desc,
- iter->async_tx.phys, slots_per_op);
+ (u64)iter->async_tx.phys, slots_per_op);
/* pre-ack all but the last descriptor */
if (num_slots != slots_per_op)
@@ -364,13 +364,11 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
struct iop_adma_desc_slot *grp_start, *old_chain_tail;
int slot_cnt;
- int slots_per_op;
dma_cookie_t cookie;
dma_addr_t next_dma;
grp_start = sw_desc->group_head;
slot_cnt = grp_start->slot_cnt;
- slots_per_op = grp_start->slots_per_op;
spin_lock_bh(&iop_chan->lock);
cookie = dma_cookie_assign(tx);
@@ -516,7 +514,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
return NULL;
BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
- dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
+ dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n",
__func__, len);
spin_lock_bh(&iop_chan->lock);
@@ -549,7 +547,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev,
- "%s src_cnt: %d len: %u flags: %lx\n",
+ "%s src_cnt: %d len: %zu flags: %lx\n",
__func__, src_cnt, len, flags);
spin_lock_bh(&iop_chan->lock);
@@ -582,7 +580,7 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
if (unlikely(!len))
return NULL;
- dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
+ dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
__func__, src_cnt, len);
spin_lock_bh(&iop_chan->lock);
@@ -620,7 +618,7 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev,
- "%s src_cnt: %d len: %u flags: %lx\n",
+ "%s src_cnt: %d len: %zu flags: %lx\n",
__func__, src_cnt, len, flags);
if (dmaf_p_disabled_continue(flags))
@@ -683,7 +681,7 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
return NULL;
BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
- dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
+ dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
__func__, src_cnt, len);
spin_lock_bh(&iop_chan->lock);
diff --git a/drivers/dma/iop-adma.h b/drivers/dma/iop-adma.h
new file mode 100644
index 000000000000..c499c9578f00
--- /dev/null
+++ b/drivers/dma/iop-adma.h
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2006, Intel Corporation.
+ */
+#ifndef _ADMA_H
+#define _ADMA_H
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/platform_data/dma-iop32x.h>
+
+/* Memory copy units */
+#define DMA_CCR(chan) (chan->mmr_base + 0x0)
+#define DMA_CSR(chan) (chan->mmr_base + 0x4)
+#define DMA_DAR(chan) (chan->mmr_base + 0xc)
+#define DMA_NDAR(chan) (chan->mmr_base + 0x10)
+#define DMA_PADR(chan) (chan->mmr_base + 0x14)
+#define DMA_PUADR(chan) (chan->mmr_base + 0x18)
+#define DMA_LADR(chan) (chan->mmr_base + 0x1c)
+#define DMA_BCR(chan) (chan->mmr_base + 0x20)
+#define DMA_DCR(chan) (chan->mmr_base + 0x24)
+
+/* Application accelerator unit */
+#define AAU_ACR(chan) (chan->mmr_base + 0x0)
+#define AAU_ASR(chan) (chan->mmr_base + 0x4)
+#define AAU_ADAR(chan) (chan->mmr_base + 0x8)
+#define AAU_ANDAR(chan) (chan->mmr_base + 0xc)
+#define AAU_SAR(src, chan) (chan->mmr_base + (0x10 + ((src) << 2)))
+#define AAU_DAR(chan) (chan->mmr_base + 0x20)
+#define AAU_ABCR(chan) (chan->mmr_base + 0x24)
+#define AAU_ADCR(chan) (chan->mmr_base + 0x28)
+#define AAU_SAR_EDCR(src_edc) (chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
+#define AAU_EDCR0_IDX 8
+#define AAU_EDCR1_IDX 17
+#define AAU_EDCR2_IDX 26
+
+struct iop3xx_aau_desc_ctrl {
+ unsigned int int_en:1;
+ unsigned int blk1_cmd_ctrl:3;
+ unsigned int blk2_cmd_ctrl:3;
+ unsigned int blk3_cmd_ctrl:3;
+ unsigned int blk4_cmd_ctrl:3;
+ unsigned int blk5_cmd_ctrl:3;
+ unsigned int blk6_cmd_ctrl:3;
+ unsigned int blk7_cmd_ctrl:3;
+ unsigned int blk8_cmd_ctrl:3;
+ unsigned int blk_ctrl:2;
+ unsigned int dual_xor_en:1;
+ unsigned int tx_complete:1;
+ unsigned int zero_result_err:1;
+ unsigned int zero_result_en:1;
+ unsigned int dest_write_en:1;
+};
+
+struct iop3xx_aau_e_desc_ctrl {
+ unsigned int reserved:1;
+ unsigned int blk1_cmd_ctrl:3;
+ unsigned int blk2_cmd_ctrl:3;
+ unsigned int blk3_cmd_ctrl:3;
+ unsigned int blk4_cmd_ctrl:3;
+ unsigned int blk5_cmd_ctrl:3;
+ unsigned int blk6_cmd_ctrl:3;
+ unsigned int blk7_cmd_ctrl:3;
+ unsigned int blk8_cmd_ctrl:3;
+ unsigned int reserved2:7;
+};
+
+struct iop3xx_dma_desc_ctrl {
+ unsigned int pci_transaction:4;
+ unsigned int int_en:1;
+ unsigned int dac_cycle_en:1;
+ unsigned int mem_to_mem_en:1;
+ unsigned int crc_data_tx_en:1;
+ unsigned int crc_gen_en:1;
+ unsigned int crc_seed_dis:1;
+ unsigned int reserved:21;
+ unsigned int crc_tx_complete:1;
+};
+
+struct iop3xx_desc_dma {
+ u32 next_desc;
+ union {
+ u32 pci_src_addr;
+ u32 pci_dest_addr;
+ u32 src_addr;
+ };
+ union {
+ u32 upper_pci_src_addr;
+ u32 upper_pci_dest_addr;
+ };
+ union {
+ u32 local_pci_src_addr;
+ u32 local_pci_dest_addr;
+ u32 dest_addr;
+ };
+ u32 byte_count;
+ union {
+ u32 desc_ctrl;
+ struct iop3xx_dma_desc_ctrl desc_ctrl_field;
+ };
+ u32 crc_addr;
+};
+
+struct iop3xx_desc_aau {
+ u32 next_desc;
+ u32 src[4];
+ u32 dest_addr;
+ u32 byte_count;
+ union {
+ u32 desc_ctrl;
+ struct iop3xx_aau_desc_ctrl desc_ctrl_field;
+ };
+ union {
+ u32 src_addr;
+ u32 e_desc_ctrl;
+ struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
+ } src_edc[31];
+};
+
+struct iop3xx_aau_gfmr {
+ unsigned int gfmr1:8;
+ unsigned int gfmr2:8;
+ unsigned int gfmr3:8;
+ unsigned int gfmr4:8;
+};
+
+struct iop3xx_desc_pq_xor {
+ u32 next_desc;
+ u32 src[3];
+ union {
+ u32 data_mult1;
+ struct iop3xx_aau_gfmr data_mult1_field;
+ };
+ u32 dest_addr;
+ u32 byte_count;
+ union {
+ u32 desc_ctrl;
+ struct iop3xx_aau_desc_ctrl desc_ctrl_field;
+ };
+ union {
+ u32 src_addr;
+ u32 e_desc_ctrl;
+ struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
+ u32 data_multiplier;
+ struct iop3xx_aau_gfmr data_mult_field;
+ u32 reserved;
+ } src_edc_gfmr[19];
+};
+
+struct iop3xx_desc_dual_xor {
+ u32 next_desc;
+ u32 src0_addr;
+ u32 src1_addr;
+ u32 h_src_addr;
+ u32 d_src_addr;
+ u32 h_dest_addr;
+ u32 byte_count;
+ union {
+ u32 desc_ctrl;
+ struct iop3xx_aau_desc_ctrl desc_ctrl_field;
+ };
+ u32 d_dest_addr;
+};
+
+union iop3xx_desc {
+ struct iop3xx_desc_aau *aau;
+ struct iop3xx_desc_dma *dma;
+ struct iop3xx_desc_pq_xor *pq_xor;
+ struct iop3xx_desc_dual_xor *dual_xor;
+ void *ptr;
+};
+
+/* No support for p+q operations */
+static inline int
+iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op)
+{
+ BUG();
+ return 0;
+}
+
+static inline void
+iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
+ unsigned long flags)
+{
+ BUG();
+}
+
+static inline void
+iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
+{
+ BUG();
+}
+
+static inline void
+iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
+ dma_addr_t addr, unsigned char coef)
+{
+ BUG();
+}
+
+static inline int
+iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op)
+{
+ BUG();
+ return 0;
+}
+
+static inline void
+iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
+ unsigned long flags)
+{
+ BUG();
+}
+
+static inline void
+iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
+{
+ BUG();
+}
+
+#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr
+
+static inline void
+iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
+ dma_addr_t *src)
+{
+ BUG();
+}
+
+static inline int iop_adma_get_max_xor(void)
+{
+ return 32;
+}
+
+static inline int iop_adma_get_max_pq(void)
+{
+ BUG();
+ return 0;
+}
+
+static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
+{
+ int id = chan->device->id;
+
+ switch (id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ return __raw_readl(DMA_DAR(chan));
+ case AAU_ID:
+ return __raw_readl(AAU_ADAR(chan));
+ default:
+ BUG();
+ }
+ return 0;
+}
+
+static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
+ u32 next_desc_addr)
+{
+ int id = chan->device->id;
+
+ switch (id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ __raw_writel(next_desc_addr, DMA_NDAR(chan));
+ break;
+ case AAU_ID:
+ __raw_writel(next_desc_addr, AAU_ANDAR(chan));
+ break;
+ }
+
+}
+
+#define IOP_ADMA_STATUS_BUSY (1 << 10)
+#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
+#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
+#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)
+
+static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
+{
+ u32 status = __raw_readl(DMA_CSR(chan));
+ return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
+}
+
+static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
+ int num_slots)
+{
+ /* num_slots will only ever be 1, 2, 4, or 8 */
+ return (desc->idx & (num_slots - 1)) ? 0 : 1;
+}
+
+/* to do: support large (i.e. > hw max) buffer sizes */
+static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
+{
+ *slots_per_op = 1;
+ return 1;
+}
+
+/* to do: support large (i.e. > hw max) buffer sizes */
+static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
+{
+ *slots_per_op = 1;
+ return 1;
+}
+
+static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
+ int *slots_per_op)
+{
+ static const char slot_count_table[] = {
+ 1, 1, 1, 1, /* 01 - 04 */
+ 2, 2, 2, 2, /* 05 - 08 */
+ 4, 4, 4, 4, /* 09 - 12 */
+ 4, 4, 4, 4, /* 13 - 16 */
+ 8, 8, 8, 8, /* 17 - 20 */
+ 8, 8, 8, 8, /* 21 - 24 */
+ 8, 8, 8, 8, /* 25 - 28 */
+ 8, 8, 8, 8, /* 29 - 32 */
+ };
+ *slots_per_op = slot_count_table[src_cnt - 1];
+ return *slots_per_op;
+}
+
+static inline int
+iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
+{
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ return iop_chan_memcpy_slot_count(0, slots_per_op);
+ case AAU_ID:
+ return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
+ default:
+ BUG();
+ }
+ return 0;
+}
+
+static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
+ int *slots_per_op)
+{
+ int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
+
+ if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
+ return slot_cnt;
+
+ len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
+ while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
+ len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
+ slot_cnt += *slots_per_op;
+ }
+
+ slot_cnt += *slots_per_op;
+
+ return slot_cnt;
+}
+
+/* zero sum on iop3xx is limited to 1k at a time so it requires multiple
+ * descriptors
+ */
+static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
+ int *slots_per_op)
+{
+ int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
+
+ if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
+ return slot_cnt;
+
+ len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
+ while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
+ len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
+ slot_cnt += *slots_per_op;
+ }
+
+ slot_cnt += *slots_per_op;
+
+ return slot_cnt;
+}
+
+static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
+ struct iop_adma_chan *chan)
+{
+ union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ return hw_desc.dma->byte_count;
+ case AAU_ID:
+ return hw_desc.aau->byte_count;
+ default:
+ BUG();
+ }
+ return 0;
+}
+
+/* translate the src_idx to a descriptor word index */
+static inline int __desc_idx(int src_idx)
+{
+ static const int desc_idx_table[] = { 0, 0, 0, 0,
+ 0, 1, 2, 3,
+ 5, 6, 7, 8,
+ 9, 10, 11, 12,
+ 14, 15, 16, 17,
+ 18, 19, 20, 21,
+ 23, 24, 25, 26,
+ 27, 28, 29, 30,
+ };
+
+ return desc_idx_table[src_idx];
+}
+
+static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
+ struct iop_adma_chan *chan,
+ int src_idx)
+{
+ union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ return hw_desc.dma->src_addr;
+ case AAU_ID:
+ break;
+ default:
+ BUG();
+ }
+
+ if (src_idx < 4)
+ return hw_desc.aau->src[src_idx];
+ else
+ return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
+}
+
+static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
+ int src_idx, dma_addr_t addr)
+{
+ if (src_idx < 4)
+ hw_desc->src[src_idx] = addr;
+ else
+ hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
+}
+
+static inline void
+iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
+{
+ struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
+ union {
+ u32 value;
+ struct iop3xx_dma_desc_ctrl field;
+ } u_desc_ctrl;
+
+ u_desc_ctrl.value = 0;
+ u_desc_ctrl.field.mem_to_mem_en = 1;
+ u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
+ hw_desc->desc_ctrl = u_desc_ctrl.value;
+ hw_desc->upper_pci_src_addr = 0;
+ hw_desc->crc_addr = 0;
+}
+
+static inline void
+iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
+{
+ struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
+ union {
+ u32 value;
+ struct iop3xx_aau_desc_ctrl field;
+ } u_desc_ctrl;
+
+ u_desc_ctrl.value = 0;
+ u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
+ u_desc_ctrl.field.dest_write_en = 1;
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
+ hw_desc->desc_ctrl = u_desc_ctrl.value;
+}
+
+static inline u32
+iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
+ unsigned long flags)
+{
+ int i, shift;
+ u32 edcr;
+ union {
+ u32 value;
+ struct iop3xx_aau_desc_ctrl field;
+ } u_desc_ctrl;
+
+ u_desc_ctrl.value = 0;
+ switch (src_cnt) {
+ case 25 ... 32:
+ u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
+ edcr = 0;
+ shift = 1;
+ for (i = 24; i < src_cnt; i++) {
+ edcr |= (1 << shift);
+ shift += 3;
+ }
+ hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
+ src_cnt = 24;
+ /* fall through */
+ case 17 ... 24:
+ if (!u_desc_ctrl.field.blk_ctrl) {
+ hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
+ u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
+ }
+ edcr = 0;
+ shift = 1;
+ for (i = 16; i < src_cnt; i++) {
+ edcr |= (1 << shift);
+ shift += 3;
+ }
+ hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
+ src_cnt = 16;
+ /* fall through */
+ case 9 ... 16:
+ if (!u_desc_ctrl.field.blk_ctrl)
+ u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
+ edcr = 0;
+ shift = 1;
+ for (i = 8; i < src_cnt; i++) {
+ edcr |= (1 << shift);
+ shift += 3;
+ }
+ hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
+ src_cnt = 8;
+ /* fall through */
+ case 2 ... 8:
+ shift = 1;
+ for (i = 0; i < src_cnt; i++) {
+ u_desc_ctrl.value |= (1 << shift);
+ shift += 3;
+ }
+
+ if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
+ u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
+ }
+
+ u_desc_ctrl.field.dest_write_en = 1;
+ u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
+ hw_desc->desc_ctrl = u_desc_ctrl.value;
+
+ return u_desc_ctrl.value;
+}
+
+static inline void
+iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
+ unsigned long flags)
+{
+ iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
+}
+
+/* return the number of operations */
+static inline int
+iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
+ unsigned long flags)
+{
+ int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
+ struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
+ union {
+ u32 value;
+ struct iop3xx_aau_desc_ctrl field;
+ } u_desc_ctrl;
+ int i, j;
+
+ hw_desc = desc->hw_desc;
+
+ for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
+ i += slots_per_op, j++) {
+ iter = iop_hw_desc_slot_idx(hw_desc, i);
+ u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
+ u_desc_ctrl.field.dest_write_en = 0;
+ u_desc_ctrl.field.zero_result_en = 1;
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
+ iter->desc_ctrl = u_desc_ctrl.value;
+
+ /* for the subsequent descriptors preserve the store queue
+ * and chain them together
+ */
+ if (i) {
+ prev_hw_desc =
+ iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
+ prev_hw_desc->next_desc =
+ (u32) (desc->async_tx.phys + (i << 5));
+ }
+ }
+
+ return j;
+}
+
+static inline void
+iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
+ unsigned long flags)
+{
+ struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
+ union {
+ u32 value;
+ struct iop3xx_aau_desc_ctrl field;
+ } u_desc_ctrl;
+
+ u_desc_ctrl.value = 0;
+ switch (src_cnt) {
+ case 25 ... 32:
+ u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
+ hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
+ /* fall through */
+ case 17 ... 24:
+ if (!u_desc_ctrl.field.blk_ctrl) {
+ hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
+ u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
+ }
+ hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
+ /* fall through */
+ case 9 ... 16:
+ if (!u_desc_ctrl.field.blk_ctrl)
+ u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
+ hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
+ /* fall through */
+ case 1 ... 8:
+ if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
+ u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
+ }
+
+ u_desc_ctrl.field.dest_write_en = 0;
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
+ hw_desc->desc_ctrl = u_desc_ctrl.value;
+}
+
+static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
+ struct iop_adma_chan *chan,
+ u32 byte_count)
+{
+ union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ hw_desc.dma->byte_count = byte_count;
+ break;
+ case AAU_ID:
+ hw_desc.aau->byte_count = byte_count;
+ break;
+ default:
+ BUG();
+ }
+}
+
+static inline void
+iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
+ struct iop_adma_chan *chan)
+{
+ union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ iop_desc_init_memcpy(desc, 1);
+ hw_desc.dma->byte_count = 0;
+ hw_desc.dma->dest_addr = 0;
+ hw_desc.dma->src_addr = 0;
+ break;
+ case AAU_ID:
+ iop_desc_init_null_xor(desc, 2, 1);
+ hw_desc.aau->byte_count = 0;
+ hw_desc.aau->dest_addr = 0;
+ hw_desc.aau->src[0] = 0;
+ hw_desc.aau->src[1] = 0;
+ break;
+ default:
+ BUG();
+ }
+}
+
+static inline void
+iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
+{
+ int slots_per_op = desc->slots_per_op;
+ struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
+ int i = 0;
+
+ if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
+ hw_desc->byte_count = len;
+ } else {
+ do {
+ iter = iop_hw_desc_slot_idx(hw_desc, i);
+ iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
+ len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
+ i += slots_per_op;
+ } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
+
+ iter = iop_hw_desc_slot_idx(hw_desc, i);
+ iter->byte_count = len;
+ }
+}
+
+static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
+ struct iop_adma_chan *chan,
+ dma_addr_t addr)
+{
+ union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ hw_desc.dma->dest_addr = addr;
+ break;
+ case AAU_ID:
+ hw_desc.aau->dest_addr = addr;
+ break;
+ default:
+ BUG();
+ }
+}
+
+static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
+ dma_addr_t addr)
+{
+ struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
+ hw_desc->src_addr = addr;
+}
+
+static inline void
+iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
+ dma_addr_t addr)
+{
+
+ struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
+ int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
+ int i;
+
+ for (i = 0; (slot_cnt -= slots_per_op) >= 0;
+ i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
+ iter = iop_hw_desc_slot_idx(hw_desc, i);
+ iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
+ }
+}
+
+static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
+ int src_idx, dma_addr_t addr)
+{
+
+ struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
+ int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
+ int i;
+
+ for (i = 0; (slot_cnt -= slots_per_op) >= 0;
+ i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
+ iter = iop_hw_desc_slot_idx(hw_desc, i);
+ iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
+ }
+}
+
+static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
+ u32 next_desc_addr)
+{
+ /* hw_desc->next_desc is the same location for all channels */
+ union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+ iop_paranoia(hw_desc.dma->next_desc);
+ hw_desc.dma->next_desc = next_desc_addr;
+}
+
+static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
+{
+ /* hw_desc->next_desc is the same location for all channels */
+ union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+ return hw_desc.dma->next_desc;
+}
+
+static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
+{
+ /* hw_desc->next_desc is the same location for all channels */
+ union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+ hw_desc.dma->next_desc = 0;
+}
+
+static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
+ u32 val)
+{
+ struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
+ hw_desc->src[0] = val;
+}
+
+static inline enum sum_check_flags
+iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
+{
+ struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
+ struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
+
+ iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
+ return desc_ctrl.zero_result_err << SUM_CHECK_P;
+}
+
+static inline void iop_chan_append(struct iop_adma_chan *chan)
+{
+ u32 dma_chan_ctrl;
+
+ dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
+ dma_chan_ctrl |= 0x2;
+ __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
+}
+
+static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
+{
+ return __raw_readl(DMA_CSR(chan));
+}
+
+static inline void iop_chan_disable(struct iop_adma_chan *chan)
+{
+ u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
+ dma_chan_ctrl &= ~1;
+ __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
+}
+
+static inline void iop_chan_enable(struct iop_adma_chan *chan)
+{
+ u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
+
+ dma_chan_ctrl |= 1;
+ __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
+}
+
+static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
+{
+ u32 status = __raw_readl(DMA_CSR(chan));
+ status &= (1 << 9);
+ __raw_writel(status, DMA_CSR(chan));
+}
+
+static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
+{
+ u32 status = __raw_readl(DMA_CSR(chan));
+ status &= (1 << 8);
+ __raw_writel(status, DMA_CSR(chan));
+}
+
+static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
+{
+ u32 status = __raw_readl(DMA_CSR(chan));
+
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
+ break;
+ case AAU_ID:
+ status &= (1 << 5);
+ break;
+ default:
+ BUG();
+ }
+
+ __raw_writel(status, DMA_CSR(chan));
+}
+
+static inline int
+iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
+{
+ return 0;
+}
+
+static inline int
+iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
+{
+ return 0;
+}
+
+static inline int
+iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
+{
+ return 0;
+}
+
+static inline int
+iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
+{
+ return test_bit(5, &status);
+}
+
+static inline int
+iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
+{
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ return test_bit(2, &status);
+ default:
+ return 0;
+ }
+}
+
+static inline int
+iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
+{
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ return test_bit(3, &status);
+ default:
+ return 0;
+ }
+}
+
+static inline int
+iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
+{
+ switch (chan->device->id) {
+ case DMA0_ID:
+ case DMA1_ID:
+ return test_bit(1, &status);
+ default:
+ return 0;
+ }
+}
+#endif /* _ADMA_H */
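As a worked example of the chunking logic above: an XOR of 40 MB with 5 sources gets slots_per_op = 2 from slot_count_table[], and since each descriptor covers at most IOP_ADMA_XOR_MAX_BYTE_COUNT (16 MB), the transfer splits into 16 + 16 + 8 MB, i.e. three operations:

	int slots_per_op;
	/* 40 MB, 5 sources: 2 slots/op, 3 ops -> 6 slots total */
	int slot_cnt = iop_chan_xor_slot_count(40 << 20, 5, &slots_per_op);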
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index 546995c20876..f40051d6aecb 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -547,10 +547,8 @@ static int mtk_uart_apdma_probe(struct platform_device *pdev)
vchan_init(&c->vc, &mtkd->ddev);
rc = platform_get_irq(pdev, i);
- if (rc < 0) {
- dev_err(&pdev->dev, "failed to get IRQ[%d]\n", i);
+ if (rc < 0)
goto err_no_dma;
- }
c->irq = rc;
}
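This hunk follows a pattern repeated throughout the series: the dev_err() after platform_get_irq() (and its _byname variants) goes away because the platform core now prints its own error message when no IRQ is found, leaving callers to just propagate the code. The resulting idiom:

	int irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* core already logged the failure */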
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index fa5dab481203..e3850f04f676 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -33,7 +33,6 @@
#define MV_XOR_V2_DMA_IMSG_CDAT_OFF 0x014
#define MV_XOR_V2_DMA_IMSG_THRD_OFF 0x018
#define MV_XOR_V2_DMA_IMSG_THRD_MASK 0x7FFF
-#define MV_XOR_V2_DMA_IMSG_THRD_SHIFT 0x0
#define MV_XOR_V2_DMA_IMSG_TIMER_EN BIT(18)
#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF 0x01C
/* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */
@@ -50,7 +49,6 @@
#define MV_XOR_V2_DMA_DESQ_ADD_OFF 0x808
#define MV_XOR_V2_DMA_IMSG_TMOT 0x810
#define MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK 0x1FFF
-#define MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT 0
/* XOR Global registers */
#define MV_XOR_V2_GLOB_BW_CTRL 0x4
@@ -261,16 +259,15 @@ void mv_xor_v2_enable_imsg_thrd(struct mv_xor_v2_device *xor_dev)
/* Configure threshold of number of descriptors, and enable timer */
reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
- reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
- reg |= (MV_XOR_V2_DONE_IMSG_THRD << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
+ reg &= ~MV_XOR_V2_DMA_IMSG_THRD_MASK;
+ reg |= MV_XOR_V2_DONE_IMSG_THRD;
reg |= MV_XOR_V2_DMA_IMSG_TIMER_EN;
writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
/* Configure Timer Threshold */
reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
- reg &= (~MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK <<
- MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
- reg |= (MV_XOR_V2_TIMER_THRD << MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
+ reg &= ~MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK;
+ reg |= MV_XOR_V2_TIMER_THRD;
writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
}
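Dropping the zero-valued *_SHIFT macros also removes a latent trap: ~MASK << SHIFT only happens to work when SHIFT is 0; for any real shift the correct clear is ~(MASK << SHIFT). A sketch of the safe read-modify-write helper this reduces to:

	static inline u32 field_update(u32 reg, u32 mask, u32 shift, u32 val)
	{
		reg &= ~(mask << shift);	/* note the parentheses */
		reg |= (val & mask) << shift;
		return reg;
	}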
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 1163af2ba4a3..6cce9ef61b29 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1922,9 +1922,10 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330)
if (ret) {
dev_err(pl330->ddma.dev, "%s:%d Can't create channels for DMAC!\n",
__func__, __LINE__);
- dma_free_coherent(pl330->ddma.dev,
+ dma_free_attrs(pl330->ddma.dev,
chans * pl330->mcbufsz,
- pl330->mcode_cpu, pl330->mcode_bus);
+ pl330->mcode_cpu, pl330->mcode_bus,
+ DMA_ATTR_PRIVILEGED);
return ret;
}
@@ -2003,9 +2004,9 @@ static void pl330_del(struct pl330_dmac *pl330)
/* Free DMAC resources */
dmac_free_threads(pl330);
- dma_free_coherent(pl330->ddma.dev,
+ dma_free_attrs(pl330->ddma.dev,
pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu,
- pl330->mcode_bus);
+ pl330->mcode_bus, DMA_ATTR_PRIVILEGED);
}
/* forward declaration */
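Since pl330 allocates its microcode buffer with DMA_ATTR_PRIVILEGED, both the error path and pl330_del() must free it with the matching attrs; pairing dma_alloc_attrs() with plain dma_free_coherent() is inconsistent and can upset CONFIG_DMA_API_DEBUG. The paired calls, sketched with placeholder size and device:

	#include <linux/dma-mapping.h>

	void *cpu;
	dma_addr_t bus;

	cpu = dma_alloc_attrs(dev, size, &bus, GFP_KERNEL,
			      DMA_ATTR_PRIVILEGED);
	if (!cpu)
		return -ENOMEM;
	/* ... use the buffer ... */
	dma_free_attrs(dev, size, cpu, bus, DMA_ATTR_PRIVILEGED);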
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index 5bf8b145c427..bb4471e84e48 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -749,7 +749,6 @@ struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
if (!lldev->tre_ring)
return NULL;
- memset(lldev->tre_ring, 0, (HIDMA_TRE_SIZE + 1) * nr_tres);
lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres;
lldev->nr_tres = nr_tres;
@@ -769,7 +768,6 @@ struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
if (!lldev->evre_ring)
return NULL;
- memset(lldev->evre_ring, 0, (HIDMA_EVRE_SIZE + 1) * nr_tres);
lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres;
/* the EVRE ring has to be EVRE_SIZE aligned */
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 3022d66e7a33..806ca02c52d7 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -183,7 +183,6 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "irq resources not found\n");
rc = irq;
goto out;
}
@@ -388,7 +387,6 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
ret = PTR_ERR(new_pdev);
goto out;
}
- of_node_get(child);
new_pdev->dev.of_node = child;
of_dma_configure(&new_pdev->dev, child, true);
/*
@@ -396,9 +394,14 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
* platforms with or without MSI support.
*/
of_msi_configure(&new_pdev->dev, child);
- of_node_put(child);
}
+
+ kfree(res);
+
+ return ret;
+
out:
+ of_node_put(child);
kfree(res);
return ret;
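The refcount fix works because the child-node iterator used by this loop (here assumed to be for_each_available_child_of_node()) takes and drops the child reference itself on each pass; the manual of_node_get()/of_node_put() pair inside the loop was redundant, while the early exit still needs one of_node_put() for the reference the iterator holds at that point. The general idiom, with do_something() standing in for the per-child work:

	struct device_node *child;
	int ret;

	for_each_available_child_of_node(np, child) {
		ret = do_something(child);
		if (ret) {
			of_node_put(child);	/* drop the iterator's ref */
			return ret;
		}
	}
	return 0;				/* normal exit: nothing held */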
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index ad30f3d2c7f6..43da8eeb18ef 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -1237,11 +1237,8 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
phy->host = s3cdma;
phy->irq = platform_get_irq(pdev, i);
- if (phy->irq < 0) {
- dev_err(&pdev->dev, "failed to get irq %d, err %d\n",
- i, phy->irq);
+ if (phy->irq < 0)
continue;
- }
ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq,
0, pdev->name, phy);
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 9c41a4e42575..3993ab65c62c 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -192,6 +192,7 @@ struct rcar_dmac_chan {
* @iomem: remapped I/O memory base
* @n_channels: number of available channels
* @channels: array of DMAC channels
+ * @channels_mask: bitfield of which DMA channels are managed by this driver
* @modules: bitmask of client modules in use
*/
struct rcar_dmac {
@@ -202,6 +203,7 @@ struct rcar_dmac {
unsigned int n_channels;
struct rcar_dmac_chan *channels;
+ unsigned int channels_mask;
DECLARE_BITMAP(modules, 256);
};
@@ -438,7 +440,7 @@ static int rcar_dmac_init(struct rcar_dmac *dmac)
u16 dmaor;
/* Clear all channels and enable the DMAC globally. */
- rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
+ rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
rcar_dmac_write(dmac, RCAR_DMAOR,
RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
@@ -814,6 +816,9 @@ static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
for (i = 0; i < dmac->n_channels; ++i) {
struct rcar_dmac_chan *chan = &dmac->channels[i];
+ if (!(dmac->channels_mask & BIT(i)))
+ continue;
+
/* Stop and reinitialize the channel. */
spin_lock_irq(&chan->lock);
rcar_dmac_chan_halt(chan);
@@ -1744,10 +1749,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
/* Request the channel interrupt. */
sprintf(pdev_irqname, "ch%u", index);
rchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
- if (rchan->irq < 0) {
- dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
+ if (rchan->irq < 0)
return -ENODEV;
- }
irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
dev_name(dmac->dev), index);
@@ -1776,6 +1779,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
return 0;
}
+#define RCAR_DMAC_MAX_CHANNELS 32
+
static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
{
struct device_node *np = dev->of_node;
@@ -1787,12 +1792,16 @@ static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
return ret;
}
- if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
+ /* The hardware and driver don't support more than 32 bits in CHCLR */
+ if (dmac->n_channels <= 0 ||
+ dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) {
dev_err(dev, "invalid number of channels %u\n",
dmac->n_channels);
return -EINVAL;
}
+ dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);
+
return 0;
}
@@ -1802,7 +1811,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
- unsigned int channels_offset = 0;
struct dma_device *engine;
struct rcar_dmac *dmac;
struct resource *mem;
@@ -1831,10 +1839,8 @@ static int rcar_dmac_probe(struct platform_device *pdev)
* level we can't disable it selectively, so ignore channel 0 for now if
* the device is part of an IOMMU group.
*/
- if (device_iommu_mapped(&pdev->dev)) {
- dmac->n_channels--;
- channels_offset = 1;
- }
+ if (device_iommu_mapped(&pdev->dev))
+ dmac->channels_mask &= ~BIT(0);
dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
sizeof(*dmac->channels), GFP_KERNEL);
@@ -1892,8 +1898,10 @@ static int rcar_dmac_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&engine->channels);
for (i = 0; i < dmac->n_channels; ++i) {
- ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
- i + channels_offset);
+ if (!(dmac->channels_mask & BIT(i)))
+ continue;
+
+ ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i);
if (ret < 0)
goto error;
}
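Switching from an n_channels/offset pair to a channels_mask keeps hardware channel numbering stable when channel 0 is skipped under an IOMMU, and makes the CHCLR write touch only channels this driver owns. A condensed sketch of the mask logic as used above (init_channel() is a stand-in for rcar_dmac_chan_probe()):

	dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);
	if (device_iommu_mapped(dev))
		dmac->channels_mask &= ~BIT(0);	/* leave channel 0 alone */

	for (i = 0; i < dmac->n_channels; ++i) {
		if (!(dmac->channels_mask & BIT(i)))
			continue;		/* index i stays the hw index */
		init_channel(&dmac->channels[i], i);
	}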
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 17063aaf51bc..b218a013c260 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -717,10 +717,8 @@ static int usb_dmac_chan_probe(struct usb_dmac *dmac,
/* Request the channel interrupt. */
sprintf(pdev_irqname, "ch%u", index);
uchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
- if (uchan->irq < 0) {
- dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
+ if (uchan->irq < 0)
return -ENODEV;
- }
irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
dev_name(dmac->dev), index);
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index baac476c8622..525dc7338fe3 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -908,6 +908,7 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
struct dma_slave_config *slave_cfg = &schan->slave_cfg;
dma_addr_t src = 0, dst = 0;
+ dma_addr_t start_src = 0, start_dst = 0;
struct sprd_dma_desc *sdesc;
struct scatterlist *sg;
u32 len = 0;
@@ -954,6 +955,11 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
dst = sg_dma_address(sg);
}
+ if (!i) {
+ start_src = src;
+ start_dst = dst;
+ }
+
/*
* The link-list mode needs at least 2 link-list
* configurations. If there is only one sg, it doesn't
@@ -970,8 +976,8 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
}
- ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
- dir, flags, slave_cfg);
+ ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, start_src,
+ start_dst, len, dir, flags, slave_cfg);
if (ret) {
kfree(sdesc);
return NULL;
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
index a3ee0f6bb664..67087dbe2f9f 100644
--- a/drivers/dma/st_fdma.c
+++ b/drivers/dma/st_fdma.c
@@ -771,10 +771,8 @@ static int st_fdma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fdev);
fdev->irq = platform_get_irq(pdev, 0);
- if (fdev->irq < 0) {
- dev_err(&pdev->dev, "Failed to get irq resource\n");
+ if (fdev->irq < 0)
return -EINVAL;
- }
ret = devm_request_irq(&pdev->dev, fdev->irq, st_fdma_irq_handler, 0,
dev_name(&pdev->dev), fdev);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 89d710899010..de8bfd9a76e9 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -142,7 +142,7 @@ enum d40_events {
* when the DMA hw is powered off.
* TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
*/
-static u32 d40_backup_regs[] = {
+static __maybe_unused u32 d40_backup_regs[] = {
D40_DREG_LCPA,
D40_DREG_LCLA,
D40_DREG_PRMSE,
@@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = {
#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
-static u32 d40_backup_regs_chan[] = {
+static __maybe_unused u32 d40_backup_regs_chan[] = {
D40_CHAN_REG_SSCFG,
D40_CHAN_REG_SSELT,
D40_CHAN_REG_SSPTR,
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index ef4d109e7189..5989b0893521 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -243,12 +243,6 @@ static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val)
writel_relaxed(val, dmadev->base + reg);
}
-static struct stm32_dma_desc *stm32_dma_alloc_desc(u32 num_sgs)
-{
- return kzalloc(sizeof(struct stm32_dma_desc) +
- sizeof(struct stm32_dma_sg_req) * num_sgs, GFP_NOWAIT);
-}
-
static int stm32_dma_get_width(struct stm32_dma_chan *chan,
enum dma_slave_buswidth width)
{
@@ -853,7 +847,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
return NULL;
}
- desc = stm32_dma_alloc_desc(sg_len);
+ desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
if (!desc)
return NULL;
@@ -954,7 +948,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
num_periods = buf_len / period_len;
- desc = stm32_dma_alloc_desc(num_periods);
+ desc = kzalloc(struct_size(desc, sg_req, num_periods), GFP_NOWAIT);
if (!desc)
return NULL;
@@ -989,7 +983,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
int i;
num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
- desc = stm32_dma_alloc_desc(num_sgs);
+ desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
if (!desc)
return NULL;
@@ -1366,12 +1360,8 @@ static int stm32_dma_probe(struct platform_device *pdev)
for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
chan = &dmadev->chan[i];
ret = platform_get_irq(pdev, i);
- if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "No irq resource for chan %d\n", i);
+ if (ret < 0)
goto err_unregister;
- }
chan->irq = ret;
ret = devm_request_irq(&pdev->dev, chan->irq,
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index b552949da14b..3c89bd39e096 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -185,8 +185,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
if (!node)
return -ENODEV;
- count = device_property_read_u32_array(&pdev->dev, "dma-masters",
- NULL, 0);
+ count = device_property_count_u32(&pdev->dev, "dma-masters");
if (count < 0) {
dev_err(&pdev->dev, "Can't get DMA master(s) node\n");
return -ENODEV;
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index d6e919d3936a..5838311cf990 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1366,7 +1366,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
chan = &dmadev->chan[id];
if (!chan) {
- dev_err(chan2dev(chan), "MDMA channel not initialized\n");
+ dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n");
goto exit;
}
@@ -1555,8 +1555,7 @@ static int stm32_mdma_probe(struct platform_device *pdev)
nr_requests);
}
- count = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
- NULL, 0);
+ count = device_property_count_u32(&pdev->dev, "st,ahb-addr-masks");
if (count < 0)
count = 0;
@@ -1638,10 +1637,8 @@ static int stm32_mdma_probe(struct platform_device *pdev)
}
dmadev->irq = platform_get_irq(pdev, 0);
- if (dmadev->irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ\n");
+ if (dmadev->irq < 0)
return dmadev->irq;
- }
ret = devm_request_irq(&pdev->dev, dmadev->irq, stm32_mdma_irq_handler,
0, dev_name(&pdev->dev), dmadev);
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index 1f80568b2613..e397a50058c8 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -1132,10 +1132,8 @@ static int sun4i_dma_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
priv->irq = platform_get_irq(pdev, 0);
- if (priv->irq < 0) {
- dev_err(&pdev->dev, "Cannot claim IRQ\n");
+ if (priv->irq < 0)
return priv->irq;
- }
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index ed5b68dcfe50..06cd7f867f7c 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -1251,10 +1251,8 @@ static int sun6i_dma_probe(struct platform_device *pdev)
return PTR_ERR(sdc->base);
sdc->irq = platform_get_irq(pdev, 0);
- if (sdc->irq < 0) {
- dev_err(&pdev->dev, "Cannot claim IRQ\n");
+ if (sdc->irq < 0)
return sdc->irq;
- }
sdc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(sdc->clk)) {
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 79e9593815f1..3a45079d11ec 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -152,6 +152,7 @@ struct tegra_dma_sg_req {
bool last_sg;
struct list_head node;
struct tegra_dma_desc *dma_desc;
+ unsigned int words_xferred;
};
/*
@@ -496,6 +497,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
nsg_req->configured = true;
+ nsg_req->words_xferred = 0;
tegra_dma_resume(tdc);
}
@@ -511,6 +513,7 @@ static void tdc_start_head_req(struct tegra_dma_channel *tdc)
typeof(*sg_req), node);
tegra_dma_start(tdc, sg_req);
sg_req->configured = true;
+ sg_req->words_xferred = 0;
tdc->busy = true;
}
@@ -638,6 +641,8 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
dma_desc->cb_count++;
+ sgreq->words_xferred = 0;
+
/* If not last req then put at end of pending list */
if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
list_move_tail(&sgreq->node, &tdc->pending_sg_req);
@@ -797,6 +802,65 @@ skip_dma_stop:
return 0;
}
+static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
+ struct tegra_dma_sg_req *sg_req)
+{
+ unsigned long status, wcount = 0;
+
+ if (!list_is_first(&sg_req->node, &tdc->pending_sg_req))
+ return 0;
+
+ if (tdc->tdma->chip_data->support_separate_wcount_reg)
+ wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
+
+ status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
+
+ if (!tdc->tdma->chip_data->support_separate_wcount_reg)
+ wcount = status;
+
+ if (status & TEGRA_APBDMA_STATUS_ISE_EOC)
+ return sg_req->req_len;
+
+ wcount = get_current_xferred_count(tdc, sg_req, wcount);
+
+ if (!wcount) {
+ /*
+ * If wcount was never polled for this SG before, simply
+ * assume that the transfer hasn't started yet.
+ *
+ * Otherwise it's the end of the transfer.
+ *
+ * The alternative would be to poll the status register
+ * until the EOC bit is set or wcount goes up. That's
+ * because the EOC bit is set only after the last burst
+ * completes, and the counter is then short of the actual
+ * transfer size by 4 bytes. The counter value also wraps
+ * around in cyclic mode before EOC is set(!), so we can't
+ * easily distinguish the start of a transfer from its end.
+ */
+ if (sg_req->words_xferred)
+ wcount = sg_req->req_len - 4;
+
+ } else if (wcount < sg_req->words_xferred) {
+ /*
+ * This case will never happen for a non-cyclic transfer.
+ *
+ * For a cyclic transfer, although it is possible for the
+ * next transfer to have already started (resetting the word
+ * count), this case should still not happen because we should
+ * have detected that the EOC bit is set and hence the transfer
+ * was completed.
+ */
+ WARN_ON_ONCE(1);
+
+ wcount = sg_req->req_len - 4;
+ } else {
+ sg_req->words_xferred = wcount;
+ }
+
+ return wcount;
+}
+
static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
@@ -806,6 +870,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
enum dma_status ret;
unsigned long flags;
unsigned int residual;
+ unsigned int bytes = 0;
ret = dma_cookie_status(dc, cookie, txstate);
if (ret == DMA_COMPLETE)
@@ -825,6 +890,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
dma_desc = sg_req->dma_desc;
if (dma_desc->txd.cookie == cookie) {
+ bytes = tegra_dma_sg_bytes_xferred(tdc, sg_req);
ret = dma_desc->dma_status;
goto found;
}
@@ -836,7 +902,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
found:
if (dma_desc && txstate) {
residual = dma_desc->bytes_requested -
- (dma_desc->bytes_transferred %
+ ((dma_desc->bytes_transferred + bytes) %
dma_desc->bytes_requested);
dma_set_residue(txstate, residual);
}
@@ -1441,12 +1507,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
- /*
- * XXX The hardware appears to support
- * DMA_RESIDUE_GRANULARITY_BURST-level reporting, but it's
- * only used by this driver during tegra_dma_terminate_all()
- */
- tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+ tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
tdma->dma_dev.device_config = tegra_dma_slave_config;
tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 2805853e963f..5f8adf5c1f20 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -42,12 +42,8 @@
#define ADMA_CH_CONFIG_MAX_BUFS 8
#define ADMA_CH_FIFO_CTRL 0x2c
-#define TEGRA210_ADMA_CH_FIFO_CTRL_OFLWTHRES(val) (((val) & 0xf) << 24)
-#define TEGRA210_ADMA_CH_FIFO_CTRL_STRVTHRES(val) (((val) & 0xf) << 16)
#define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0xf) << 8)
#define TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(val) ((val) & 0xf)
-#define TEGRA186_ADMA_CH_FIFO_CTRL_OFLWTHRES(val) (((val) & 0x1f) << 24)
-#define TEGRA186_ADMA_CH_FIFO_CTRL_STRVTHRES(val) (((val) & 0x1f) << 16)
#define TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0x1f) << 8)
#define TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(val) ((val) & 0x1f)
@@ -64,14 +60,10 @@
#define TEGRA_ADMA_BURST_COMPLETE_TIME 20
-#define TEGRA210_FIFO_CTRL_DEFAULT (TEGRA210_ADMA_CH_FIFO_CTRL_OFLWTHRES(1) | \
- TEGRA210_ADMA_CH_FIFO_CTRL_STRVTHRES(1) | \
- TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
+#define TEGRA210_FIFO_CTRL_DEFAULT (TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(3))
-#define TEGRA186_FIFO_CTRL_DEFAULT (TEGRA186_ADMA_CH_FIFO_CTRL_OFLWTHRES(1) | \
- TEGRA186_ADMA_CH_FIFO_CTRL_STRVTHRES(1) | \
- TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
+#define TEGRA186_FIFO_CTRL_DEFAULT (TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(3))
#define ADMA_CH_REG_FIELD_VAL(val, mask, shift) (((val) & mask) << shift)
@@ -712,7 +704,7 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
return chan;
}
-static int tegra_adma_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
{
struct tegra_adma *tdma = dev_get_drvdata(dev);
struct tegra_adma_chan_regs *ch_reg;
@@ -744,7 +736,7 @@ clk_disable:
return 0;
}
-static int tegra_adma_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
{
struct tegra_adma *tdma = dev_get_drvdata(dev);
struct tegra_adma_chan_regs *ch_reg;
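The __maybe_unused annotations fit because these callbacks are referenced only through the driver's dev_pm_ops; a minimal sketch of that pattern (the ops struct name is assumed here, not quoted from the file):

	static const struct dev_pm_ops tegra_adma_dev_pm_ops = {
		SET_RUNTIME_PM_OPS(tegra_adma_runtime_suspend,
				   tegra_adma_runtime_resume, NULL)
	};

With CONFIG_PM disabled, SET_RUNTIME_PM_OPS() expands to nothing, and without the annotation the then-unreferenced functions would trigger -Wunused-function.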
diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
index ad2f0a4cd6a4..f255056696ee 100644
--- a/drivers/dma/ti/dma-crossbar.c
+++ b/drivers/dma/ti/dma-crossbar.c
@@ -391,8 +391,10 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
nelm * 2);
- if (ret)
+ if (ret) {
+ kfree(rsv_events);
return ret;
+ }
for (i = 0; i < nelm; i++) {
ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index ceabdea40ae0..ba7c4f07fcd6 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -15,7 +15,7 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
-#include <linux/edma.h>
+#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -133,6 +133,17 @@
#define EDMA_CONT_PARAMS_FIXED_EXACT 1002
#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
+/*
+ * 64-bit array registers are split into two 32-bit registers:
+ * reg0: channel/event 0-31
+ * reg1: channel/event 32-63
+ *
+ * Bit 5 of the channel number selects the array index (0/1);
+ * bits 0-4 (0x1f) give the bit offset within the register.
+ */
+#define EDMA_REG_ARRAY_INDEX(channel) ((channel) >> 5)
+#define EDMA_CHANNEL_BIT(channel) (BIT((channel) & 0x1f))
+
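A quick sanity check of the two macros (hypothetical channel number): channel 35 has bit 5 set, so it lands in reg1 at bit offset 3:

	EDMA_REG_ARRAY_INDEX(35)	/* 35 >> 5        = 1      */
	EDMA_CHANNEL_BIT(35)		/* BIT(35 & 0x1f) = BIT(3) */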
/* PaRAM slots are laid out like this */
struct edmacc_param {
u32 opt;
@@ -169,6 +180,7 @@ struct edma_desc {
struct list_head node;
enum dma_transfer_direction direction;
int cyclic;
+ bool polled;
int absync;
int pset_nr;
struct edma_chan *echan;
@@ -412,12 +424,6 @@ static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
}
-static inline void edma_set_bits(int offset, int len, unsigned long *p)
-{
- for (; len > 0; len--)
- set_bit(offset + (len - 1), p);
-}
-
static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
int priority)
{
@@ -441,15 +447,14 @@ static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
{
struct edma_cc *ecc = echan->ecc;
int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ int idx = EDMA_REG_ARRAY_INDEX(channel);
+ int ch_bit = EDMA_CHANNEL_BIT(channel);
if (enable) {
- edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
- BIT(channel & 0x1f));
- edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
- BIT(channel & 0x1f));
+ edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
+ edma_shadow0_write_array(ecc, SH_IESR, idx, ch_bit);
} else {
- edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
- BIT(channel & 0x1f));
+ edma_shadow0_write_array(ecc, SH_IECR, idx, ch_bit);
}
}
@@ -587,26 +592,26 @@ static void edma_start(struct edma_chan *echan)
{
struct edma_cc *ecc = echan->ecc;
int channel = EDMA_CHAN_SLOT(echan->ch_num);
- int j = (channel >> 5);
- unsigned int mask = BIT(channel & 0x1f);
+ int idx = EDMA_REG_ARRAY_INDEX(channel);
+ int ch_bit = EDMA_CHANNEL_BIT(channel);
if (!echan->hw_triggered) {
/* EDMA channels without event association */
- dev_dbg(ecc->dev, "ESR%d %08x\n", j,
- edma_shadow0_read_array(ecc, SH_ESR, j));
- edma_shadow0_write_array(ecc, SH_ESR, j, mask);
+ dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
+ edma_shadow0_read_array(ecc, SH_ESR, idx));
+ edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
} else {
/* EDMA channel with event association */
- dev_dbg(ecc->dev, "ER%d %08x\n", j,
- edma_shadow0_read_array(ecc, SH_ER, j));
+ dev_dbg(ecc->dev, "ER%d %08x\n", idx,
+ edma_shadow0_read_array(ecc, SH_ER, idx));
/* Clear any pending event or error */
- edma_write_array(ecc, EDMA_ECR, j, mask);
- edma_write_array(ecc, EDMA_EMCR, j, mask);
+ edma_write_array(ecc, EDMA_ECR, idx, ch_bit);
+ edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
/* Clear any SER */
- edma_shadow0_write_array(ecc, SH_SECR, j, mask);
- edma_shadow0_write_array(ecc, SH_EESR, j, mask);
- dev_dbg(ecc->dev, "EER%d %08x\n", j,
- edma_shadow0_read_array(ecc, SH_EER, j));
+ edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
+ edma_shadow0_write_array(ecc, SH_EESR, idx, ch_bit);
+ dev_dbg(ecc->dev, "EER%d %08x\n", idx,
+ edma_shadow0_read_array(ecc, SH_EER, idx));
}
}
@@ -614,19 +619,19 @@ static void edma_stop(struct edma_chan *echan)
{
struct edma_cc *ecc = echan->ecc;
int channel = EDMA_CHAN_SLOT(echan->ch_num);
- int j = (channel >> 5);
- unsigned int mask = BIT(channel & 0x1f);
+ int idx = EDMA_REG_ARRAY_INDEX(channel);
+ int ch_bit = EDMA_CHANNEL_BIT(channel);
- edma_shadow0_write_array(ecc, SH_EECR, j, mask);
- edma_shadow0_write_array(ecc, SH_ECR, j, mask);
- edma_shadow0_write_array(ecc, SH_SECR, j, mask);
- edma_write_array(ecc, EDMA_EMCR, j, mask);
+ edma_shadow0_write_array(ecc, SH_EECR, idx, ch_bit);
+ edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
+ edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
+ edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
/* clear possibly pending completion interrupt */
- edma_shadow0_write_array(ecc, SH_ICR, j, mask);
+ edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
- dev_dbg(ecc->dev, "EER%d %08x\n", j,
- edma_shadow0_read_array(ecc, SH_EER, j));
+ dev_dbg(ecc->dev, "EER%d %08x\n", idx,
+ edma_shadow0_read_array(ecc, SH_EER, idx));
/* REVISIT: consider guarding against inappropriate event
* chaining by overwriting with dummy_paramset.
@@ -640,45 +645,49 @@ static void edma_stop(struct edma_chan *echan)
static void edma_pause(struct edma_chan *echan)
{
int channel = EDMA_CHAN_SLOT(echan->ch_num);
- unsigned int mask = BIT(channel & 0x1f);
- edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);
+ edma_shadow0_write_array(echan->ecc, SH_EECR,
+ EDMA_REG_ARRAY_INDEX(channel),
+ EDMA_CHANNEL_BIT(channel));
}
/* Re-enable EDMA hardware events on the specified channel. */
static void edma_resume(struct edma_chan *echan)
{
int channel = EDMA_CHAN_SLOT(echan->ch_num);
- unsigned int mask = BIT(channel & 0x1f);
- edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
+ edma_shadow0_write_array(echan->ecc, SH_EESR,
+ EDMA_REG_ARRAY_INDEX(channel),
+ EDMA_CHANNEL_BIT(channel));
}
static void edma_trigger_channel(struct edma_chan *echan)
{
struct edma_cc *ecc = echan->ecc;
int channel = EDMA_CHAN_SLOT(echan->ch_num);
- unsigned int mask = BIT(channel & 0x1f);
+ int idx = EDMA_REG_ARRAY_INDEX(channel);
+ int ch_bit = EDMA_CHANNEL_BIT(channel);
- edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);
+ edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
- dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
- edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
+ dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
+ edma_shadow0_read_array(ecc, SH_ESR, idx));
}
static void edma_clean_channel(struct edma_chan *echan)
{
struct edma_cc *ecc = echan->ecc;
int channel = EDMA_CHAN_SLOT(echan->ch_num);
- int j = (channel >> 5);
- unsigned int mask = BIT(channel & 0x1f);
+ int idx = EDMA_REG_ARRAY_INDEX(channel);
+ int ch_bit = EDMA_CHANNEL_BIT(channel);
- dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
- edma_shadow0_write_array(ecc, SH_ECR, j, mask);
+ dev_dbg(ecc->dev, "EMR%d %08x\n", idx,
+ edma_read_array(ecc, EDMA_EMR, idx));
+ edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
/* Clear the corresponding EMR bits */
- edma_write_array(ecc, EDMA_EMCR, j, mask);
+ edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
/* Clear any SER */
- edma_shadow0_write_array(ecc, SH_SECR, j, mask);
+ edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
}
@@ -708,7 +717,8 @@ static int edma_alloc_channel(struct edma_chan *echan,
int channel = EDMA_CHAN_SLOT(echan->ch_num);
/* ensure access through shadow region 0 */
- edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
+ edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel),
+ EDMA_CHANNEL_BIT(channel));
/* ensure no events are pending */
edma_stop(echan);
@@ -1011,6 +1021,7 @@ static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
src_cidx = cidx;
dst_bidx = acnt;
dst_cidx = cidx;
+ epset->addr = src_addr;
} else {
dev_err(dev, "%s: direction not implemented yet\n", __func__);
return -EINVAL;
@@ -1211,8 +1222,9 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
edesc->pset[0].param.opt |= ITCCHEN;
if (nslots == 1) {
- /* Enable transfer complete interrupt */
- edesc->pset[0].param.opt |= TCINTEN;
+ /* Enable transfer complete interrupt if requested */
+ if (tx_flags & DMA_PREP_INTERRUPT)
+ edesc->pset[0].param.opt |= TCINTEN;
} else {
/* Enable transfer complete chaining for the first slot */
edesc->pset[0].param.opt |= TCCHEN;
@@ -1239,9 +1251,14 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
}
edesc->pset[1].param.opt |= ITCCHEN;
- edesc->pset[1].param.opt |= TCINTEN;
+ /* Enable transfer complete interrupt if requested */
+ if (tx_flags & DMA_PREP_INTERRUPT)
+ edesc->pset[1].param.opt |= TCINTEN;
}
+ if (!(tx_flags & DMA_PREP_INTERRUPT))
+ edesc->polled = true;
+
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
@@ -1721,7 +1738,11 @@ static u32 edma_residue(struct edma_desc *edesc)
int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
struct edma_chan *echan = edesc->echan;
struct edma_pset *pset = edesc->pset;
- dma_addr_t done, pos;
+ dma_addr_t done, pos, pos_old;
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ int idx = EDMA_REG_ARRAY_INDEX(channel);
+ int ch_bit = EDMA_CHANNEL_BIT(channel);
+ int event_reg;
int i;
/*
@@ -1734,16 +1755,20 @@ static u32 edma_residue(struct edma_desc *edesc)
* "pos" may represent a transfer request that is still being
* processed by the EDMACC or EDMATC. We will busy wait until
* any one of the situations occurs:
- * 1. the DMA hardware is idle
- * 2. a new transfer request is setup
+ * 1. no event is pending for the channel any more
+ * 2. the position has been updated
* 3. we hit the loop limit
*/
- while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
- /* check if a new transfer request is setup */
- if (edma_get_position(echan->ecc,
- echan->slot[0], dst) != pos) {
+ if (is_slave_direction(edesc->direction))
+ event_reg = SH_ER;
+ else
+ event_reg = SH_ESR;
+
+ pos_old = pos;
+ while (edma_shadow0_read_array(echan->ecc, event_reg, idx) & ch_bit) {
+ pos = edma_get_position(echan->ecc, echan->slot[0], dst);
+ if (pos != pos_old)
break;
- }
if (!--loop_count) {
dev_dbg_ratelimited(echan->vchan.chan.device->dev,
@@ -1769,6 +1794,12 @@ static u32 edma_residue(struct edma_desc *edesc)
}
/*
+ * If the position is 0, the EDMA has loaded the closing dummy slot
+ * and the transfer is completed.
+ */
+ if (!pos)
+ return 0;
+ /*
* For SG operation we catch up with the last processed
* status.
*/
@@ -1796,19 +1827,46 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
struct dma_tx_state *txstate)
{
struct edma_chan *echan = to_edma_chan(chan);
- struct virt_dma_desc *vdesc;
+ struct dma_tx_state txstate_tmp;
enum dma_status ret;
unsigned long flags;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_COMPLETE || !txstate)
+
+ if (ret == DMA_COMPLETE)
return ret;
+ /* Provide a dummy dma_tx_state for completion checking */
+ if (!txstate)
+ txstate = &txstate_tmp;
+
spin_lock_irqsave(&echan->vchan.lock, flags);
- if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
+ if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
txstate->residue = edma_residue(echan->edesc);
- else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
- txstate->residue = to_edma_desc(&vdesc->tx)->residue;
+ } else {
+ struct virt_dma_desc *vdesc = vchan_find_desc(&echan->vchan,
+ cookie);
+
+ if (vdesc)
+ txstate->residue = to_edma_desc(&vdesc->tx)->residue;
+ else
+ txstate->residue = 0;
+ }
+
+ /*
+ * Mark the cookie completed if the residue is 0 for polled
+ * (non-cyclic) transfers
+ */
+ if (ret != DMA_COMPLETE && !txstate->residue &&
+ echan->edesc && echan->edesc->polled &&
+ echan->edesc->vdesc.tx.cookie == cookie) {
+ edma_stop(echan);
+ vchan_cookie_complete(&echan->edesc->vdesc);
+ echan->edesc = NULL;
+ edma_execute(echan);
+ ret = DMA_COMPLETE;
+ }
+
spin_unlock_irqrestore(&echan->vchan.lock, flags);
return ret;
@@ -2185,11 +2243,13 @@ static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
}
#endif
+static bool edma_filter_fn(struct dma_chan *chan, void *param);
+
static int edma_probe(struct platform_device *pdev)
{
struct edma_soc_info *info = pdev->dev.platform_data;
s8 (*queue_priority_mapping)[2];
- int i, off, ln;
+ int i, off;
const s16 (*rsv_slots)[2];
const s16 (*xbar_chans)[2];
int irq;
@@ -2273,21 +2333,22 @@ static int edma_probe(struct platform_device *pdev)
ecc->default_queue = info->default_queue;
- for (i = 0; i < ecc->num_slots; i++)
- edma_write_slot(ecc, i, &dummy_paramset);
-
if (info->rsv) {
/* Set the reserved slots in inuse list */
rsv_slots = info->rsv->rsv_slots;
if (rsv_slots) {
- for (i = 0; rsv_slots[i][0] != -1; i++) {
- off = rsv_slots[i][0];
- ln = rsv_slots[i][1];
- edma_set_bits(off, ln, ecc->slot_inuse);
- }
+ for (i = 0; rsv_slots[i][0] != -1; i++)
+ bitmap_set(ecc->slot_inuse, rsv_slots[i][0],
+ rsv_slots[i][1]);
}
}
+ for (i = 0; i < ecc->num_slots; i++) {
+ /* Reset only unused - not reserved - PaRAM slots */
+ if (!test_bit(i, ecc->slot_inuse))
+ edma_write_slot(ecc, i, &dummy_paramset);
+ }
+
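For reference, bitmap_set(map, off, len) from linux/bitmap.h (the header added at the top of this patch) sets the len consecutive bits starting at off, i.e. it is the generic equivalent of the removed edma_set_bits() loop:

	/* equivalent to: for (i = 0; i < len; i++) set_bit(off + i, map); */
	bitmap_set(ecc->slot_inuse, rsv_slots[i][0], rsv_slots[i][1]);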
/* Clear the xbar mapped channels in unused list */
xbar_chans = info->xbar_chans;
if (xbar_chans) {
@@ -2366,11 +2427,10 @@ static int edma_probe(struct platform_device *pdev)
edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
queue_priority_mapping[i][1]);
- for (i = 0; i < ecc->num_region; i++) {
- edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0);
- edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0);
- edma_write_array(ecc, EDMA_QRAE, i, 0x0);
- }
+ edma_write_array2(ecc, EDMA_DRAE, 0, 0, 0x0);
+ edma_write_array2(ecc, EDMA_DRAE, 0, 1, 0x0);
+ edma_write_array(ecc, EDMA_QRAE, 0, 0x0);
+
ecc->info = info;
/* Init the dma device and channels */
@@ -2482,8 +2542,9 @@ static int edma_pm_resume(struct device *dev)
for (i = 0; i < ecc->num_channels; i++) {
if (echan[i].alloced) {
/* ensure access through shadow region 0 */
- edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,
- BIT(i & 0x1f));
+ edma_or_array2(ecc, EDMA_DRAE, 0,
+ EDMA_REG_ARRAY_INDEX(i),
+ EDMA_CHANNEL_BIT(i));
edma_setup_interrupt(&echan[i], true);
@@ -2524,7 +2585,7 @@ static struct platform_driver edma_tptc_driver = {
},
};
-bool edma_filter_fn(struct dma_chan *chan, void *param)
+static bool edma_filter_fn(struct dma_chan *chan, void *param)
{
bool match = false;
@@ -2539,7 +2600,6 @@ bool edma_filter_fn(struct dma_chan *chan, void *param)
}
return match;
}
-EXPORT_SYMBOL(edma_filter_fn);
static int edma_init(void)
{
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index ba2489d4ea24..6b6ba238b81a 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -91,6 +91,7 @@ struct omap_desc {
bool using_ll;
enum dma_transfer_direction dir;
dma_addr_t dev_addr;
+ bool polled;
int32_t fi; /* for OMAP_DMA_SYNC_PACKET / double indexing */
int16_t ei; /* for double indexing */
@@ -202,6 +203,7 @@ static const unsigned es_bytes[] = {
[CSDP_DATA_TYPE_32] = 4,
};
+static bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info omap_dma_info = {
.filter_fn = omap_dma_filter_fn,
};
@@ -812,31 +814,22 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct omap_chan *c = to_omap_dma_chan(chan);
- struct virt_dma_desc *vd;
enum dma_status ret;
unsigned long flags;
+ struct omap_desc *d = NULL;
ret = dma_cookie_status(chan, cookie, txstate);
-
- if (!c->paused && c->running) {
- uint32_t ccr = omap_dma_chan_read(c, CCR);
- /*
- * The channel is no longer active, set the return value
- * accordingly
- */
- if (!(ccr & CCR_ENABLE))
- ret = DMA_COMPLETE;
- }
-
- if (ret == DMA_COMPLETE || !txstate)
+ if (ret == DMA_COMPLETE)
return ret;
spin_lock_irqsave(&c->vc.lock, flags);
- vd = vchan_find_desc(&c->vc, cookie);
- if (vd) {
- txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
- } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
- struct omap_desc *d = c->desc;
+ if (c->desc && c->desc->vd.tx.cookie == cookie)
+ d = c->desc;
+
+ if (!txstate)
+ goto out;
+
+ if (d) {
dma_addr_t pos;
if (d->dir == DMA_MEM_TO_DEV)
@@ -848,10 +841,31 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
txstate->residue = omap_dma_desc_size_pos(d, pos);
} else {
- txstate->residue = 0;
+ struct virt_dma_desc *vd = vchan_find_desc(&c->vc, cookie);
+
+ if (vd)
+ txstate->residue = omap_dma_desc_size(
+ to_omap_dma_desc(&vd->tx));
+ else
+ txstate->residue = 0;
}
- if (ret == DMA_IN_PROGRESS && c->paused)
+
+out:
+ if (ret == DMA_IN_PROGRESS && c->paused) {
ret = DMA_PAUSED;
+ } else if (d && d->polled && c->running) {
+ uint32_t ccr = omap_dma_chan_read(c, CCR);
+ /*
+ * If the channel is no longer active, set the return value
+ * accordingly and mark the descriptor as completed
+ */
+ if (!(ccr & CCR_ENABLE)) {
+ ret = DMA_COMPLETE;
+ omap_dma_start_desc(c);
+ vchan_cookie_complete(&d->vd);
+ }
+ }
+
spin_unlock_irqrestore(&c->vc.lock, flags);
return ret;
@@ -1178,7 +1192,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
d->ccr = c->ccr;
d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
- d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
+ if (tx_flags & DMA_PREP_INTERRUPT)
+ d->cicr |= CICR_FRAME_IE;
+ else
+ d->polled = true;
d->csdp = data_type;
@@ -1234,7 +1251,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
if (src_icg) {
d->ccr |= CCR_SRC_AMODE_DBLIDX;
d->ei = 1;
- d->fi = src_icg;
+ d->fi = src_icg + 1;
} else if (xt->src_inc) {
d->ccr |= CCR_SRC_AMODE_POSTINC;
d->fi = 0;
@@ -1249,7 +1266,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
if (dst_icg) {
d->ccr |= CCR_DST_AMODE_DBLIDX;
sg->ei = 1;
- sg->fi = dst_icg;
+ sg->fi = dst_icg + 1;
} else if (xt->dst_inc) {
d->ccr |= CCR_DST_AMODE_POSTINC;
sg->fi = 0;
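The +1 in the two hunks above implies the OMAP double-indexing convention that an element/frame index of 1 means contiguous addressing, so the index must be the inter-chunk gap plus one. A hypothetical example with chunks separated by a 12-byte gap:

	size_t src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);	/* = 12 */

	d->fi = src_icg + 1;	/* = 13; fi = 1 would address back-to-back chunks */

dmaengine_get_src_icg() is the stock linux/dmaengine.h helper for computing the source inter-chunk gap.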
@@ -1540,8 +1557,10 @@ static int omap_dma_probe(struct platform_device *pdev)
rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
IRQF_SHARED, "omap-dma-engine", od);
- if (rc)
+ if (rc) {
+ omap_dma_free(od);
return rc;
+ }
}
if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
@@ -1637,7 +1656,7 @@ static struct platform_driver omap_dma_driver = {
},
};
-bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
+static bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
if (chan->device->dev->driver == &omap_dma_driver.driver) {
struct omap_dmadev *od = to_omap_dma_dev(chan->device);
@@ -1651,7 +1670,6 @@ bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
}
return false;
}
-EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
static int omap_dma_init(void)
{
diff --git a/drivers/dma/uniphier-mdmac.c b/drivers/dma/uniphier-mdmac.c
index ec65a7430dc4..fde54687856b 100644
--- a/drivers/dma/uniphier-mdmac.c
+++ b/drivers/dma/uniphier-mdmac.c
@@ -354,11 +354,8 @@ static int uniphier_mdmac_chan_init(struct platform_device *pdev,
int irq, ret;
irq = platform_get_irq(pdev, chan_id);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ number for ch%d\n",
- chan_id);
+ if (irq < 0)
return irq;
- }
irq_name = devm_kasprintf(dev, GFP_KERNEL, "uniphier-mio-dmac-ch%d",
chan_id);
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 957c269ce1fd..cd60fa6d6750 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -1678,20 +1678,16 @@ static int xgene_dma_get_resources(struct platform_device *pdev,
/* Get DMA error interrupt */
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(&pdev->dev, "Failed to get Error IRQ\n");
+ if (irq <= 0)
return -ENXIO;
- }
pdma->err_irq = irq;
/* Get DMA Rx ring descriptor interrupts for all DMA channels */
for (i = 1; i <= XGENE_DMA_MAX_CHANNEL; i++) {
irq = platform_get_irq(pdev, i);
- if (irq <= 0) {
- dev_err(&pdev->dev, "Failed to get Rx IRQ\n");
+ if (irq <= 0)
return -ENXIO;
- }
pdma->chan[i - 1].rx_irq = irq;
}