Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig                                             |  28
-rw-r--r--  drivers/dma/Makefile                                            |   3
-rw-r--r--  drivers/dma/amba-pl08x.c                                        |   8
-rw-r--r--  drivers/dma/at_hdmac.c                                          | 211
-rw-r--r--  drivers/dma/at_hdmac_regs.h                                     |   5
-rw-r--r--  drivers/dma/coh901318.c                                         |  43
-rw-r--r--  drivers/dma/dma-jz4740.c                                        | 617
-rw-r--r--  drivers/dma/dmaengine.c                                         |   7
-rw-r--r--  drivers/dma/dw/Kconfig                                          |  29
-rw-r--r--  drivers/dma/dw/Makefile                                         |   8
-rw-r--r--  drivers/dma/dw/core.c (renamed from drivers/dma/dw_dmac.c)      | 320
-rw-r--r--  drivers/dma/dw/internal.h                                       |  70
-rw-r--r--  drivers/dma/dw/pci.c                                            | 101
-rw-r--r--  drivers/dma/dw/platform.c                                       | 317
-rw-r--r--  drivers/dma/dw/regs.h (renamed from drivers/dma/dw_dmac_regs.h) |   7
-rw-r--r--  drivers/dma/edma.c                                              |   2
-rw-r--r--  drivers/dma/fsldma.c                                            |   5
-rw-r--r--  drivers/dma/imx-dma.c                                           |  77
-rw-r--r--  drivers/dma/imx-sdma.c                                          |  40
-rw-r--r--  drivers/dma/intel_mid_dma.c                                     |   2
-rw-r--r--  drivers/dma/ioat/dma.c                                          |   3
-rw-r--r--  drivers/dma/ioat/dma_v2.h                                       |   1
-rw-r--r--  drivers/dma/ioat/dma_v3.c                                       | 114
-rw-r--r--  drivers/dma/ioat/hw.h                                           |  27
-rw-r--r--  drivers/dma/iop-adma.c                                          |  70
-rw-r--r--  drivers/dma/mmp_tdma.c                                          |   4
-rw-r--r--  drivers/dma/mv_xor.c                                            |  85
-rw-r--r--  drivers/dma/mv_xor.h                                            |   1
-rw-r--r--  drivers/dma/mxs-dma.c                                           |   2
-rw-r--r--  drivers/dma/of-dma.c                                            |  17
-rw-r--r--  drivers/dma/pl330.c                                             |  33
-rw-r--r--  drivers/dma/ppc4xx/adma.c                                       |  52
-rw-r--r--  drivers/dma/sh/Makefile                                         |   2
-rw-r--r--  drivers/dma/sh/shdma-base.c                                     |  26
-rw-r--r--  drivers/dma/sh/shdma-of.c                                       |  82
-rw-r--r--  drivers/dma/sh/shdma.c                                          |  33
-rw-r--r--  drivers/dma/sirf-dma.c                                          |  17
-rw-r--r--  drivers/dma/ste_dma40.c                                         | 533
-rw-r--r--  drivers/dma/ste_dma40_ll.c                                      | 189
-rw-r--r--  drivers/dma/ste_dma40_ll.h                                      |   3
-rw-r--r--  drivers/dma/tegra20-apb-dma.c                                   |   3
-rw-r--r--  drivers/dma/timb_dma.c                                          |   2
42 files changed, 2100 insertions(+), 1099 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e9924898043a..6825957c97fb 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -79,25 +79,7 @@ config INTEL_IOP_ADMA
help
Enable support for the Intel(R) IOP Series RAID engines.
-config DW_DMAC
- tristate "Synopsys DesignWare AHB DMA support"
- depends on GENERIC_HARDIRQS
- select DMA_ENGINE
- default y if CPU_AT32AP7000
- help
- Support the Synopsys DesignWare AHB DMA controller. This
- can be integrated in chips such as the Atmel AT32ap7000.
-
-config DW_DMAC_BIG_ENDIAN_IO
- bool "Use big endian I/O register access"
- default y if AVR32
- depends on DW_DMAC
- help
- Say yes here to use big endian I/O access when reading and writing
- to the DMA controller registers. This is needed on some platforms,
- like the Atmel AVR32 architecture.
-
- If unsure, use the default setting.
+source "drivers/dma/dw/Kconfig"
config AT_HDMAC
tristate "Atmel AHB DMA support"
@@ -213,7 +195,7 @@ config SIRF_DMA
config TI_EDMA
tristate "TI EDMA support"
- depends on ARCH_DAVINCI
+ depends on ARCH_DAVINCI || ARCH_OMAP
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
default n
@@ -312,6 +294,12 @@ config MMP_PDMA
help
Support the MMP PDMA engine for PXA and MMP platform.
+config DMA_JZ4740
+ tristate "JZ4740 DMA support"
+ depends on MACH_JZ4740
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a2b0df591f95..5e0f2ef85614 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -15,7 +15,7 @@ obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_MV_XOR) += mv_xor.o
-obj-$(CONFIG_DW_DMAC) += dw_dmac.o
+obj-$(CONFIG_DW_DMAC_CORE) += dw/
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
@@ -38,3 +38,4 @@ obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
+obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 8bad254a498d..06fe45c74de5 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -299,8 +299,8 @@ static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
const struct pl08x_platform_data *pd = plchan->host->pd;
int ret;
- if (plchan->mux_use++ == 0 && pd->get_signal) {
- ret = pd->get_signal(plchan->cd);
+ if (plchan->mux_use++ == 0 && pd->get_xfer_signal) {
+ ret = pd->get_xfer_signal(plchan->cd);
if (ret < 0) {
plchan->mux_use = 0;
return ret;
@@ -318,8 +318,8 @@ static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
if (plchan->signal >= 0) {
WARN_ON(plchan->mux_use == 0);
- if (--plchan->mux_use == 0 && pd->put_signal) {
- pd->put_signal(plchan->cd, plchan->signal);
+ if (--plchan->mux_use == 0 && pd->put_xfer_signal) {
+ pd->put_xfer_signal(plchan->cd, plchan->signal);
plchan->signal = -1;
}
}
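
The only change to pl08x is the rename of the signal-mux callbacks in its platform data. As a hedged sketch, board code supplying pl08x_platform_data now wires up the renamed hooks (the board_* callbacks here are hypothetical board-specific helpers, not part of this patch):

/* Sketch: pl08x platform data after the rename; board_get_signal()
 * and board_put_signal() are hypothetical. */
static struct pl08x_platform_data pl08x_pd = {
	.get_xfer_signal = board_get_signal,
	.put_xfer_signal = board_put_signal,
	/* other fields (slave_channels, lli_buses, ...) unchanged */
};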
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index e923cda930f9..c787f38a186a 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -14,6 +14,7 @@
* found on AT91SAM9263.
*/
+#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
@@ -54,6 +55,7 @@ MODULE_PARM_DESC(init_nr_desc_per_channel,
/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
+static void atc_issue_pending(struct dma_chan *chan);
/*----------------------------------------------------------------------*/
@@ -230,6 +232,95 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
vdbg_dump_regs(atchan);
}
+/*
+ * atc_get_current_descriptors -
+ * locate the descriptor that matches the physical address in DSCR
+ * @atchan: the channel we are working on
+ * @dscr_addr: physical descriptor address in DSCR
+ */
+static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan,
+ u32 dscr_addr)
+{
+ struct at_desc *desc, *_desc, *child, *desc_cur = NULL;
+
+ list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
+ if (desc->lli.dscr == dscr_addr) {
+ desc_cur = desc;
+ break;
+ }
+
+ list_for_each_entry(child, &desc->tx_list, desc_node) {
+ if (child->lli.dscr == dscr_addr) {
+ desc_cur = child;
+ break;
+ }
+ }
+ }
+
+ return desc_cur;
+}
+
+/*
+ * atc_get_bytes_left -
+ * Get the number of bytes left in the DMA buffer
+ * @chan: the channel we are working on
+ */
+static int atc_get_bytes_left(struct dma_chan *chan)
+{
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ struct at_dma *atdma = to_at_dma(chan->device);
+ int chan_id = atchan->chan_common.chan_id;
+ struct at_desc *desc_first = atc_first_active(atchan);
+ struct at_desc *desc_cur;
+ int ret = 0, count = 0;
+
+ /*
+ * Initialize necessary values the first time:
+ * remain_desc records the remaining descriptor length.
+ */
+ if (atchan->remain_desc == 0)
+ /* First descriptor embeds the transaction length */
+ atchan->remain_desc = desc_first->len;
+
+ /*
+ * This happens when the current descriptor transfer is complete.
+ * The remaining buffer size is reduced by that descriptor's length.
+ */
+ if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) {
+ clear_bit(ATC_IS_BTC, &atchan->status);
+ desc_cur = atc_get_current_descriptors(atchan,
+ channel_readl(atchan, DSCR));
+ if (!desc_cur) {
+ ret = -EINVAL;
+ goto out;
+ }
+ count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX)
+ << (desc_first->tx_width);
+ /* remain_desc is unsigned: check before subtracting */
+ if (atchan->remain_desc < count) {
+ ret = -EINVAL;
+ goto out;
+ }
+ atchan->remain_desc -= count;
+ ret = atchan->remain_desc;
+ } else {
+ /*
+ * Get the residual bytes while the current
+ * descriptor transfer is in progress.
+ */
+ count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX)
+ << (desc_first->tx_width);
+ ret = atchan->remain_desc - count;
+ }
+ /*
+ * Check whether the FIFO is empty.
+ */
+ if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id)))
+ atc_issue_pending(chan);
+
+out:
+ return ret;
+}
+
/**
* atc_chain_complete - finish work for one transaction chain
* @atchan: channel we work on
@@ -327,37 +418,6 @@ static void atc_complete_all(struct at_dma_chan *atchan)
}
/**
- * atc_cleanup_descriptors - cleanup up finished descriptors in active_list
- * @atchan: channel to be cleaned up
- *
- * Called with atchan->lock held and bh disabled
- */
-static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
-{
- struct at_desc *desc, *_desc;
- struct at_desc *child;
-
- dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");
-
- list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
- if (!(desc->lli.ctrla & ATC_DONE))
- /* This one is currently in progress */
- return;
-
- list_for_each_entry(child, &desc->tx_list, desc_node)
- if (!(child->lli.ctrla & ATC_DONE))
- /* Currently in progress */
- return;
-
- /*
- * No descriptors so far seem to be in progress, i.e.
- * this chain must be done.
- */
- atc_chain_complete(atchan, desc);
- }
-}
-
-/**
* atc_advance_work - at the end of a transaction, move forward
* @atchan: channel where the transaction ended
*
@@ -496,6 +556,8 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
/* Give information to tasklet */
set_bit(ATC_IS_ERROR, &atchan->status);
}
+ if (pending & AT_DMA_BTC(i))
+ set_bit(ATC_IS_BTC, &atchan->status);
tasklet_schedule(&atchan->tasklet);
ret = IRQ_HANDLED;
}
@@ -615,6 +677,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
/* First descriptor of the chain embedds additional information */
first->txd.cookie = -EBUSY;
first->len = len;
+ first->tx_width = src_width;
/* set end-of-link to the last link descriptor of list*/
set_desc_eol(desc);
@@ -761,6 +824,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
/* First descriptor of the chain embedds additional information */
first->txd.cookie = -EBUSY;
first->len = total_len;
+ first->tx_width = reg_width;
/* first link descriptor of list is responsible of flags */
first->txd.flags = flags; /* client is in control of this ack */
@@ -919,6 +983,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
/* First descriptor of the chain embedds additional information */
first->txd.cookie = -EBUSY;
first->len = buf_len;
+ first->tx_width = reg_width;
return &first->txd;
@@ -1032,34 +1097,36 @@ atc_tx_status(struct dma_chan *chan,
struct dma_tx_state *txstate)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
unsigned long flags;
enum dma_status ret;
-
- spin_lock_irqsave(&atchan->lock, flags);
+ int bytes = 0;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_SUCCESS) {
- atc_cleanup_descriptors(atchan);
+ if (ret == DMA_SUCCESS)
+ return ret;
+ /*
+ * There's no point calculating the residue if there's
+ * no txstate to store the value.
+ */
+ if (!txstate)
+ return DMA_ERROR;
- ret = dma_cookie_status(chan, cookie, txstate);
- }
+ spin_lock_irqsave(&atchan->lock, flags);
- last_complete = chan->completed_cookie;
- last_used = chan->cookie;
+ /* Get number of bytes left in the active transactions */
+ bytes = atc_get_bytes_left(chan);
spin_unlock_irqrestore(&atchan->lock, flags);
- if (ret != DMA_SUCCESS)
- dma_set_residue(txstate, atc_first_active(atchan)->len);
-
- if (atc_chan_is_paused(atchan))
- ret = DMA_PAUSED;
+ if (unlikely(bytes < 0)) {
+ dev_vdbg(chan2dev(chan), "get residual bytes error\n");
+ return DMA_ERROR;
+ } else {
+ dma_set_residue(txstate, bytes);
+ }
- dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
- ret, cookie, last_complete ? last_complete : 0,
- last_used ? last_used : 0);
+ dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
+ ret, cookie, bytes);
return ret;
}
@@ -1120,7 +1187,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
*/
BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
- /* if cfg configuration specified take it instad of default */
+ /* if cfg configuration specified take it instead of default */
if (atslave->cfg)
cfg = atslave->cfg;
}
@@ -1143,6 +1210,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&atchan->lock, flags);
atchan->descs_allocated = i;
+ atchan->remain_desc = 0;
list_splice(&tmp_list, &atchan->free_list);
dma_cookie_init(chan);
spin_unlock_irqrestore(&atchan->lock, flags);
@@ -1185,6 +1253,7 @@ static void atc_free_chan_resources(struct dma_chan *chan)
list_splice_init(&atchan->free_list, &list);
atchan->descs_allocated = 0;
atchan->status = 0;
+ atchan->remain_desc = 0;
dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}
@@ -1223,14 +1292,31 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
if (!atslave)
return NULL;
+
+ atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
/*
* We can fill both SRC_PER and DST_PER, one of these fields will be
* ignored depending on DMA transfer direction.
*/
- per_id = dma_spec->args[1];
- atslave->cfg = ATC_FIFOCFG_HALFFIFO | ATC_DST_H2SEL_HW
- | ATC_SRC_H2SEL_HW | ATC_DST_PER(per_id)
- | ATC_SRC_PER(per_id);
+ per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
+ atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
+ | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
+ /*
+ * We have to translate the value we get from the device tree since
+ * the half FIFO configuration value had to be 0 to keep backward
+ * compatibility.
+ */
+ switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
+ case AT91_DMA_CFG_FIFOCFG_ALAP:
+ atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
+ break;
+ case AT91_DMA_CFG_FIFOCFG_ASAP:
+ atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
+ break;
+ case AT91_DMA_CFG_FIFOCFG_HALF:
+ default:
+ atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
+ }
atslave->dma_dev = &dmac_pdev->dev;
chan = dma_request_channel(mask, at_dma_filter, atslave);
@@ -1374,7 +1460,9 @@ static int __init at_dma_probe(struct platform_device *pdev)
err = PTR_ERR(atdma->clk);
goto err_clk;
}
- clk_enable(atdma->clk);
+ err = clk_prepare_enable(atdma->clk);
+ if (err)
+ goto err_clk_prepare;
/* force dma off, just in case */
at_dma_off(atdma);
@@ -1472,10 +1560,10 @@ err_of_dma_controller_register:
dma_async_device_unregister(&atdma->dma_common);
dma_pool_destroy(atdma->dma_desc_pool);
err_pool_create:
- platform_set_drvdata(pdev, NULL);
free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
- clk_disable(atdma->clk);
+ clk_disable_unprepare(atdma->clk);
+err_clk_prepare:
clk_put(atdma->clk);
err_clk:
iounmap(atdma->regs);
@@ -1497,7 +1585,6 @@ static int at_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&atdma->dma_common);
dma_pool_destroy(atdma->dma_desc_pool);
- platform_set_drvdata(pdev, NULL);
free_irq(platform_get_irq(pdev, 0), atdma);
list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
@@ -1512,7 +1599,7 @@ static int at_dma_remove(struct platform_device *pdev)
list_del(&chan->device_node);
}
- clk_disable(atdma->clk);
+ clk_disable_unprepare(atdma->clk);
clk_put(atdma->clk);
iounmap(atdma->regs);
@@ -1531,7 +1618,7 @@ static void at_dma_shutdown(struct platform_device *pdev)
struct at_dma *atdma = platform_get_drvdata(pdev);
at_dma_off(platform_get_drvdata(pdev));
- clk_disable(atdma->clk);
+ clk_disable_unprepare(atdma->clk);
}
static int at_dma_prepare(struct device *dev)
@@ -1588,7 +1675,7 @@ static int at_dma_suspend_noirq(struct device *dev)
/* disable DMA controller */
at_dma_off(atdma);
- clk_disable(atdma->clk);
+ clk_disable_unprepare(atdma->clk);
return 0;
}
@@ -1618,7 +1705,7 @@ static int at_dma_resume_noirq(struct device *dev)
struct dma_chan *chan, *_chan;
/* bring back DMA controller */
- clk_enable(atdma->clk);
+ clk_prepare_enable(atdma->clk);
dma_writel(atdma, EN, AT_DMA_ENABLE);
/* clear any pending interrupt */
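
The residue machinery added above feeds the generic dmaengine status API; clients never call atc_get_bytes_left() directly. A minimal client-side sketch (chan, cookie and dev are assumed to exist from an earlier channel request and descriptor submit):

/* Sketch: reading the residue that atc_tx_status() now reports. */
struct dma_tx_state state;
enum dma_status status;

status = dmaengine_tx_status(chan, cookie, &state);
if (status != DMA_ERROR)
	dev_dbg(dev, "%u bytes left in the transfer\n", state.residue);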
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index c604d26fd4d3..f31d647acdfa 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -182,6 +182,7 @@ struct at_lli {
* @txd: support for the async_tx api
* @desc_node: node on the channel descriptors list
* @len: total transaction bytecount
+ * @tx_width: transfer width
*/
struct at_desc {
/* FIRST values the hardware uses */
@@ -192,6 +193,7 @@ struct at_desc {
struct dma_async_tx_descriptor txd;
struct list_head desc_node;
size_t len;
+ u32 tx_width;
};
static inline struct at_desc *
@@ -211,6 +213,7 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd)
enum atc_status {
ATC_IS_ERROR = 0,
ATC_IS_PAUSED = 1,
+ ATC_IS_BTC = 2,
ATC_IS_CYCLIC = 24,
};
@@ -228,6 +231,7 @@ enum atc_status {
* @save_cfg: configuration register that is saved on suspend/resume cycle
* @save_dscr: for cyclic operations, preserve next descriptor address in
* the cyclic list on suspend/resume cycle
+ * @remain_desc: remaining descriptor length still to be transferred
* @dma_sconfig: configuration for slave transfers, passed via DMA_SLAVE_CONFIG
* @lock: serializes enqueue/dequeue operations to descriptors lists
* @active_list: list of descriptors dmaengine is being running on
@@ -246,6 +250,7 @@ struct at_dma_chan {
struct tasklet_struct tasklet;
u32 save_cfg;
u32 save_dscr;
+ u32 remain_desc;
struct dma_slave_config dma_sconfig;
spinlock_t lock;
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 3b23061cdb41..9bfaddd57ef1 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -22,6 +22,7 @@
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/platform_data/dma-coh901318.h>
+#include <linux/of_dma.h>
#include "coh901318.h"
#include "dmaengine.h"
@@ -1788,6 +1789,35 @@ bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
}
EXPORT_SYMBOL(coh901318_filter_id);
+struct coh901318_filter_args {
+ struct coh901318_base *base;
+ unsigned int ch_nr;
+};
+
+static bool coh901318_filter_base_and_id(struct dma_chan *chan, void *data)
+{
+ struct coh901318_filter_args *args = data;
+
+ if (&args->base->dma_slave == chan->device &&
+ args->ch_nr == to_coh901318_chan(chan)->id)
+ return true;
+
+ return false;
+}
+
+static struct dma_chan *coh901318_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct coh901318_filter_args args = {
+ .base = ofdma->of_dma_data,
+ .ch_nr = dma_spec->args[0],
+ };
+ dma_cap_mask_t cap;
+ dma_cap_zero(cap);
+ dma_cap_set(DMA_SLAVE, cap);
+
+ return dma_request_channel(cap, coh901318_filter_base_and_id, &args);
+}
/*
* DMA channel allocation
*/
@@ -2735,12 +2765,19 @@ static int __init coh901318_probe(struct platform_device *pdev)
if (err)
goto err_register_memcpy;
+ err = of_dma_controller_register(pdev->dev.of_node, coh901318_xlate,
+ base);
+ if (err)
+ goto err_register_of_dma;
+
platform_set_drvdata(pdev, base);
dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
(u32) base->virtbase);
return err;
+ err_register_of_dma:
+ dma_async_device_unregister(&base->dma_memcpy);
err_register_memcpy:
dma_async_device_unregister(&base->dma_slave);
err_register_slave:
@@ -2752,17 +2789,23 @@ static int coh901318_remove(struct platform_device *pdev)
{
struct coh901318_base *base = platform_get_drvdata(pdev);
+ of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&base->dma_memcpy);
dma_async_device_unregister(&base->dma_slave);
coh901318_pool_destroy(&base->pool);
return 0;
}
+static const struct of_device_id coh901318_dt_match[] = {
+ { .compatible = "stericsson,coh901318" },
+ {},
+};
static struct platform_driver coh901318_driver = {
.remove = coh901318_remove,
.driver = {
.name = "coh901318",
+ .of_match_table = coh901318_dt_match,
},
};
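
With the xlate registered, COH901318 channels can be handed out from the device tree through the standard helper; a hedged consumer sketch ("rx" is a hypothetical dma-names entry in the client's node):

/* Sketch: requesting a channel resolved via coh901318_xlate(). */
struct dma_chan *chan;

chan = dma_request_slave_channel(&pdev->dev, "rx");
if (!chan)
	return -ENODEV;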
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
new file mode 100644
index 000000000000..b0c0c8268d42
--- /dev/null
+++ b/drivers/dma/dma-jz4740.c
@@ -0,0 +1,617 @@
+/*
+ * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
+ * JZ4740 DMAC support
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+
+#include <asm/mach-jz4740/dma.h>
+
+#include "virt-dma.h"
+
+#define JZ_DMA_NR_CHANS 6
+
+#define JZ_REG_DMA_SRC_ADDR(x) (0x00 + (x) * 0x20)
+#define JZ_REG_DMA_DST_ADDR(x) (0x04 + (x) * 0x20)
+#define JZ_REG_DMA_TRANSFER_COUNT(x) (0x08 + (x) * 0x20)
+#define JZ_REG_DMA_REQ_TYPE(x) (0x0C + (x) * 0x20)
+#define JZ_REG_DMA_STATUS_CTRL(x) (0x10 + (x) * 0x20)
+#define JZ_REG_DMA_CMD(x) (0x14 + (x) * 0x20)
+#define JZ_REG_DMA_DESC_ADDR(x) (0x18 + (x) * 0x20)
+
+#define JZ_REG_DMA_CTRL 0x300
+#define JZ_REG_DMA_IRQ 0x304
+#define JZ_REG_DMA_DOORBELL 0x308
+#define JZ_REG_DMA_DOORBELL_SET 0x30C
+
+#define JZ_DMA_STATUS_CTRL_NO_DESC BIT(31)
+#define JZ_DMA_STATUS_CTRL_DESC_INV BIT(6)
+#define JZ_DMA_STATUS_CTRL_ADDR_ERR BIT(4)
+#define JZ_DMA_STATUS_CTRL_TRANSFER_DONE BIT(3)
+#define JZ_DMA_STATUS_CTRL_HALT BIT(2)
+#define JZ_DMA_STATUS_CTRL_COUNT_TERMINATE BIT(1)
+#define JZ_DMA_STATUS_CTRL_ENABLE BIT(0)
+
+#define JZ_DMA_CMD_SRC_INC BIT(23)
+#define JZ_DMA_CMD_DST_INC BIT(22)
+#define JZ_DMA_CMD_RDIL_MASK (0xf << 16)
+#define JZ_DMA_CMD_SRC_WIDTH_MASK (0x3 << 14)
+#define JZ_DMA_CMD_DST_WIDTH_MASK (0x3 << 12)
+#define JZ_DMA_CMD_INTERVAL_LENGTH_MASK (0x7 << 8)
+#define JZ_DMA_CMD_BLOCK_MODE BIT(7)
+#define JZ_DMA_CMD_DESC_VALID BIT(4)
+#define JZ_DMA_CMD_DESC_VALID_MODE BIT(3)
+#define JZ_DMA_CMD_VALID_IRQ_ENABLE BIT(2)
+#define JZ_DMA_CMD_TRANSFER_IRQ_ENABLE BIT(1)
+#define JZ_DMA_CMD_LINK_ENABLE BIT(0)
+
+#define JZ_DMA_CMD_FLAGS_OFFSET 22
+#define JZ_DMA_CMD_RDIL_OFFSET 16
+#define JZ_DMA_CMD_SRC_WIDTH_OFFSET 14
+#define JZ_DMA_CMD_DST_WIDTH_OFFSET 12
+#define JZ_DMA_CMD_TRANSFER_SIZE_OFFSET 8
+#define JZ_DMA_CMD_MODE_OFFSET 7
+
+#define JZ_DMA_CTRL_PRIORITY_MASK (0x3 << 8)
+#define JZ_DMA_CTRL_HALT BIT(3)
+#define JZ_DMA_CTRL_ADDRESS_ERROR BIT(2)
+#define JZ_DMA_CTRL_ENABLE BIT(0)
+
+enum jz4740_dma_width {
+ JZ4740_DMA_WIDTH_32BIT = 0,
+ JZ4740_DMA_WIDTH_8BIT = 1,
+ JZ4740_DMA_WIDTH_16BIT = 2,
+};
+
+enum jz4740_dma_transfer_size {
+ JZ4740_DMA_TRANSFER_SIZE_4BYTE = 0,
+ JZ4740_DMA_TRANSFER_SIZE_1BYTE = 1,
+ JZ4740_DMA_TRANSFER_SIZE_2BYTE = 2,
+ JZ4740_DMA_TRANSFER_SIZE_16BYTE = 3,
+ JZ4740_DMA_TRANSFER_SIZE_32BYTE = 4,
+};
+
+enum jz4740_dma_flags {
+ JZ4740_DMA_SRC_AUTOINC = 0x2,
+ JZ4740_DMA_DST_AUTOINC = 0x1,
+};
+
+enum jz4740_dma_mode {
+ JZ4740_DMA_MODE_SINGLE = 0,
+ JZ4740_DMA_MODE_BLOCK = 1,
+};
+
+struct jz4740_dma_sg {
+ dma_addr_t addr;
+ unsigned int len;
+};
+
+struct jz4740_dma_desc {
+ struct virt_dma_desc vdesc;
+
+ enum dma_transfer_direction direction;
+ bool cyclic;
+
+ unsigned int num_sgs;
+ struct jz4740_dma_sg sg[];
+};
+
+struct jz4740_dmaengine_chan {
+ struct virt_dma_chan vchan;
+ unsigned int id;
+
+ dma_addr_t fifo_addr;
+ unsigned int transfer_shift;
+
+ struct jz4740_dma_desc *desc;
+ unsigned int next_sg;
+};
+
+struct jz4740_dma_dev {
+ struct dma_device ddev;
+ void __iomem *base;
+ struct clk *clk;
+
+ struct jz4740_dmaengine_chan chan[JZ_DMA_NR_CHANS];
+};
+
+static struct jz4740_dma_dev *jz4740_dma_chan_get_dev(
+ struct jz4740_dmaengine_chan *chan)
+{
+ return container_of(chan->vchan.chan.device, struct jz4740_dma_dev,
+ ddev);
+}
+
+static struct jz4740_dmaengine_chan *to_jz4740_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct jz4740_dmaengine_chan, vchan.chan);
+}
+
+static struct jz4740_dma_desc *to_jz4740_dma_desc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct jz4740_dma_desc, vdesc);
+}
+
+static inline uint32_t jz4740_dma_read(struct jz4740_dma_dev *dmadev,
+ unsigned int reg)
+{
+ return readl(dmadev->base + reg);
+}
+
+static inline void jz4740_dma_write(struct jz4740_dma_dev *dmadev,
+ unsigned reg, uint32_t val)
+{
+ writel(val, dmadev->base + reg);
+}
+
+static inline void jz4740_dma_write_mask(struct jz4740_dma_dev *dmadev,
+ unsigned int reg, uint32_t val, uint32_t mask)
+{
+ uint32_t tmp;
+
+ tmp = jz4740_dma_read(dmadev, reg);
+ tmp &= ~mask;
+ tmp |= val;
+ jz4740_dma_write(dmadev, reg, tmp);
+}
+
+static struct jz4740_dma_desc *jz4740_dma_alloc_desc(unsigned int num_sgs)
+{
+ return kzalloc(sizeof(struct jz4740_dma_desc) +
+ sizeof(struct jz4740_dma_sg) * num_sgs, GFP_ATOMIC);
+}
+
+static enum jz4740_dma_width jz4740_dma_width(enum dma_slave_buswidth width)
+{
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ return JZ4740_DMA_WIDTH_8BIT;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ return JZ4740_DMA_WIDTH_16BIT;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ return JZ4740_DMA_WIDTH_32BIT;
+ default:
+ return JZ4740_DMA_WIDTH_32BIT;
+ }
+}
+
+static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst)
+{
+ if (maxburst <= 1)
+ return JZ4740_DMA_TRANSFER_SIZE_1BYTE;
+ else if (maxburst <= 3)
+ return JZ4740_DMA_TRANSFER_SIZE_2BYTE;
+ else if (maxburst <= 15)
+ return JZ4740_DMA_TRANSFER_SIZE_4BYTE;
+ else if (maxburst <= 31)
+ return JZ4740_DMA_TRANSFER_SIZE_16BYTE;
+
+ return JZ4740_DMA_TRANSFER_SIZE_32BYTE;
+}
+
+static int jz4740_dma_slave_config(struct dma_chan *c,
+ const struct dma_slave_config *config)
+{
+ struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+ struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
+ enum jz4740_dma_width src_width;
+ enum jz4740_dma_width dst_width;
+ enum jz4740_dma_transfer_size transfer_size;
+ enum jz4740_dma_flags flags;
+ uint32_t cmd;
+
+ switch (config->direction) {
+ case DMA_MEM_TO_DEV:
+ flags = JZ4740_DMA_SRC_AUTOINC;
+ transfer_size = jz4740_dma_maxburst(config->dst_maxburst);
+ chan->fifo_addr = config->dst_addr;
+ break;
+ case DMA_DEV_TO_MEM:
+ flags = JZ4740_DMA_DST_AUTOINC;
+ transfer_size = jz4740_dma_maxburst(config->src_maxburst);
+ chan->fifo_addr = config->src_addr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ src_width = jz4740_dma_width(config->src_addr_width);
+ dst_width = jz4740_dma_width(config->dst_addr_width);
+
+ switch (transfer_size) {
+ case JZ4740_DMA_TRANSFER_SIZE_2BYTE:
+ chan->transfer_shift = 1;
+ break;
+ case JZ4740_DMA_TRANSFER_SIZE_4BYTE:
+ chan->transfer_shift = 2;
+ break;
+ case JZ4740_DMA_TRANSFER_SIZE_16BYTE:
+ chan->transfer_shift = 4;
+ break;
+ case JZ4740_DMA_TRANSFER_SIZE_32BYTE:
+ chan->transfer_shift = 5;
+ break;
+ default:
+ chan->transfer_shift = 0;
+ break;
+ }
+
+ cmd = flags << JZ_DMA_CMD_FLAGS_OFFSET;
+ cmd |= src_width << JZ_DMA_CMD_SRC_WIDTH_OFFSET;
+ cmd |= dst_width << JZ_DMA_CMD_DST_WIDTH_OFFSET;
+ cmd |= transfer_size << JZ_DMA_CMD_TRANSFER_SIZE_OFFSET;
+ cmd |= JZ4740_DMA_MODE_SINGLE << JZ_DMA_CMD_MODE_OFFSET;
+ cmd |= JZ_DMA_CMD_TRANSFER_IRQ_ENABLE;
+
+ jz4740_dma_write(dmadev, JZ_REG_DMA_CMD(chan->id), cmd);
+ jz4740_dma_write(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0);
+ jz4740_dma_write(dmadev, JZ_REG_DMA_REQ_TYPE(chan->id),
+ config->slave_id);
+
+ return 0;
+}
+
+static int jz4740_dma_terminate_all(struct dma_chan *c)
+{
+ struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+ struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0,
+ JZ_DMA_STATUS_CTRL_ENABLE);
+ chan->desc = NULL;
+ vchan_get_all_descriptors(&chan->vchan, &head);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&chan->vchan, &head);
+
+ return 0;
+}
+
+static int jz4740_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct dma_slave_config *config = (struct dma_slave_config *)arg;
+
+ switch (cmd) {
+ case DMA_SLAVE_CONFIG:
+ return jz4740_dma_slave_config(chan, config);
+ case DMA_TERMINATE_ALL:
+ return jz4740_dma_terminate_all(chan);
+ default:
+ return -ENOSYS;
+ }
+}
+
+static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan)
+{
+ struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
+ dma_addr_t src_addr, dst_addr;
+ struct virt_dma_desc *vdesc;
+ struct jz4740_dma_sg *sg;
+
+ jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0,
+ JZ_DMA_STATUS_CTRL_ENABLE);
+
+ if (!chan->desc) {
+ vdesc = vchan_next_desc(&chan->vchan);
+ if (!vdesc)
+ return 0;
+ chan->desc = to_jz4740_dma_desc(vdesc);
+ chan->next_sg = 0;
+ }
+
+ if (chan->next_sg == chan->desc->num_sgs)
+ chan->next_sg = 0;
+
+ sg = &chan->desc->sg[chan->next_sg];
+
+ if (chan->desc->direction == DMA_MEM_TO_DEV) {
+ src_addr = sg->addr;
+ dst_addr = chan->fifo_addr;
+ } else {
+ src_addr = chan->fifo_addr;
+ dst_addr = sg->addr;
+ }
+ jz4740_dma_write(dmadev, JZ_REG_DMA_SRC_ADDR(chan->id), src_addr);
+ jz4740_dma_write(dmadev, JZ_REG_DMA_DST_ADDR(chan->id), dst_addr);
+ jz4740_dma_write(dmadev, JZ_REG_DMA_TRANSFER_COUNT(chan->id),
+ sg->len >> chan->transfer_shift);
+
+ chan->next_sg++;
+
+ jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id),
+ JZ_DMA_STATUS_CTRL_NO_DESC | JZ_DMA_STATUS_CTRL_ENABLE,
+ JZ_DMA_STATUS_CTRL_HALT | JZ_DMA_STATUS_CTRL_NO_DESC |
+ JZ_DMA_STATUS_CTRL_ENABLE);
+
+ jz4740_dma_write_mask(dmadev, JZ_REG_DMA_CTRL,
+ JZ_DMA_CTRL_ENABLE,
+ JZ_DMA_CTRL_HALT | JZ_DMA_CTRL_ENABLE);
+
+ return 0;
+}
+
+static void jz4740_dma_chan_irq(struct jz4740_dmaengine_chan *chan)
+{
+ spin_lock(&chan->vchan.lock);
+ if (chan->desc) {
+ if (chan->desc->cyclic) {
+ vchan_cyclic_callback(&chan->desc->vdesc);
+ } else {
+ if (chan->next_sg == chan->desc->num_sgs) {
+ /* complete the cookie before dropping the reference */
+ vchan_cookie_complete(&chan->desc->vdesc);
+ chan->desc = NULL;
+ }
+ }
+ }
+ jz4740_dma_start_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
+}
+
+static irqreturn_t jz4740_dma_irq(int irq, void *devid)
+{
+ struct jz4740_dma_dev *dmadev = devid;
+ uint32_t irq_status;
+ unsigned int i;
+
+ irq_status = readl(dmadev->base + JZ_REG_DMA_IRQ);
+
+ for (i = 0; i < JZ_DMA_NR_CHANS; ++i) {
+ if (irq_status & (1 << i)) {
+ jz4740_dma_write_mask(dmadev,
+ JZ_REG_DMA_STATUS_CTRL(i), 0,
+ JZ_DMA_STATUS_CTRL_ENABLE |
+ JZ_DMA_STATUS_CTRL_TRANSFER_DONE);
+
+ jz4740_dma_chan_irq(&dmadev->chan[i]);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void jz4740_dma_issue_pending(struct dma_chan *c)
+{
+ struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ if (vchan_issue_pending(&chan->vchan) && !chan->desc)
+ jz4740_dma_start_transfer(chan);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg(
+ struct dma_chan *c, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+ struct jz4740_dma_desc *desc;
+ struct scatterlist *sg;
+ unsigned int i;
+
+ desc = jz4740_dma_alloc_desc(sg_len);
+ if (!desc)
+ return NULL;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ desc->sg[i].addr = sg_dma_address(sg);
+ desc->sg[i].len = sg_dma_len(sg);
+ }
+
+ desc->num_sgs = sg_len;
+ desc->direction = direction;
+ desc->cyclic = false;
+
+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic(
+ struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+ struct jz4740_dma_desc *desc;
+ unsigned int num_periods, i;
+
+ if (buf_len % period_len)
+ return NULL;
+
+ num_periods = buf_len / period_len;
+
+ desc = jz4740_dma_alloc_desc(num_periods);
+ if (!desc)
+ return NULL;
+
+ for (i = 0; i < num_periods; i++) {
+ desc->sg[i].addr = buf_addr;
+ desc->sg[i].len = period_len;
+ buf_addr += period_len;
+ }
+
+ desc->num_sgs = num_periods;
+ desc->direction = direction;
+ desc->cyclic = true;
+
+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static size_t jz4740_dma_desc_residue(struct jz4740_dmaengine_chan *chan,
+ struct jz4740_dma_desc *desc, unsigned int next_sg)
+{
+ struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
+ unsigned int residue, count;
+ unsigned int i;
+
+ residue = 0;
+
+ for (i = next_sg; i < desc->num_sgs; i++)
+ residue += desc->sg[i].len;
+
+ if (next_sg != 0) {
+ count = jz4740_dma_read(dmadev,
+ JZ_REG_DMA_TRANSFER_COUNT(chan->id));
+ residue += count << chan->transfer_shift;
+ }
+
+ return residue;
+}
+
+static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+ struct virt_dma_desc *vdesc;
+ enum dma_status status;
+ unsigned long flags;
+
+ status = dma_cookie_status(c, cookie, state);
+ if (status == DMA_SUCCESS || !state)
+ return status;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ vdesc = vchan_find_desc(&chan->vchan, cookie);
+ if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) {
+ state->residue = jz4740_dma_desc_residue(chan, chan->desc,
+ chan->next_sg);
+ } else if (vdesc) {
+ state->residue = jz4740_dma_desc_residue(chan,
+ to_jz4740_dma_desc(vdesc), 0);
+ } else {
+ state->residue = 0;
+ }
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ return status;
+}
+
+static int jz4740_dma_alloc_chan_resources(struct dma_chan *c)
+{
+ return 0;
+}
+
+static void jz4740_dma_free_chan_resources(struct dma_chan *c)
+{
+ vchan_free_chan_resources(to_virt_chan(c));
+}
+
+static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+ kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc));
+}
+
+static int jz4740_dma_probe(struct platform_device *pdev)
+{
+ struct jz4740_dmaengine_chan *chan;
+ struct jz4740_dma_dev *dmadev;
+ struct dma_device *dd;
+ unsigned int i;
+ struct resource *res;
+ int ret;
+ int irq;
+
+ dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
+ if (!dmadev)
+ return -ENOMEM;
+
+ dd = &dmadev->ddev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dmadev->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dmadev->base))
+ return PTR_ERR(dmadev->base);
+
+ dmadev->clk = clk_get(&pdev->dev, "dma");
+ if (IS_ERR(dmadev->clk))
+ return PTR_ERR(dmadev->clk);
+
+ clk_prepare_enable(dmadev->clk);
+
+ dma_cap_set(DMA_SLAVE, dd->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dd->cap_mask);
+ dd->device_alloc_chan_resources = jz4740_dma_alloc_chan_resources;
+ dd->device_free_chan_resources = jz4740_dma_free_chan_resources;
+ dd->device_tx_status = jz4740_dma_tx_status;
+ dd->device_issue_pending = jz4740_dma_issue_pending;
+ dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg;
+ dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
+ dd->device_control = jz4740_dma_control;
+ dd->dev = &pdev->dev;
+ dd->chancnt = JZ_DMA_NR_CHANS;
+ INIT_LIST_HEAD(&dd->channels);
+
+ for (i = 0; i < dd->chancnt; i++) {
+ chan = &dmadev->chan[i];
+ chan->id = i;
+ chan->vchan.desc_free = jz4740_dma_desc_free;
+ vchan_init(&chan->vchan, dd);
+ }
+
+ ret = dma_async_device_register(dd);
+ if (ret)
+ return ret;
+
+ irq = platform_get_irq(pdev, 0);
+ ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
+ if (ret)
+ goto err_unregister;
+
+ platform_set_drvdata(pdev, dmadev);
+
+ return 0;
+
+err_unregister:
+ dma_async_device_unregister(dd);
+ return ret;
+}
+
+static int jz4740_dma_remove(struct platform_device *pdev)
+{
+ struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ free_irq(irq, dmadev);
+ dma_async_device_unregister(&dmadev->ddev);
+ clk_disable_unprepare(dmadev->clk);
+
+ return 0;
+}
+
+static struct platform_driver jz4740_dma_driver = {
+ .probe = jz4740_dma_probe,
+ .remove = jz4740_dma_remove,
+ .driver = {
+ .name = "jz4740-dma",
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(jz4740_dma_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("JZ4740 DMA driver");
+MODULE_LICENSE("GPLv2");
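
The new driver is consumed through the standard dmaengine slave API. A hedged usage sketch (the FIFO address, burst size and sg list are illustrative; JZ4740_DMA_TYPE_UART_TRANSMIT is assumed to be provided by asm/mach-jz4740/dma.h):

/* Sketch: configuring and starting a mem-to-device transfer. */
struct dma_slave_config cfg = {
	.direction = DMA_MEM_TO_DEV,
	.dst_addr = fifo_addr,			/* device FIFO (assumed) */
	.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
	.dst_maxburst = 16,
	.slave_id = JZ4740_DMA_TYPE_UART_TRANSMIT, /* request line (assumed) */
};
struct dma_async_tx_descriptor *desc;

dmaengine_slave_config(chan, &cfg);
desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
			       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (desc) {
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
}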
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 93f7992bee5c..9e56745f87bf 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -663,11 +663,6 @@ static bool device_has_all_tx_types(struct dma_device *device)
return false;
#endif
- #if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
- if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
- return false;
- #endif
-
#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
if (!dma_has_cap(DMA_XOR, device->cap_mask))
return false;
@@ -729,8 +724,6 @@ int dma_async_device_register(struct dma_device *device)
!device->device_prep_dma_pq);
BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
!device->device_prep_dma_pq_val);
- BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
- !device->device_prep_dma_memset);
BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
!device->device_prep_dma_interrupt);
BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
new file mode 100644
index 000000000000..dde13248b681
--- /dev/null
+++ b/drivers/dma/dw/Kconfig
@@ -0,0 +1,29 @@
+#
+# DMA engine configuration for dw
+#
+
+config DW_DMAC_CORE
+ tristate "Synopsys DesignWare AHB DMA support"
+ depends on GENERIC_HARDIRQS
+ select DMA_ENGINE
+
+config DW_DMAC
+ tristate "Synopsys DesignWare AHB DMA platform driver"
+ select DW_DMAC_CORE
+ select DW_DMAC_BIG_ENDIAN_IO if AVR32
+ default y if CPU_AT32AP7000
+ help
+ Support the Synopsys DesignWare AHB DMA controller. This
+ can be integrated in chips such as the Atmel AT32ap7000.
+
+config DW_DMAC_PCI
+ tristate "Synopsys DesignWare AHB DMA PCI driver"
+ depends on PCI
+ select DW_DMAC_CORE
+ help
+ Support the Synopsys DesignWare AHB DMA controller on the
+ platforms that enumerate it as a PCI device. For example,
+ Intel Medfield has integrated this GPDMA controller.
+
+config DW_DMAC_BIG_ENDIAN_IO
+ bool
diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile
new file mode 100644
index 000000000000..3eebd1ce2c6b
--- /dev/null
+++ b/drivers/dma/dw/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_DW_DMAC_CORE) += dw_dmac_core.o
+dw_dmac_core-objs := core.o
+
+obj-$(CONFIG_DW_DMAC) += dw_dmac.o
+dw_dmac-objs := platform.o
+
+obj-$(CONFIG_DW_DMAC_PCI) += dw_dmac_pci.o
+dw_dmac_pci-objs := pci.o
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw/core.c
index 2e5deaa82b60..eea479c12173 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw/core.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2007-2008 Atmel Corporation
* Copyright (C) 2010-2011 ST Microelectronics
+ * Copyright (C) 2013 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -19,17 +20,12 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_dma.h>
#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/acpi.h>
-#include <linux/acpi_dma.h>
-#include "dw_dmac_regs.h"
-#include "dmaengine.h"
+#include "../dmaengine.h"
+#include "internal.h"
/*
* This supports the Synopsys "DesignWare AHB Central DMA Controller",
@@ -41,16 +37,6 @@
* which does not support descriptor writeback.
*/
-static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
-{
- return slave ? slave->dst_master : 0;
-}
-
-static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
-{
- return slave ? slave->src_master : 1;
-}
-
static inline void dwc_set_masters(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
@@ -556,14 +542,14 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
/* --------------------- Cyclic DMA API extensions -------------------- */
-inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
+dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);
-inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
+dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
return channel_readl(dwc, DAR);
@@ -1225,99 +1211,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
-/*----------------------------------------------------------------------*/
-
-struct dw_dma_of_filter_args {
- struct dw_dma *dw;
- unsigned int req;
- unsigned int src;
- unsigned int dst;
-};
-
-static bool dw_dma_of_filter(struct dma_chan *chan, void *param)
-{
- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dw_dma_of_filter_args *fargs = param;
-
- /* Ensure the device matches our channel */
- if (chan->device != &fargs->dw->dma)
- return false;
-
- dwc->request_line = fargs->req;
- dwc->src_master = fargs->src;
- dwc->dst_master = fargs->dst;
-
- return true;
-}
-
-static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
- struct of_dma *ofdma)
-{
- struct dw_dma *dw = ofdma->of_dma_data;
- struct dw_dma_of_filter_args fargs = {
- .dw = dw,
- };
- dma_cap_mask_t cap;
-
- if (dma_spec->args_count != 3)
- return NULL;
-
- fargs.req = dma_spec->args[0];
- fargs.src = dma_spec->args[1];
- fargs.dst = dma_spec->args[2];
-
- if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
- fargs.src >= dw->nr_masters ||
- fargs.dst >= dw->nr_masters))
- return NULL;
-
- dma_cap_zero(cap);
- dma_cap_set(DMA_SLAVE, cap);
-
- /* TODO: there should be a simpler way to do this */
- return dma_request_channel(cap, dw_dma_of_filter, &fargs);
-}
-
-#ifdef CONFIG_ACPI
-static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
-{
- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct acpi_dma_spec *dma_spec = param;
-
- if (chan->device->dev != dma_spec->dev ||
- chan->chan_id != dma_spec->chan_id)
- return false;
-
- dwc->request_line = dma_spec->slave_id;
- dwc->src_master = dwc_get_sms(NULL);
- dwc->dst_master = dwc_get_dms(NULL);
-
- return true;
-}
-
-static void dw_dma_acpi_controller_register(struct dw_dma *dw)
-{
- struct device *dev = dw->dma.dev;
- struct acpi_dma_filter_info *info;
- int ret;
-
- info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
- if (!info)
- return;
-
- dma_cap_zero(info->dma_cap);
- dma_cap_set(DMA_SLAVE, info->dma_cap);
- info->filter_fn = dw_dma_acpi_filter;
-
- ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
- info);
- if (ret)
- dev_err(dev, "could not register acpi_dma_controller\n");
-}
-#else /* !CONFIG_ACPI */
-static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
-#endif /* !CONFIG_ACPI */
-
/* --------------------- Cyclic DMA API extensions -------------------- */
/**
@@ -1598,104 +1491,24 @@ static void dw_dma_off(struct dw_dma *dw)
dw->chan[i].initialized = false;
}
-#ifdef CONFIG_OF
-static struct dw_dma_platform_data *
-dw_dma_parse_dt(struct platform_device *pdev)
+int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
{
- struct device_node *np = pdev->dev.of_node;
- struct dw_dma_platform_data *pdata;
- u32 tmp, arr[4];
-
- if (!np) {
- dev_err(&pdev->dev, "Missing DT data\n");
- return NULL;
- }
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return NULL;
-
- if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels))
- return NULL;
-
- if (of_property_read_bool(np, "is_private"))
- pdata->is_private = true;
-
- if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
- pdata->chan_allocation_order = (unsigned char)tmp;
-
- if (!of_property_read_u32(np, "chan_priority", &tmp))
- pdata->chan_priority = tmp;
-
- if (!of_property_read_u32(np, "block_size", &tmp))
- pdata->block_size = tmp;
-
- if (!of_property_read_u32(np, "dma-masters", &tmp)) {
- if (tmp > 4)
- return NULL;
-
- pdata->nr_masters = tmp;
- }
-
- if (!of_property_read_u32_array(np, "data_width", arr,
- pdata->nr_masters))
- for (tmp = 0; tmp < pdata->nr_masters; tmp++)
- pdata->data_width[tmp] = arr[tmp];
-
- return pdata;
-}
-#else
-static inline struct dw_dma_platform_data *
-dw_dma_parse_dt(struct platform_device *pdev)
-{
- return NULL;
-}
-#endif
-
-static int dw_probe(struct platform_device *pdev)
-{
- struct dw_dma_platform_data *pdata;
- struct resource *io;
struct dw_dma *dw;
size_t size;
- void __iomem *regs;
bool autocfg;
unsigned int dw_params;
unsigned int nr_channels;
unsigned int max_blk_size = 0;
- int irq;
int err;
int i;
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!io)
- return -EINVAL;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- regs = devm_ioremap_resource(&pdev->dev, io);
- if (IS_ERR(regs))
- return PTR_ERR(regs);
-
- /* Apply default dma_mask if needed */
- if (!pdev->dev.dma_mask) {
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- }
-
- dw_params = dma_read_byaddr(regs, DW_PARAMS);
+ dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
autocfg = dw_params >> DW_PARAMS_EN & 0x1;
- dev_dbg(&pdev->dev, "DW_PARAMS: 0x%08x\n", dw_params);
-
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata)
- pdata = dw_dma_parse_dt(pdev);
+ dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);
if (!pdata && autocfg) {
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
@@ -1712,16 +1525,17 @@ static int dw_probe(struct platform_device *pdev)
nr_channels = pdata->nr_channels;
size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
- dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ dw = devm_kzalloc(chip->dev, size, GFP_KERNEL);
if (!dw)
return -ENOMEM;
- dw->clk = devm_clk_get(&pdev->dev, "hclk");
+ dw->clk = devm_clk_get(chip->dev, "hclk");
if (IS_ERR(dw->clk))
return PTR_ERR(dw->clk);
clk_prepare_enable(dw->clk);
- dw->regs = regs;
+ dw->regs = chip->regs;
+ chip->dw = dw;
/* Get hardware configuration parameters */
if (autocfg) {
@@ -1746,18 +1560,16 @@ static int dw_probe(struct platform_device *pdev)
/* Disable BLOCK interrupts as well */
channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
- err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
+ err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt, 0,
"dw_dmac", dw);
if (err)
return err;
- platform_set_drvdata(pdev, dw);
-
/* Create a pool of consistent memory blocks for hardware descriptors */
- dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
+ dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
sizeof(struct dw_desc), 4, 0);
if (!dw->desc_pool) {
- dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
+ dev_err(chip->dev, "No memory for descriptors dma pool\n");
return -ENOMEM;
}
@@ -1798,12 +1610,12 @@ static int dw_probe(struct platform_device *pdev)
/* Hardware configuration */
if (autocfg) {
unsigned int dwc_params;
+ void __iomem *addr = chip->regs + r * sizeof(u32);
- dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
- DWC_PARAMS);
+ dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
- dev_dbg(&pdev->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
- dwc_params);
+ dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
+ dwc_params);
/* Decode maximum block size for given channel. The
* stored 4 bit value represents blocks from 0x00 for 3
@@ -1834,7 +1646,7 @@ static int dw_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
if (pdata->is_private)
dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
- dw->dma.dev = &pdev->dev;
+ dw->dma.dev = chip->dev;
dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
dw->dma.device_free_chan_resources = dwc_free_chan_resources;
@@ -1848,32 +1660,20 @@ static int dw_probe(struct platform_device *pdev)
dma_writel(dw, CFG, DW_CFG_DMA_EN);
- dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
+ dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
nr_channels);
dma_async_device_register(&dw->dma);
- if (pdev->dev.of_node) {
- err = of_dma_controller_register(pdev->dev.of_node,
- dw_dma_of_xlate, dw);
- if (err)
- dev_err(&pdev->dev,
- "could not register of_dma_controller\n");
- }
-
- if (ACPI_HANDLE(&pdev->dev))
- dw_dma_acpi_controller_register(dw);
-
return 0;
}
+EXPORT_SYMBOL_GPL(dw_dma_probe);
-static int dw_remove(struct platform_device *pdev)
+int dw_dma_remove(struct dw_dma_chip *chip)
{
- struct dw_dma *dw = platform_get_drvdata(pdev);
+ struct dw_dma *dw = chip->dw;
struct dw_dma_chan *dwc, *_dwc;
- if (pdev->dev.of_node)
- of_dma_controller_free(pdev->dev.of_node);
dw_dma_off(dw);
dma_async_device_unregister(&dw->dma);
@@ -1887,86 +1687,44 @@ static int dw_remove(struct platform_device *pdev)
return 0;
}
+EXPORT_SYMBOL_GPL(dw_dma_remove);
-static void dw_shutdown(struct platform_device *pdev)
+void dw_dma_shutdown(struct dw_dma_chip *chip)
{
- struct dw_dma *dw = platform_get_drvdata(pdev);
+ struct dw_dma *dw = chip->dw;
dw_dma_off(dw);
clk_disable_unprepare(dw->clk);
}
+EXPORT_SYMBOL_GPL(dw_dma_shutdown);
+
+#ifdef CONFIG_PM_SLEEP
-static int dw_suspend_noirq(struct device *dev)
+int dw_dma_suspend(struct dw_dma_chip *chip)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct dw_dma *dw = platform_get_drvdata(pdev);
+ struct dw_dma *dw = chip->dw;
dw_dma_off(dw);
clk_disable_unprepare(dw->clk);
return 0;
}
+EXPORT_SYMBOL_GPL(dw_dma_suspend);
-static int dw_resume_noirq(struct device *dev)
+int dw_dma_resume(struct dw_dma_chip *chip)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct dw_dma *dw = platform_get_drvdata(pdev);
+ struct dw_dma *dw = chip->dw;
clk_prepare_enable(dw->clk);
dma_writel(dw, CFG, DW_CFG_DMA_EN);
return 0;
}
+EXPORT_SYMBOL_GPL(dw_dma_resume);
-static const struct dev_pm_ops dw_dev_pm_ops = {
- .suspend_noirq = dw_suspend_noirq,
- .resume_noirq = dw_resume_noirq,
- .freeze_noirq = dw_suspend_noirq,
- .thaw_noirq = dw_resume_noirq,
- .restore_noirq = dw_resume_noirq,
- .poweroff_noirq = dw_suspend_noirq,
-};
-
-#ifdef CONFIG_OF
-static const struct of_device_id dw_dma_of_id_table[] = {
- { .compatible = "snps,dma-spear1340" },
- {}
-};
-MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
-#endif
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id dw_dma_acpi_id_table[] = {
- { "INTL9C60", 0 },
- { }
-};
-#endif
-
-static struct platform_driver dw_driver = {
- .probe = dw_probe,
- .remove = dw_remove,
- .shutdown = dw_shutdown,
- .driver = {
- .name = "dw_dmac",
- .pm = &dw_dev_pm_ops,
- .of_match_table = of_match_ptr(dw_dma_of_id_table),
- .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
- },
-};
-
-static int __init dw_init(void)
-{
- return platform_driver_register(&dw_driver);
-}
-subsys_initcall(dw_init);
-
-static void __exit dw_exit(void)
-{
- platform_driver_unregister(&dw_driver);
-}
-module_exit(dw_exit);
+#endif /* CONFIG_PM_SLEEP */
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
+MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h
new file mode 100644
index 000000000000..32667f9e0dda
--- /dev/null
+++ b/drivers/dma/dw/internal.h
@@ -0,0 +1,70 @@
+/*
+ * Driver for the Synopsys DesignWare DMA Controller
+ *
+ * Copyright (C) 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _DW_DMAC_INTERNAL_H
+#define _DW_DMAC_INTERNAL_H
+
+#include <linux/device.h>
+#include <linux/dw_dmac.h>
+
+#include "regs.h"
+
+/**
+ * struct dw_dma_chip - representation of DesignWare DMA controller hardware
+ * @dev: struct device of the DMA controller
+ * @irq: irq line
+ * @regs: memory mapped I/O space
+ * @dw: struct dw_dma that is filled in by dw_dma_probe()
+ */
+struct dw_dma_chip {
+ struct device *dev;
+ int irq;
+ void __iomem *regs;
+ struct dw_dma *dw;
+};
+
+/* Export to the platform drivers */
+int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata);
+int dw_dma_remove(struct dw_dma_chip *chip);
+
+void dw_dma_shutdown(struct dw_dma_chip *chip);
+
+#ifdef CONFIG_PM_SLEEP
+
+int dw_dma_suspend(struct dw_dma_chip *chip);
+int dw_dma_resume(struct dw_dma_chip *chip);
+
+#endif /* CONFIG_PM_SLEEP */
+
+/**
+ * dwc_get_dms - get destination master
+ * @slave: pointer to the custom slave configuration
+ *
+ * Returns destination master in the custom slave configuration if defined, or
+ * default value otherwise.
+ */
+static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
+{
+ return slave ? slave->dst_master : 0;
+}
+
+/**
+ * dwc_get_sms - get source master
+ * @slave: pointer to the custom slave configuration
+ *
+ * Returns source master in the custom slave configuration if defined, or
+ * default value otherwise.
+ */
+static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
+{
+ return slave ? slave->src_master : 1;
+}
+
+#endif /* _DW_DMAC_INTERNAL_H */
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
new file mode 100644
index 000000000000..e89fc24b8293
--- /dev/null
+++ b/drivers/dma/dw/pci.c
@@ -0,0 +1,101 @@
+/*
+ * PCI driver for the Synopsys DesignWare DMA Controller
+ *
+ * Copyright (C) 2013 Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+
+#include "internal.h"
+
+static struct dw_dma_platform_data dw_pci_pdata = {
+ .is_private = 1,
+ .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
+ .chan_priority = CHAN_PRIORITY_ASCENDING,
+};
+
+static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+ struct dw_dma_chip *chip;
+ struct dw_dma_platform_data *pdata = (void *)pid->driver_data;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
+ if (ret) {
+ dev_err(&pdev->dev, "I/O memory remapping failed\n");
+ return ret;
+ }
+
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = &pdev->dev;
+ chip->regs = pcim_iomap_table(pdev)[0];
+ chip->irq = pdev->irq;
+
+ ret = dw_dma_probe(chip, pdata);
+ if (ret)
+ return ret;
+
+ pci_set_drvdata(pdev, chip);
+
+ return 0;
+}
+
+static void dw_pci_remove(struct pci_dev *pdev)
+{
+ struct dw_dma_chip *chip = pci_get_drvdata(pdev);
+ int ret;
+
+ ret = dw_dma_remove(chip);
+ if (ret)
+ dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = {
+ /* Medfield */
+ { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_pdata },
+ { PCI_VDEVICE(INTEL, 0x0830), (kernel_ulong_t)&dw_pci_pdata },
+
+ /* BayTrail */
+ { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_pdata },
+ { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_pdata },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
+
+static struct pci_driver dw_pci_driver = {
+ .name = "dw_dmac_pci",
+ .id_table = dw_pci_id_table,
+ .probe = dw_pci_probe,
+ .remove = dw_pci_remove,
+};
+
+module_pci_driver(dw_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller PCI driver");
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
new file mode 100644
index 000000000000..6c9449cffae8
--- /dev/null
+++ b/drivers/dma/dw/platform.c
@@ -0,0 +1,317 @@
+/*
+ * Platform driver for the Synopsys DesignWare DMA Controller
+ *
+ * Copyright (C) 2007-2008 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
+ * Copyright (C) 2013 Intel Corporation
+ *
+ * Some parts of this driver are derived from the original dw_dmac.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/acpi.h>
+#include <linux/acpi_dma.h>
+
+#include "internal.h"
+
+struct dw_dma_of_filter_args {
+ struct dw_dma *dw;
+ unsigned int req;
+ unsigned int src;
+ unsigned int dst;
+};
+
+static bool dw_dma_of_filter(struct dma_chan *chan, void *param)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma_of_filter_args *fargs = param;
+
+ /* Ensure the device matches our channel */
+ if (chan->device != &fargs->dw->dma)
+ return false;
+
+ dwc->request_line = fargs->req;
+ dwc->src_master = fargs->src;
+ dwc->dst_master = fargs->dst;
+
+ return true;
+}
+
+static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct dw_dma *dw = ofdma->of_dma_data;
+ struct dw_dma_of_filter_args fargs = {
+ .dw = dw,
+ };
+ dma_cap_mask_t cap;
+
+ if (dma_spec->args_count != 3)
+ return NULL;
+
+ fargs.req = dma_spec->args[0];
+ fargs.src = dma_spec->args[1];
+ fargs.dst = dma_spec->args[2];
+
+ if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
+ fargs.src >= dw->nr_masters ||
+ fargs.dst >= dw->nr_masters))
+ return NULL;
+
+ dma_cap_zero(cap);
+ dma_cap_set(DMA_SLAVE, cap);
+
+ /* TODO: there should be a simpler way to do this */
+ return dma_request_channel(cap, dw_dma_of_filter, &fargs);
+}
+
+#ifdef CONFIG_ACPI
+static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct acpi_dma_spec *dma_spec = param;
+
+ if (chan->device->dev != dma_spec->dev ||
+ chan->chan_id != dma_spec->chan_id)
+ return false;
+
+ dwc->request_line = dma_spec->slave_id;
+ dwc->src_master = dwc_get_sms(NULL);
+ dwc->dst_master = dwc_get_dms(NULL);
+
+ return true;
+}
+
+static void dw_dma_acpi_controller_register(struct dw_dma *dw)
+{
+ struct device *dev = dw->dma.dev;
+ struct acpi_dma_filter_info *info;
+ int ret;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return;
+
+ dma_cap_zero(info->dma_cap);
+ dma_cap_set(DMA_SLAVE, info->dma_cap);
+ info->filter_fn = dw_dma_acpi_filter;
+
+ ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
+ info);
+ if (ret)
+ dev_err(dev, "could not register acpi_dma_controller\n");
+}
+#else /* !CONFIG_ACPI */
+static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
+#endif /* !CONFIG_ACPI */
+
+#ifdef CONFIG_OF
+static struct dw_dma_platform_data *
+dw_dma_parse_dt(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct dw_dma_platform_data *pdata;
+ u32 tmp, arr[4];
+
+ if (!np) {
+ dev_err(&pdev->dev, "Missing DT data\n");
+ return NULL;
+ }
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels))
+ return NULL;
+
+ if (of_property_read_bool(np, "is_private"))
+ pdata->is_private = true;
+
+ if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
+ pdata->chan_allocation_order = (unsigned char)tmp;
+
+ if (!of_property_read_u32(np, "chan_priority", &tmp))
+ pdata->chan_priority = tmp;
+
+ if (!of_property_read_u32(np, "block_size", &tmp))
+ pdata->block_size = tmp;
+
+ if (!of_property_read_u32(np, "dma-masters", &tmp)) {
+ if (tmp > 4)
+ return NULL;
+
+ pdata->nr_masters = tmp;
+ }
+
+ if (!of_property_read_u32_array(np, "data_width", arr,
+ pdata->nr_masters))
+ for (tmp = 0; tmp < pdata->nr_masters; tmp++)
+ pdata->data_width[tmp] = arr[tmp];
+
+ return pdata;
+}
+#else
+static inline struct dw_dma_platform_data *
+dw_dma_parse_dt(struct platform_device *pdev)
+{
+ return NULL;
+}
+#endif
+
+static int dw_probe(struct platform_device *pdev)
+{
+ struct dw_dma_chip *chip;
+ struct device *dev = &pdev->dev;
+ struct resource *mem;
+ struct dw_dma_platform_data *pdata;
+ int err;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->irq = platform_get_irq(pdev, 0);
+ if (chip->irq < 0)
+ return chip->irq;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ chip->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(chip->regs))
+ return PTR_ERR(chip->regs);
+
+ /* Apply default dma_mask if needed */
+ if (!dev->dma_mask) {
+ dev->dma_mask = &dev->coherent_dma_mask;
+ dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ }
+
+ pdata = dev_get_platdata(dev);
+ if (!pdata)
+ pdata = dw_dma_parse_dt(pdev);
+
+ chip->dev = dev;
+
+ err = dw_dma_probe(chip, pdata);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, chip);
+
+ if (pdev->dev.of_node) {
+ err = of_dma_controller_register(pdev->dev.of_node,
+ dw_dma_of_xlate, chip->dw);
+ if (err)
+ dev_err(&pdev->dev,
+ "could not register of_dma_controller\n");
+ }
+
+ if (ACPI_HANDLE(&pdev->dev))
+ dw_dma_acpi_controller_register(chip->dw);
+
+ return 0;
+}
+
+static int dw_remove(struct platform_device *pdev)
+{
+ struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+
+ return dw_dma_remove(chip);
+}
+
+static void dw_shutdown(struct platform_device *pdev)
+{
+ struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+
+ dw_dma_shutdown(chip);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id dw_dma_of_id_table[] = {
+ { .compatible = "snps,dma-spear1340" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id dw_dma_acpi_id_table[] = {
+ { "INTL9C60", 0 },
+ { }
+};
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+
+static int dw_suspend_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+
+ return dw_dma_suspend(chip);
+}
+
+static int dw_resume_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+
+ return dw_dma_resume(chip);
+}
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define dw_suspend_noirq NULL
+#define dw_resume_noirq NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops dw_dev_pm_ops = {
+ .suspend_noirq = dw_suspend_noirq,
+ .resume_noirq = dw_resume_noirq,
+ .freeze_noirq = dw_suspend_noirq,
+ .thaw_noirq = dw_resume_noirq,
+ .restore_noirq = dw_resume_noirq,
+ .poweroff_noirq = dw_suspend_noirq,
+};
+
+static struct platform_driver dw_driver = {
+ .probe = dw_probe,
+ .remove = dw_remove,
+ .shutdown = dw_shutdown,
+ .driver = {
+ .name = "dw_dmac",
+ .pm = &dw_dev_pm_ops,
+ .of_match_table = of_match_ptr(dw_dma_of_id_table),
+ .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
+ },
+};
+
+static int __init dw_init(void)
+{
+ return platform_driver_register(&dw_driver);
+}
+subsys_initcall(dw_init);
+
+static void __exit dw_exit(void)
+{
+ platform_driver_unregister(&dw_driver);
+}
+module_exit(dw_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw/regs.h
index 9d417200bd57..deb4274f80f4 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw/regs.h
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/dw_dmac.h>
@@ -100,6 +101,12 @@ struct dw_dma_regs {
u32 DW_PARAMS;
};
+/*
+ * Big endian I/O access when reading and writing to the DMA controller
+ * registers. This is needed on some platforms, like the Atmel AVR32
+ * architecture.
+ */
+
#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
#define dma_readl_native ioread32be
#define dma_writel_native iowrite32be
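The point of the accessor indirection above: every register access in the core goes through dma_readl_native()/dma_writel_native(), so endianness is decided once at build time rather than at each call site. A sketch, with an illustrative register offset:

static inline u32 example_read_reg(void __iomem *regs)
{
	/* resolves to ioread32be() under DW_DMAC_BIG_ENDIAN_IO,
	 * to a little-endian 32-bit read otherwise */
	return dma_readl_native(regs + 0x40);	/* 0x40: illustrative offset */
}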
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index cd7e3280fadd..5f3e532436ee 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -24,7 +24,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <mach/edma.h>
+#include <linux/platform_data/edma.h>
#include "dmaengine.h"
#include "virt-dma.h"
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 4fc2980556ad..49e8fbdb8983 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1368,7 +1368,7 @@ static int fsldma_of_probe(struct platform_device *op)
dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
- dev_set_drvdata(&op->dev, fdev);
+ platform_set_drvdata(op, fdev);
/*
* We cannot use of_platform_bus_probe() because there is no
@@ -1417,7 +1417,7 @@ static int fsldma_of_remove(struct platform_device *op)
struct fsldma_device *fdev;
unsigned int i;
- fdev = dev_get_drvdata(&op->dev);
+ fdev = platform_get_drvdata(op);
dma_async_device_unregister(&fdev->common);
fsldma_free_irqs(fdev);
@@ -1428,7 +1428,6 @@ static int fsldma_of_remove(struct platform_device *op)
}
iounmap(fdev->regs);
- dev_set_drvdata(&op->dev, NULL);
kfree(fdev);
return 0;
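The fsldma hunks are a mechanical conversion to the platform_*_drvdata() helpers; the explicit NULL reset on remove can go because kernels of this vintage clear drvdata in the driver core on unbind. A condensed sketch of the resulting pairing, with a hypothetical my_dev struct:

struct my_dev { int dummy; };

static int my_of_probe(struct platform_device *op)
{
	struct my_dev *d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);

	if (!d)
		return -ENOMEM;
	platform_set_drvdata(op, d);	/* was: dev_set_drvdata(&op->dev, d) */
	return 0;
}

static int my_of_remove(struct platform_device *op)
{
	struct my_dev *d = platform_get_drvdata(op);

	/* use d for teardown; clearing drvdata by hand is redundant now */
	return d ? 0 : -ENODEV;
}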
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index f28583370d00..ff2aab973b45 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -27,6 +27,8 @@
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include <asm/irq.h>
#include <linux/platform_data/dma-imx.h>
@@ -186,6 +188,11 @@ struct imxdma_engine {
enum imx_dma_type devtype;
};
+struct imxdma_filter_data {
+ struct imxdma_engine *imxdma;
+ int request;
+};
+
static struct platform_device_id imx_dma_devtype[] = {
{
.name = "imx1-dma",
@@ -202,6 +209,22 @@ static struct platform_device_id imx_dma_devtype[] = {
};
MODULE_DEVICE_TABLE(platform, imx_dma_devtype);
+static const struct of_device_id imx_dma_of_dev_id[] = {
+ {
+ .compatible = "fsl,imx1-dma",
+ .data = &imx_dma_devtype[IMX1_DMA],
+ }, {
+ .compatible = "fsl,imx21-dma",
+ .data = &imx_dma_devtype[IMX21_DMA],
+ }, {
+ .compatible = "fsl,imx27-dma",
+ .data = &imx_dma_devtype[IMX27_DMA],
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);
+
static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
return imxdma->devtype == IMX1_DMA;
@@ -996,17 +1019,55 @@ static void imxdma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&imxdma->lock, flags);
}
+static bool imxdma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct imxdma_filter_data *fdata = param;
+ struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan);
+
+ if (chan->device->dev != fdata->imxdma->dev)
+ return false;
+
+ imxdma_chan->dma_request = fdata->request;
+ chan->private = NULL;
+
+ return true;
+}
+
+static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ int count = dma_spec->args_count;
+ struct imxdma_engine *imxdma = ofdma->of_dma_data;
+ struct imxdma_filter_data fdata = {
+ .imxdma = imxdma,
+ };
+
+ if (count != 1)
+ return NULL;
+
+ fdata.request = dma_spec->args[0];
+
+ return dma_request_channel(imxdma->dma_device.cap_mask,
+ imxdma_filter_fn, &fdata);
+}
+
static int __init imxdma_probe(struct platform_device *pdev)
{
struct imxdma_engine *imxdma;
struct resource *res;
+ const struct of_device_id *of_id;
int ret, i;
int irq, irq_err;
+ of_id = of_match_device(imx_dma_of_dev_id, &pdev->dev);
+ if (of_id)
+ pdev->id_entry = of_id->data;
+
imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
if (!imxdma)
return -ENOMEM;
+ imxdma->dev = &pdev->dev;
imxdma->devtype = pdev->id_entry->driver_data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1111,7 +1172,6 @@ static int __init imxdma_probe(struct platform_device *pdev)
&imxdma->dma_device.channels);
}
- imxdma->dev = &pdev->dev;
imxdma->dma_device.dev = &pdev->dev;
imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
@@ -1136,8 +1196,19 @@ static int __init imxdma_probe(struct platform_device *pdev)
goto err;
}
+ if (pdev->dev.of_node) {
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ imxdma_xlate, imxdma);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register of_dma_controller\n");
+ goto err_of_dma_controller;
+ }
+ }
+
return 0;
+err_of_dma_controller:
+ dma_async_device_unregister(&imxdma->dma_device);
err:
clk_disable_unprepare(imxdma->dma_ipg);
clk_disable_unprepare(imxdma->dma_ahb);
@@ -1150,6 +1221,9 @@ static int imxdma_remove(struct platform_device *pdev)
dma_async_device_unregister(&imxdma->dma_device);
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+
clk_disable_unprepare(imxdma->dma_ipg);
clk_disable_unprepare(imxdma->dma_ahb);
@@ -1159,6 +1233,7 @@ static int imxdma_remove(struct platform_device *pdev)
static struct platform_driver imxdma_driver = {
.driver = {
.name = "imx-dma",
+ .of_match_table = imx_dma_of_dev_id,
},
.id_table = imx_dma_devtype,
.remove = imxdma_remove,
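One idiom above deserves a callout: a DT probe has no pdev->id_entry, so the driver matches the OF node first and grafts the corresponding platform_device_id back in before the devtype lookup. Sketched below with a hypothetical my_of_dev_id table:

static int my_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(my_of_dev_id, &pdev->dev);

	/* DT path: reuse the legacy id_entry so code reading
	 * pdev->id_entry->driver_data works unchanged */
	if (of_id)
		pdev->id_entry = of_id->data;

	return 0;
}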
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 092867bf795c..1e44b8cf95da 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -36,6 +36,7 @@
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include <asm/irq.h>
#include <linux/platform_data/dma-imx-sdma.h>
@@ -1296,6 +1297,35 @@ err_dma_alloc:
return ret;
}
+static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
+{
+ struct imx_dma_data *data = fn_param;
+
+ if (!imx_dma_is_general_purpose(chan))
+ return false;
+
+ chan->private = data;
+
+ return true;
+}
+
+static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct sdma_engine *sdma = ofdma->of_dma_data;
+ dma_cap_mask_t mask = sdma->dma_device.cap_mask;
+ struct imx_dma_data data;
+
+ if (dma_spec->args_count != 3)
+ return NULL;
+
+ data.dma_request = dma_spec->args[0];
+ data.peripheral_type = dma_spec->args[1];
+ data.priority = dma_spec->args[2];
+
+ return dma_request_channel(mask, sdma_filter_fn, &data);
+}
+
static int __init sdma_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
@@ -1443,10 +1473,20 @@ static int __init sdma_probe(struct platform_device *pdev)
goto err_init;
}
+ if (np) {
+ ret = of_dma_controller_register(np, sdma_xlate, sdma);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register controller\n");
+ goto err_register;
+ }
+ }
+
dev_info(sdma->dev, "initialized\n");
return 0;
+err_register:
+ dma_async_device_unregister(&sdma->dma_device);
err_init:
kfree(sdma->script_addrs);
err_alloc:
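Note that sdma_xlate() hands dma_request_channel() an on-stack imx_dma_data; that works because both the filter and alloc_chan_resources run before dma_request_channel() returns, so the pointer never outlives the frame. The equivalent explicit request, sketched with illustrative values (constants from platform_data/dma-imx.h; sdma_filter_fn is driver-internal and shown only for illustration):

static struct dma_chan *request_example_chan(void)
{
	dma_cap_mask_t mask;
	struct imx_dma_data data = {
		.dma_request	 = 1,			/* illustrative event line */
		.peripheral_type = IMX_DMATYPE_SSI,
		.priority	 = DMA_PRIO_HIGH,
	};

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, sdma_filter_fn, &data);
}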
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index a0de82e21a7c..a975ebebea8a 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -1405,7 +1405,7 @@ static int dma_runtime_idle(struct device *dev)
return -EAGAIN;
}
- return pm_schedule_suspend(dev, 0);
+ return 0;
}
/******************************************************************************
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 17a2393b3e25..5ff6fc1819dc 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -1105,12 +1105,11 @@ static ssize_t cap_show(struct dma_chan *c, char *page)
{
struct dma_device *dma = c->device;
- return sprintf(page, "copy%s%s%s%s%s%s\n",
+ return sprintf(page, "copy%s%s%s%s%s\n",
dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
- dma_has_cap(DMA_MEMSET, dma->cap_mask) ? " fill" : "",
dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
}
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 29bf9448035d..212d584fe427 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -123,7 +123,6 @@ static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len
struct ioat_ring_ent {
union {
struct ioat_dma_descriptor *hw;
- struct ioat_fill_descriptor *fill;
struct ioat_xor_descriptor *xor;
struct ioat_xor_ext_descriptor *xor_ex;
struct ioat_pq_descriptor *pq;
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index ca6ea9b3551b..b642e035579b 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -311,14 +311,6 @@ static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
ioat_dma_unmap(chan, flags, len, desc->hw);
break;
- case IOAT_OP_FILL: {
- struct ioat_fill_descriptor *hw = desc->fill;
-
- if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
- ioat_unmap(pdev, hw->dst_addr - offset, len,
- PCI_DMA_FROMDEVICE, flags, 1);
- break;
- }
case IOAT_OP_XOR_VAL:
case IOAT_OP_XOR: {
struct ioat_xor_descriptor *xor = desc->xor;
@@ -824,51 +816,6 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
}
static struct dma_async_tx_descriptor *
-ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
- size_t len, unsigned long flags)
-{
- struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- struct ioat_ring_ent *desc;
- size_t total_len = len;
- struct ioat_fill_descriptor *fill;
- u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
- int num_descs, idx, i;
-
- num_descs = ioat2_xferlen_to_descs(ioat, len);
- if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
- idx = ioat->head;
- else
- return NULL;
- i = 0;
- do {
- size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
-
- desc = ioat2_get_ring_ent(ioat, idx + i);
- fill = desc->fill;
-
- fill->size = xfer_size;
- fill->src_data = src_data;
- fill->dst_addr = dest;
- fill->ctl = 0;
- fill->ctl_f.op = IOAT_OP_FILL;
-
- len -= xfer_size;
- dest += xfer_size;
- dump_desc_dbg(ioat, desc);
- } while (++i < num_descs);
-
- desc->txd.flags = flags;
- desc->len = total_len;
- fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
- fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
- fill->ctl_f.compl_write = 1;
- dump_desc_dbg(ioat, desc);
-
- /* we leave the channel locked to ensure in order submission */
- return &desc->txd;
-}
-
-static struct dma_async_tx_descriptor *
__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
size_t len, unsigned long flags)
@@ -1431,7 +1378,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
struct page *xor_srcs[IOAT_NUM_SRC_TEST];
struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
- dma_addr_t dma_addr, dest_dma;
+ dma_addr_t dest_dma;
struct dma_async_tx_descriptor *tx;
struct dma_chan *dma_chan;
dma_cookie_t cookie;
@@ -1598,56 +1545,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
goto free_resources;
}
- /* skip memset if the capability is not present */
- if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask))
- goto free_resources;
-
- /* test memset */
- op = IOAT_OP_FILL;
-
- dma_addr = dma_map_page(dev, dest, 0,
- PAGE_SIZE, DMA_FROM_DEVICE);
- tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
- DMA_PREP_INTERRUPT |
- DMA_COMPL_SKIP_SRC_UNMAP |
- DMA_COMPL_SKIP_DEST_UNMAP);
- if (!tx) {
- dev_err(dev, "Self-test memset prep failed\n");
- err = -ENODEV;
- goto dma_unmap;
- }
-
- async_tx_ack(tx);
- init_completion(&cmp);
- tx->callback = ioat3_dma_test_callback;
- tx->callback_param = &cmp;
- cookie = tx->tx_submit(tx);
- if (cookie < 0) {
- dev_err(dev, "Self-test memset setup failed\n");
- err = -ENODEV;
- goto dma_unmap;
- }
- dma->device_issue_pending(dma_chan);
-
- tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
- if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
- dev_err(dev, "Self-test memset timed out\n");
- err = -ENODEV;
- goto dma_unmap;
- }
-
- dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
-
- for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
- u32 *ptr = page_address(dest);
- if (ptr[i]) {
- dev_err(dev, "Self-test memset failed compare\n");
- err = -ENODEV;
- goto free_resources;
- }
- }
-
/* test for non-zero parity sum */
op = IOAT_OP_XOR_VAL;
@@ -1706,8 +1603,7 @@ dma_unmap:
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
DMA_TO_DEVICE);
- } else if (op == IOAT_OP_FILL)
- dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+ }
free_resources:
dma->device_free_chan_resources(dma_chan);
out:
@@ -1944,12 +1840,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
}
}
- if (is_raid_device && (device->cap & IOAT_CAP_FILL_BLOCK)) {
- dma_cap_set(DMA_MEMSET, dma->cap_mask);
- dma->device_prep_dma_memset = ioat3_prep_memset_lock;
- }
-
-
dma->device_tx_status = ioat3_tx_status;
device->cleanup_fn = ioat3_cleanup_event;
device->timer_fn = ioat3_timer_event;
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 5ee57d402a6e..62f83e983d8d 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -100,33 +100,6 @@ struct ioat_dma_descriptor {
uint64_t user2;
};
-struct ioat_fill_descriptor {
- uint32_t size;
- union {
- uint32_t ctl;
- struct {
- unsigned int int_en:1;
- unsigned int rsvd:1;
- unsigned int dest_snoop_dis:1;
- unsigned int compl_write:1;
- unsigned int fence:1;
- unsigned int rsvd2:2;
- unsigned int dest_brk:1;
- unsigned int bundle:1;
- unsigned int rsvd4:15;
- #define IOAT_OP_FILL 0x01
- unsigned int op:8;
- } ctl_f;
- };
- uint64_t src_data;
- uint64_t dst_addr;
- uint64_t next;
- uint64_t rsv1;
- uint64_t next_dst_addr;
- uint64_t user1;
- uint64_t user2;
-};
-
struct ioat_xor_descriptor {
uint32_t size;
union {
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 7dafb9f3785f..cc727ec78c4e 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -633,39 +633,6 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
}
static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
- int value, size_t len, unsigned long flags)
-{
- struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
- struct iop_adma_desc_slot *sw_desc, *grp_start;
- int slot_cnt, slots_per_op;
-
- if (unlikely(!len))
- return NULL;
- BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
-
- dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
- __func__, len);
-
- spin_lock_bh(&iop_chan->lock);
- slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
- sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
- if (sw_desc) {
- grp_start = sw_desc->group_head;
- iop_desc_init_memset(grp_start, flags);
- iop_desc_set_byte_count(grp_start, iop_chan, len);
- iop_desc_set_block_fill_val(grp_start, value);
- iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
- sw_desc->unmap_src_cnt = 1;
- sw_desc->unmap_len = len;
- sw_desc->async_tx.flags = flags;
- }
- spin_unlock_bh(&iop_chan->lock);
-
- return sw_desc ? &sw_desc->async_tx : NULL;
-}
-
-static struct dma_async_tx_descriptor *
iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
unsigned long flags)
@@ -1050,7 +1017,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
- dma_addr_t dma_addr, dest_dma;
+ dma_addr_t dest_dma;
struct dma_async_tx_descriptor *tx;
struct dma_chan *dma_chan;
dma_cookie_t cookie;
@@ -1176,33 +1143,6 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
goto free_resources;
}
- /* test memset */
- dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
- PAGE_SIZE, DMA_FROM_DEVICE);
- tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-
- cookie = iop_adma_tx_submit(tx);
- iop_adma_issue_pending(dma_chan);
- msleep(8);
-
- if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
- dev_err(dma_chan->device->dev,
- "Self-test memset timed out, disabling\n");
- err = -ENODEV;
- goto free_resources;
- }
-
- for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
- u32 *ptr = page_address(dest);
- if (ptr[i]) {
- dev_err(dma_chan->device->dev,
- "Self-test memset failed compare, disabling\n");
- err = -ENODEV;
- goto free_resources;
- }
- }
-
/* test for non-zero parity sum */
zero_sum_result = 0;
for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
@@ -1487,8 +1427,6 @@ static int iop_adma_probe(struct platform_device *pdev)
/* set prep routines based on capability */
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
- if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
- dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset;
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
dma_dev->max_xor = iop_adma_get_max_xor();
dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
@@ -1556,8 +1494,7 @@ static int iop_adma_probe(struct platform_device *pdev)
goto err_free_iop_chan;
}
- if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
- dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
+ if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
ret = iop_adma_xor_val_self_test(adev);
dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
if (ret)
@@ -1579,12 +1516,11 @@ static int iop_adma_probe(struct platform_device *pdev)
goto err_free_iop_chan;
}
- dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s%s)\n",
+ dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s)\n",
dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
- dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 43d5a6c33297..9b9366537d73 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -154,6 +154,10 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac)
{
writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
tdmac->reg_base + TDCR);
+
+ /* disable irq */
+ writel(0, tdmac->reg_base + TDIMR);
+
tdmac->status = DMA_SUCCESS;
}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index d64ae14f2706..200f1a3c9a44 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -89,11 +89,6 @@ static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
hw_desc->phy_next_desc = 0;
}
-static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
-{
- desc->value = val;
-}
-
static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
dma_addr_t addr)
{
@@ -128,22 +123,6 @@ static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
}
-static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
-{
- __raw_writel(desc_addr, XOR_DEST_POINTER(chan));
-}
-
-static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
-{
- __raw_writel(block_size, XOR_BLOCK_SIZE(chan));
-}
-
-static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
-{
- __raw_writel(value, XOR_INIT_VALUE_LOW(chan));
- __raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
-}
-
static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
u32 val = __raw_readl(XOR_INTR_MASK(chan));
@@ -186,8 +165,6 @@ static int mv_can_chain(struct mv_xor_desc_slot *desc)
if (chain_old_tail->type != desc->type)
return 0;
- if (desc->type == DMA_MEMSET)
- return 0;
return 1;
}
@@ -205,9 +182,6 @@ static void mv_set_mode(struct mv_xor_chan *chan,
case DMA_MEMCPY:
op_mode = XOR_OPERATION_MODE_MEMCPY;
break;
- case DMA_MEMSET:
- op_mode = XOR_OPERATION_MODE_MEMSET;
- break;
default:
dev_err(mv_chan_to_devp(chan),
"error: unsupported operation %d\n",
@@ -274,18 +248,9 @@ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
if (sw_desc->type != mv_chan->current_type)
mv_set_mode(mv_chan, sw_desc->type);
- if (sw_desc->type == DMA_MEMSET) {
- /* for memset requests we need to program the engine, no
- * descriptors used.
- */
- struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
- mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
- mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
- mv_chan_set_value(mv_chan, sw_desc->value);
- } else {
- /* set the hardware chain */
- mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
- }
+ /* set the hardware chain */
+ mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
+
mv_chan->pending += sw_desc->slot_cnt;
mv_xor_issue_pending(&mv_chan->dmachan);
}
@@ -688,43 +653,6 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
}
static struct dma_async_tx_descriptor *
-mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
- size_t len, unsigned long flags)
-{
- struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
- struct mv_xor_desc_slot *sw_desc, *grp_start;
- int slot_cnt;
-
- dev_dbg(mv_chan_to_devp(mv_chan),
- "%s dest: %x len: %u flags: %ld\n",
- __func__, dest, len, flags);
- if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
- return NULL;
-
- BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
-
- spin_lock_bh(&mv_chan->lock);
- slot_cnt = mv_chan_memset_slot_count(len);
- sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
- if (sw_desc) {
- sw_desc->type = DMA_MEMSET;
- sw_desc->async_tx.flags = flags;
- grp_start = sw_desc->group_head;
- mv_desc_init(grp_start, flags);
- mv_desc_set_byte_count(grp_start, len);
- mv_desc_set_dest_addr(sw_desc->group_head, dest);
- mv_desc_set_block_fill_val(grp_start, value);
- sw_desc->unmap_src_cnt = 1;
- sw_desc->unmap_len = len;
- }
- spin_unlock_bh(&mv_chan->lock);
- dev_dbg(mv_chan_to_devp(mv_chan),
- "%s sw_desc %p async_tx %p \n",
- __func__, sw_desc, &sw_desc->async_tx);
- return sw_desc ? &sw_desc->async_tx : NULL;
-}
-
-static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags)
{
@@ -1137,8 +1065,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
/* set prep routines based on capability */
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
- if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
- dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
dma_dev->max_xor = 8;
dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
@@ -1187,9 +1113,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
goto err_free_irq;
}
- dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s%s)\n",
+ dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
- dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
@@ -1298,8 +1223,6 @@ static int mv_xor_probe(struct platform_device *pdev)
dma_cap_set(DMA_MEMCPY, cap_mask);
if (of_property_read_bool(np, "dmacap,xor"))
dma_cap_set(DMA_XOR, cap_mask);
- if (of_property_read_bool(np, "dmacap,memset"))
- dma_cap_set(DMA_MEMSET, cap_mask);
if (of_property_read_bool(np, "dmacap,interrupt"))
dma_cap_set(DMA_INTERRUPT, cap_mask);
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index c632a4761fcf..c619359cb7fe 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -31,7 +31,6 @@
#define XOR_OPERATION_MODE_XOR 0
#define XOR_OPERATION_MODE_MEMCPY 2
-#define XOR_OPERATION_MODE_MEMSET 4
#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4))
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index b48a79c28845..719593002ab7 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -693,7 +693,7 @@ static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param)
return true;
}
-struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
+static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data;
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 7aa0864cd487..75334bdd2c56 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -35,8 +35,7 @@ static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec)
struct of_dma *ofdma;
list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
- if ((ofdma->of_node == dma_spec->np) &&
- (ofdma->of_dma_nbcells == dma_spec->args_count))
+ if (ofdma->of_node == dma_spec->np)
return ofdma;
pr_debug("%s: can't find DMA controller %s\n", __func__,
@@ -64,8 +63,6 @@ int of_dma_controller_register(struct device_node *np,
void *data)
{
struct of_dma *ofdma;
- int nbcells;
- const __be32 *prop;
if (!np || !of_dma_xlate) {
pr_err("%s: not enough information provided\n", __func__);
@@ -76,19 +73,7 @@ int of_dma_controller_register(struct device_node *np,
if (!ofdma)
return -ENOMEM;
- prop = of_get_property(np, "#dma-cells", NULL);
- if (prop)
- nbcells = be32_to_cpup(prop);
-
- if (!prop || !nbcells) {
- pr_err("%s: #dma-cells property is missing or invalid\n",
- __func__);
- kfree(ofdma);
- return -EINVAL;
- }
-
ofdma->of_node = np;
- ofdma->of_dma_nbcells = nbcells;
ofdma->of_dma_xlate = of_dma_xlate;
ofdma->of_dma_data = data;
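With the central #dma-cells check gone, validating the specifier length is now each xlate callback's job, as dw_dma_of_xlate() and sdma_xlate() earlier in this series already do. The expected shape of any new xlate, sketched:

static struct dma_chan *my_xlate(struct of_phandle_args *dma_spec,
				 struct of_dma *ofdma)
{
	/* validate here; the core no longer compares against #dma-cells */
	if (dma_spec->args_count != 1)	/* this controller wants one cell */
		return NULL;

	/* decode dma_spec->args[0] and request a matching channel */
	return NULL;	/* sketch only */
}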
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index a17553f7c028..593827b3fdd4 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -157,7 +157,6 @@ enum pl330_reqtype {
#define PERIPH_REV_R0P0 0
#define PERIPH_REV_R1P0 1
#define PERIPH_REV_R1P1 2
-#define PCELL_ID 0xff0
#define CR0_PERIPH_REQ_SET (1 << 0)
#define CR0_BOOT_EN_SET (1 << 1)
@@ -193,8 +192,6 @@ enum pl330_reqtype {
#define INTEG_CFG 0x0
#define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12))
-#define PCELL_ID_VAL 0xb105f00d
-
#define PL330_STATE_STOPPED (1 << 0)
#define PL330_STATE_EXECUTING (1 << 1)
#define PL330_STATE_WFE (1 << 2)
@@ -292,7 +289,6 @@ static unsigned cmd_line;
/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
u32 periph_id;
- u32 pcell_id;
#define DMAC_MODE_NS (1 << 0)
unsigned int mode;
unsigned int data_bus_width:10; /* In number of bits */
@@ -505,7 +501,7 @@ struct pl330_dmac {
/* Maximum possible events/irqs */
int events[32];
/* BUS address of MicroCode buffer */
- u32 mcode_bus;
+ dma_addr_t mcode_bus;
/* CPU address of MicroCode buffer */
void *mcode_cpu;
/* List of all Channel threads */
@@ -650,19 +646,6 @@ static inline bool _manager_ns(struct pl330_thread *thrd)
return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
}
-static inline u32 get_id(struct pl330_info *pi, u32 off)
-{
- void __iomem *regs = pi->base;
- u32 id = 0;
-
- id |= (readb(regs + off + 0x0) << 0);
- id |= (readb(regs + off + 0x4) << 8);
- id |= (readb(regs + off + 0x8) << 16);
- id |= (readb(regs + off + 0xc) << 24);
-
- return id;
-}
-
static inline u32 get_revision(u32 periph_id)
{
return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
@@ -1986,9 +1969,6 @@ static void read_dmac_config(struct pl330_info *pi)
pi->pcfg.num_events = val;
pi->pcfg.irq_ns = readl(regs + CR3);
-
- pi->pcfg.periph_id = get_id(pi, PERIPH_ID);
- pi->pcfg.pcell_id = get_id(pi, PCELL_ID);
}
static inline void _reset_thread(struct pl330_thread *thrd)
@@ -2098,10 +2078,8 @@ static int pl330_add(struct pl330_info *pi)
regs = pi->base;
/* Check if we can handle this DMAC */
- if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL
- || get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
- dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
- get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID));
+ if ((pi->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
+ dev_err(pi->dev, "PERIPH_ID 0x%x !\n", pi->pcfg.periph_id);
return -EINVAL;
}
@@ -2485,10 +2463,10 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
struct dma_pl330_chan *pch = to_pchan(chan);
unsigned long flags;
- spin_lock_irqsave(&pch->lock, flags);
-
tasklet_kill(&pch->task);
+ spin_lock_irqsave(&pch->lock, flags);
+
pl330_release_channel(pch->pl330_chid);
pch->pl330_chid = NULL;
@@ -2916,6 +2894,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
if (ret)
return ret;
+ pi->pcfg.periph_id = adev->periphid;
ret = pl330_add(pi);
if (ret)
goto probe_err1;
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 5d3d95569a1e..370ff8265630 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -2323,47 +2323,6 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
}
/**
- * ppc440spe_adma_prep_dma_memset - prepare CDB for a MEMSET operation
- */
-static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset(
- struct dma_chan *chan, dma_addr_t dma_dest, int value,
- size_t len, unsigned long flags)
-{
- struct ppc440spe_adma_chan *ppc440spe_chan;
- struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
- int slot_cnt, slots_per_op;
-
- ppc440spe_chan = to_ppc440spe_adma_chan(chan);
-
- if (unlikely(!len))
- return NULL;
-
- BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
-
- spin_lock_bh(&ppc440spe_chan->lock);
-
- dev_dbg(ppc440spe_chan->device->common.dev,
- "ppc440spe adma%d: %s cal: %u len: %u int_en %d\n",
- ppc440spe_chan->device->id, __func__, value, len,
- flags & DMA_PREP_INTERRUPT ? 1 : 0);
-
- slot_cnt = slots_per_op = 1;
- sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
- slots_per_op);
- if (sw_desc) {
- group_start = sw_desc->group_head;
- ppc440spe_desc_init_memset(group_start, value, flags);
- ppc440spe_adma_set_dest(group_start, dma_dest, 0);
- ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
- sw_desc->unmap_len = len;
- sw_desc->async_tx.flags = flags;
- }
- spin_unlock_bh(&ppc440spe_chan->lock);
-
- return sw_desc ? &sw_desc->async_tx : NULL;
-}
-
-/**
* ppc440spe_adma_prep_dma_xor - prepare CDB for a XOR operation
*/
static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
@@ -4125,7 +4084,6 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
case PPC440SPE_DMA1_ID:
dma_cap_set(DMA_MEMCPY, adev->common.cap_mask);
dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
- dma_cap_set(DMA_MEMSET, adev->common.cap_mask);
dma_cap_set(DMA_PQ, adev->common.cap_mask);
dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask);
dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask);
@@ -4151,10 +4109,6 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
adev->common.device_prep_dma_memcpy =
ppc440spe_adma_prep_dma_memcpy;
}
- if (dma_has_cap(DMA_MEMSET, adev->common.cap_mask)) {
- adev->common.device_prep_dma_memset =
- ppc440spe_adma_prep_dma_memset;
- }
if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) {
adev->common.max_xor = XOR_MAX_OPS;
adev->common.device_prep_dma_xor =
@@ -4217,7 +4171,6 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "",
dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : "",
dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "",
- dma_has_cap(DMA_MEMSET, adev->common.cap_mask) ? "memset " : "",
dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "intr " : "");
}
@@ -4481,7 +4434,7 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev)
adev->dev = &ofdev->dev;
adev->common.dev = &ofdev->dev;
INIT_LIST_HEAD(&adev->common.channels);
- dev_set_drvdata(&ofdev->dev, adev);
+ platform_set_drvdata(ofdev, adev);
/* create a channel */
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
@@ -4594,14 +4547,13 @@ out:
*/
static int ppc440spe_adma_remove(struct platform_device *ofdev)
{
- struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev);
+ struct ppc440spe_adma_device *adev = platform_get_drvdata(ofdev);
struct device_node *np = ofdev->dev.of_node;
struct resource res;
struct dma_chan *chan, *_chan;
struct ppc_dma_chan_ref *ref, *_ref;
struct ppc440spe_adma_chan *ppc440spe_chan;
- dev_set_drvdata(&ofdev->dev, NULL);
if (adev->id < PPC440SPE_ADMA_ENGINES_NUM)
ppc440spe_adma_devices[adev->id] = -1;
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index c07ca4612e46..c962138dde96 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -1,3 +1,3 @@
-obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o
+obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o
obj-$(CONFIG_SH_DMAE) += shdma.o
obj-$(CONFIG_SUDMAC) += sudmac.o
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 4acb85a10250..28ca36121631 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -175,7 +175,18 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
{
struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
const struct shdma_ops *ops = sdev->ops;
- int ret;
+ int ret, match;
+
+ if (schan->dev->of_node) {
+ match = schan->hw_req;
+ ret = ops->set_slave(schan, match, true);
+ if (ret < 0)
+ return ret;
+
+ slave_id = schan->slave_id;
+ } else {
+ match = slave_id;
+ }
if (slave_id < 0 || slave_id >= slave_num)
return -EINVAL;
@@ -183,7 +194,7 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
if (test_and_set_bit(slave_id, shdma_slave_used))
return -EBUSY;
- ret = ops->set_slave(schan, slave_id, false);
+ ret = ops->set_slave(schan, match, false);
if (ret < 0) {
clear_bit(slave_id, shdma_slave_used);
return ret;
@@ -206,23 +217,26 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
* services would have to provide their own filters, which first would check
* the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
* this, and only then, in case of a match, call this common filter.
+ * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
+ * In that case the MID-RID value is used for slave channel filtering and is
+ * passed to this function in the "arg" parameter.
*/
bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
struct shdma_chan *schan = to_shdma_chan(chan);
struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
const struct shdma_ops *ops = sdev->ops;
- int slave_id = (int)arg;
+ int match = (int)arg;
int ret;
- if (slave_id < 0)
+ if (match < 0)
/* No slave requested - arbitrary channel */
return true;
- if (slave_id >= slave_num)
+ if (!schan->dev->of_node && match >= slave_num)
return false;
- ret = ops->set_slave(schan, slave_id, true);
+ ret = ops->set_slave(schan, match, true);
if (ret < 0)
return false;
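As NOTE 2 spells out, the filter's arg now carries one of two meanings: a slave ID from board code, or a MID/RID packed in by the DT xlate. The legacy, non-DT call looks like this sketch; MY_SLAVE_ID is a hypothetical board constant:

static struct dma_chan *request_by_slave_id(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/* non-DT: arg is a slave ID and is range-checked against slave_num */
	return dma_request_channel(mask, shdma_chan_filter,
				   (void *)(unsigned long)MY_SLAVE_ID);
}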
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
new file mode 100644
index 000000000000..11bcb05cd79c
--- /dev/null
+++ b/drivers/dma/sh/shdma-of.c
@@ -0,0 +1,82 @@
+/*
+ * SHDMA Device Tree glue
+ *
+ * Copyright (C) 2013 Renesas Electronics Inc.
+ * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/shdma-base.h>
+
+#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
+
+static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ u32 id = dma_spec->args[0];
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ dma_cap_zero(mask);
+ /* Only slave DMA channels can be allocated via DT */
+ dma_cap_set(DMA_SLAVE, mask);
+
+ chan = dma_request_channel(mask, shdma_chan_filter, (void *)id);
+ if (chan)
+ to_shdma_chan(chan)->hw_req = id;
+
+ return chan;
+}
+
+static int shdma_of_probe(struct platform_device *pdev)
+{
+ const struct of_dev_auxdata *lookup = pdev->dev.platform_data;
+ int ret;
+
+ if (!lookup)
+ return -EINVAL;
+
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ shdma_of_xlate, pdev);
+ if (ret < 0)
+ return ret;
+
+ ret = of_platform_populate(pdev->dev.of_node, NULL, lookup, &pdev->dev);
+ if (ret < 0)
+ of_dma_controller_free(pdev->dev.of_node);
+
+ return ret;
+}
+
+static const struct of_device_id shdma_of_match[] = {
+ { .compatible = "renesas,shdma-mux", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, shdma_of_match);
+
+static struct platform_driver shdma_of = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "shdma-of",
+ .of_match_table = shdma_of_match,
+ },
+ .probe = shdma_of_probe,
+};
+
+module_platform_driver(shdma_of);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SH-DMA driver DT glue");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
index b70709b030d8..b67f45f5c271 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdma.c
@@ -301,20 +301,32 @@ static void sh_dmae_setup_xfer(struct shdma_chan *schan,
}
}
+/*
+ * Find a slave channel configuration from the controller list by either a
+ * slave ID in the non-DT case, or by a MID/RID value in the DT case.
+ */
static const struct sh_dmae_slave_config *dmae_find_slave(
- struct sh_dmae_chan *sh_chan, int slave_id)
+ struct sh_dmae_chan *sh_chan, int match)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
struct sh_dmae_pdata *pdata = shdev->pdata;
const struct sh_dmae_slave_config *cfg;
int i;
- if (slave_id >= SH_DMA_SLAVE_NUMBER)
- return NULL;
+ if (!sh_chan->shdma_chan.dev->of_node) {
+ if (match >= SH_DMA_SLAVE_NUMBER)
+ return NULL;
- for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
- if (cfg->slave_id == slave_id)
- return cfg;
+ for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+ if (cfg->slave_id == match)
+ return cfg;
+ } else {
+ for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+ if (cfg->mid_rid == match) {
+ sh_chan->shdma_chan.slave_id = cfg->slave_id;
+ return cfg;
+ }
+ }
return NULL;
}
@@ -729,7 +741,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
goto eshdma;
/* platform data */
- shdev->pdata = pdev->dev.platform_data;
+ shdev->pdata = pdata;
if (pdata->chcr_offset)
shdev->chcr_offset = pdata->chcr_offset;
@@ -920,11 +932,18 @@ static int sh_dmae_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id sh_dmae_of_match[] = {
+ { .compatible = "renesas,shdma", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
+
static struct platform_driver sh_dmae_driver = {
.driver = {
.owner = THIS_MODULE,
.pm = &sh_dmae_pm,
.name = SH_DMAE_DRV_NAME,
+ .of_match_table = sh_dmae_of_match,
},
.remove = sh_dmae_remove,
.shutdown = sh_dmae_shutdown,
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 1765a0a2736d..716b23e4f327 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -466,12 +466,29 @@ static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
unsigned long flags;
enum dma_status ret;
+ struct sirfsoc_dma_desc *sdesc;
+ int cid = schan->chan.chan_id;
+ unsigned long dma_pos;
+ unsigned long dma_request_bytes;
+ unsigned long residue;
spin_lock_irqsave(&schan->lock, flags);
+
+ sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
+ node);
+ dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
+ (sdesc->width * SIRFSOC_DMA_WORD_LEN);
+
ret = dma_cookie_status(chan, cookie, txstate);
+ dma_pos = readl_relaxed(sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR)
+ << 2;
+ residue = dma_request_bytes - (dma_pos - sdesc->addr);
+ dma_set_residue(txstate, residue);
+
spin_unlock_irqrestore(&schan->lock, flags);
return ret;
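The residue math above reads the channel's current address register (a word address, hence the << 2) and subtracts progress from the total request size. A worked sketch with illustrative numbers:

static unsigned long residue_example(void)
{
	unsigned long addr = 0x1000;		/* descriptor start address */
	unsigned long request_bytes = 32;	/* (xlen+1) * (ylen+1) * width in bytes */
	unsigned long dma_pos = 0x1014;		/* HW-reported word addr 0x405 << 2 */

	/* 32 - (0x1014 - 0x1000) = 32 - 20 = 12 bytes still outstanding */
	return request_bytes - (dma_pos - addr);
}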
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 71bf4ec300ea..5ab5880d5c90 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -17,6 +17,8 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>
@@ -45,15 +47,63 @@
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
+/* Max number of logical channels per physical channel */
+#define D40_MAX_LOG_CHAN_PER_PHY 32
+
/* Attempts before giving up to trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256
/* Bit markings for allocation map */
-#define D40_ALLOC_FREE (1 << 31)
-#define D40_ALLOC_PHY (1 << 30)
+#define D40_ALLOC_FREE BIT(31)
+#define D40_ALLOC_PHY BIT(30)
#define D40_ALLOC_LOG_FREE 0
-#define MAX(a, b) (((a) < (b)) ? (b) : (a))
+#define D40_MEMCPY_MAX_CHANS 8
+
+/* Reserved event lines for memcpy only. */
+#define DB8500_DMA_MEMCPY_EV_0 51
+#define DB8500_DMA_MEMCPY_EV_1 56
+#define DB8500_DMA_MEMCPY_EV_2 57
+#define DB8500_DMA_MEMCPY_EV_3 58
+#define DB8500_DMA_MEMCPY_EV_4 59
+#define DB8500_DMA_MEMCPY_EV_5 60
+
+static int dma40_memcpy_channels[] = {
+ DB8500_DMA_MEMCPY_EV_0,
+ DB8500_DMA_MEMCPY_EV_1,
+ DB8500_DMA_MEMCPY_EV_2,
+ DB8500_DMA_MEMCPY_EV_3,
+ DB8500_DMA_MEMCPY_EV_4,
+ DB8500_DMA_MEMCPY_EV_5,
+};
+
+/* Default configuration for physical memcpy */
+static struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
+ .mode = STEDMA40_MODE_PHYSICAL,
+ .dir = DMA_MEM_TO_MEM,
+
+ .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .src_info.psize = STEDMA40_PSIZE_PHY_1,
+ .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+
+ .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .dst_info.psize = STEDMA40_PSIZE_PHY_1,
+ .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+};
+
+/* Default configuration for logical memcpy */
+static struct stedma40_chan_cfg dma40_memcpy_conf_log = {
+ .mode = STEDMA40_MODE_LOGICAL,
+ .dir = DMA_MEM_TO_MEM,
+
+ .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .src_info.psize = STEDMA40_PSIZE_LOG_1,
+ .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+
+ .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .dst_info.psize = STEDMA40_PSIZE_LOG_1,
+ .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+};
/**
* enum 40_command - The different commands and/or statuses.
@@ -171,6 +221,9 @@ static u32 d40_backup_regs_chan[] = {
D40_CHAN_REG_SDLNK,
};
+#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
+ BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)
+
/**
* struct d40_interrupt_lookup - lookup table for interrupt handler
*
@@ -471,6 +524,8 @@ struct d40_gen_dmac {
* @phy_start: Physical memory start of the DMA registers.
* @phy_size: Size of the DMA register map.
* @irq: The IRQ number.
+ * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
+ * transfers).
* @num_phy_chans: The number of physical channels. Read from HW. This
* is the number of available channels for this driver, not counting "Secure
* mode" allocated physical channels.
@@ -514,6 +569,7 @@ struct d40_base {
phys_addr_t phy_start;
resource_size_t phy_size;
int irq;
+ int num_memcpy_chans;
int num_phy_chans;
int num_log_chans;
struct device_dma_parameters dma_parms;
@@ -534,7 +590,7 @@ struct d40_base {
resource_size_t lcpa_size;
struct kmem_cache *desc_slab;
u32 reg_val_backup[BACKUP_REGS_SZ];
- u32 reg_val_backup_v4[MAX(BACKUP_REGS_SZ_V4A, BACKUP_REGS_SZ_V4B)];
+ u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
u32 *reg_val_backup_chan;
u16 gcc_pwr_off_mask;
bool initialized;
@@ -792,7 +848,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
* that uses linked lists.
*/
if (!(chan->phy_chan->use_soft_lli &&
- chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM))
+ chan->dma_cfg.dir == DMA_DEV_TO_MEM))
curr_lcla = d40_lcla_alloc_one(chan, desc);
first_lcla = curr_lcla;
@@ -954,20 +1010,21 @@ static int d40_psize_2_burst_size(bool is_log, int psize)
/*
* The dma only supports transmitting packages up to
- * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
- * dma elements required to send the entire sg list
+ * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is stored in bytes.
+ *
+ * Calculate the total number of DMA elements required to send the entire sg list.
*/
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
int dmalen;
u32 max_w = max(data_width1, data_width2);
u32 min_w = min(data_width1, data_width2);
- u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
+ u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);
if (seg_max > STEDMA40_MAX_SEG_SIZE)
- seg_max -= (1 << max_w);
+ seg_max -= max_w;
- if (!IS_ALIGNED(size, 1 << max_w))
+ if (!IS_ALIGNED(size, max_w))
return -EINVAL;
if (size <= seg_max)
@@ -1257,21 +1314,17 @@ static void __d40_config_set_event(struct d40_chan *d40c,
static void d40_config_set_event(struct d40_chan *d40c,
enum d40_events event_type)
{
- /* Enable event line connected to device (or memcpy) */
- if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
- (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
- u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
+ u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
+ /* Enable event line connected to device (or memcpy) */
+ if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
+ (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
__d40_config_set_event(d40c, event_type, event,
D40_CHAN_REG_SSLNK);
- }
-
- if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
- u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
+ if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
__d40_config_set_event(d40c, event_type, event,
D40_CHAN_REG_SDLNK);
- }
}
static u32 d40_chan_has_events(struct d40_chan *d40c)
@@ -1417,7 +1470,7 @@ static u32 d40_residue(struct d40_chan *d40c)
>> D40_SREG_ELEM_PHY_ECNT_POS;
}
- return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
+ return num_elt * d40c->dma_cfg.dst_info.data_width;
}
static bool d40_tx_is_linked(struct d40_chan *d40c)
@@ -1693,7 +1746,7 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
}
/* ACK interrupt */
- writel(1 << idx, base->virtbase + il[row].clr);
+ writel(BIT(idx), base->virtbase + il[row].clr);
spin_lock(&d40c->lock);
@@ -1715,8 +1768,6 @@ static int d40_validate_conf(struct d40_chan *d40c,
struct stedma40_chan_cfg *conf)
{
int res = 0;
- u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
- u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
if (!conf->dir) {
@@ -1724,48 +1775,14 @@ static int d40_validate_conf(struct d40_chan *d40c,
res = -EINVAL;
}
- if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
- d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
- d40c->runtime_addr == 0) {
-
- chan_err(d40c, "Invalid TX channel address (%d)\n",
- conf->dst_dev_type);
- res = -EINVAL;
- }
-
- if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
- d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
- d40c->runtime_addr == 0) {
- chan_err(d40c, "Invalid RX channel address (%d)\n",
- conf->src_dev_type);
- res = -EINVAL;
- }
-
- if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
- dst_event_group == STEDMA40_DEV_DST_MEMORY) {
- chan_err(d40c, "Invalid dst\n");
+ if ((is_log && conf->dev_type > d40c->base->num_log_chans) ||
+ (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
+ (conf->dev_type < 0)) {
+ chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
res = -EINVAL;
}
- if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
- src_event_group == STEDMA40_DEV_SRC_MEMORY) {
- chan_err(d40c, "Invalid src\n");
- res = -EINVAL;
- }
-
- if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
- dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
- chan_err(d40c, "No event line\n");
- res = -EINVAL;
- }
-
- if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
- (src_event_group != dst_event_group)) {
- chan_err(d40c, "Invalid event group\n");
- res = -EINVAL;
- }
-
- if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
+ if (conf->dir == DMA_DEV_TO_DEV) {
/*
* DMAC HW supports it. Will be added to this driver,
* in case any dma client requires it.
@@ -1775,9 +1792,9 @@ static int d40_validate_conf(struct d40_chan *d40c,
}
if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
- (1 << conf->src_info.data_width) !=
+ conf->src_info.data_width !=
d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
- (1 << conf->dst_info.data_width)) {
+ conf->dst_info.data_width) {
/*
* The DMAC hardware only supports
* src (burst x width) == dst (burst x width)
@@ -1819,8 +1836,8 @@ static bool d40_alloc_mask_set(struct d40_phy_res *phy,
if (phy->allocated_src == D40_ALLOC_FREE)
phy->allocated_src = D40_ALLOC_LOG_FREE;
- if (!(phy->allocated_src & (1 << log_event_line))) {
- phy->allocated_src |= 1 << log_event_line;
+ if (!(phy->allocated_src & BIT(log_event_line))) {
+ phy->allocated_src |= BIT(log_event_line);
goto found;
} else
goto not_found;
@@ -1831,8 +1848,8 @@ static bool d40_alloc_mask_set(struct d40_phy_res *phy,
if (phy->allocated_dst == D40_ALLOC_FREE)
phy->allocated_dst = D40_ALLOC_LOG_FREE;
- if (!(phy->allocated_dst & (1 << log_event_line))) {
- phy->allocated_dst |= 1 << log_event_line;
+ if (!(phy->allocated_dst & BIT(log_event_line))) {
+ phy->allocated_dst |= BIT(log_event_line);
goto found;
} else
goto not_found;
@@ -1862,11 +1879,11 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
/* Logical channel */
if (is_src) {
- phy->allocated_src &= ~(1 << log_event_line);
+ phy->allocated_src &= ~BIT(log_event_line);
if (phy->allocated_src == D40_ALLOC_LOG_FREE)
phy->allocated_src = D40_ALLOC_FREE;
} else {
- phy->allocated_dst &= ~(1 << log_event_line);
+ phy->allocated_dst &= ~BIT(log_event_line);
if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
phy->allocated_dst = D40_ALLOC_FREE;
}
@@ -1882,7 +1899,7 @@ out:
static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
{
- int dev_type;
+ int dev_type = d40c->dma_cfg.dev_type;
int event_group;
int event_line;
struct d40_phy_res *phys;
@@ -1896,14 +1913,12 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
phys = d40c->base->phy_res;
num_phy_chans = d40c->base->num_phy_chans;
- if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
- dev_type = d40c->dma_cfg.src_dev_type;
+ if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
log_num = 2 * dev_type;
is_src = true;
- } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
- d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
+ } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
+ d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
/* dst event lines are used for logical memcpy */
- dev_type = d40c->dma_cfg.dst_dev_type;
log_num = 2 * dev_type + 1;
is_src = false;
} else
@@ -1913,7 +1928,7 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
event_line = D40_TYPE_TO_EVENT(dev_type);
if (!is_log) {
- if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
+ if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
/* Find physical half channel */
if (d40c->dma_cfg.use_fixed_channel) {
i = d40c->dma_cfg.phy_channel;
@@ -2014,14 +2029,23 @@ static int d40_config_memcpy(struct d40_chan *d40c)
dma_cap_mask_t cap = d40c->chan.device->cap_mask;
if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
- d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
- d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
- d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
- memcpy[d40c->chan.chan_id];
+ d40c->dma_cfg = dma40_memcpy_conf_log;
+ d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
+
+ d40_log_cfg(&d40c->dma_cfg,
+ &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
} else if (dma_has_cap(DMA_MEMCPY, cap) &&
dma_has_cap(DMA_SLAVE, cap)) {
- d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
+ d40c->dma_cfg = dma40_memcpy_conf_phy;
+
+ /* Generate interrupt at end of transfer or relink. */
+ d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
+
+ /* Generate interrupt on error. */
+ d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
+ d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
+
} else {
chan_err(d40c, "No memcpy\n");
return -EINVAL;
@@ -2034,7 +2058,7 @@ static int d40_free_dma(struct d40_chan *d40c)
{
int res = 0;
- u32 event;
+ u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
struct d40_phy_res *phy = d40c->phy_chan;
bool is_src;
@@ -2052,14 +2076,12 @@ static int d40_free_dma(struct d40_chan *d40c)
return -EINVAL;
}
- if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
- d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
- event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
+ if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
+ d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
is_src = false;
- } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
- event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
+ else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
is_src = true;
- } else {
+ else {
chan_err(d40c, "Unknown direction\n");
return -EINVAL;
}
@@ -2100,7 +2122,7 @@ static bool d40_is_paused(struct d40_chan *d40c)
unsigned long flags;
void __iomem *active_reg;
u32 status;
- u32 event;
+ u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
spin_lock_irqsave(&d40c->lock, flags);
@@ -2119,12 +2141,10 @@ static bool d40_is_paused(struct d40_chan *d40c)
goto _exit;
}
- if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
- d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
- event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
+ if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
+ d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
status = readl(chanbase + D40_CHAN_REG_SDLNK);
- } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
- event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
+ } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
status = readl(chanbase + D40_CHAN_REG_SSLNK);
} else {
chan_err(d40c, "Unknown direction\n");
@@ -2255,24 +2275,6 @@ err:
return NULL;
}
-static dma_addr_t
-d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
-{
- struct stedma40_platform_data *plat = chan->base->plat_data;
- struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
- dma_addr_t addr = 0;
-
- if (chan->runtime_addr)
- return chan->runtime_addr;
-
- if (direction == DMA_DEV_TO_MEM)
- addr = plat->dev_rx[cfg->src_dev_type];
- else if (direction == DMA_MEM_TO_DEV)
- addr = plat->dev_tx[cfg->dst_dev_type];
-
- return addr;
-}
-
static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
struct scatterlist *sg_dst, unsigned int sg_len,
@@ -2299,14 +2301,10 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
if (sg_next(&sg_src[sg_len - 1]) == sg_src)
desc->cyclic = true;
- if (direction != DMA_TRANS_NONE) {
- dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
-
- if (direction == DMA_DEV_TO_MEM)
- src_dev_addr = dev_addr;
- else if (direction == DMA_MEM_TO_DEV)
- dst_dev_addr = dev_addr;
- }
+ if (direction == DMA_DEV_TO_MEM)
+ src_dev_addr = chan->runtime_addr;
+ else if (direction == DMA_MEM_TO_DEV)
+ dst_dev_addr = chan->runtime_addr;
if (chan_is_logical(chan))
ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
@@ -2366,7 +2364,7 @@ static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
u32 rtreg;
u32 event = D40_TYPE_TO_EVENT(dev_type);
u32 group = D40_TYPE_TO_GROUP(dev_type);
- u32 bit = 1 << event;
+ u32 bit = BIT(event);
u32 prioreg;
struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
@@ -2397,13 +2395,57 @@ static void d40_set_prio_realtime(struct d40_chan *d40c)
if (d40c->base->rev < 3)
return;
- if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
- (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
- __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);
+ if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
+ (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
+ __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
- if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
- (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
- __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
+ if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) ||
+ (d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
+ __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
+}
+
+#define D40_DT_FLAGS_MODE(flags) ((flags >> 0) & 0x1)
+#define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1)
+#define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
+#define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
+
+static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct stedma40_chan_cfg cfg;
+ dma_cap_mask_t cap;
+ u32 flags;
+
+ memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));
+
+ dma_cap_zero(cap);
+ dma_cap_set(DMA_SLAVE, cap);
+
+ cfg.dev_type = dma_spec->args[0];
+ flags = dma_spec->args[2];
+
+ switch (D40_DT_FLAGS_MODE(flags)) {
+ case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
+ case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
+ }
+
+ switch (D40_DT_FLAGS_DIR(flags)) {
+ case 0:
+ cfg.dir = DMA_MEM_TO_DEV;
+ cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
+ break;
+ case 1:
+ cfg.dir = DMA_DEV_TO_MEM;
+ cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
+ break;
+ }
+
+ if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
+ cfg.phy_channel = dma_spec->args[1];
+ cfg.use_fixed_channel = true;
+ }
+
+ return dma_request_channel(cap, stedma40_filter, &cfg);
}
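A standalone sketch of how the third cell of a dma-spec decodes under the D40_DT_FLAGS_* macros above; the flags value is hypothetical:

#include <stdio.h>

#define D40_DT_FLAGS_MODE(flags)       ((flags >> 0) & 0x1)
#define D40_DT_FLAGS_DIR(flags)        ((flags >> 1) & 0x1)
#define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
#define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)

int main(void)
{
	unsigned int flags = 0xb;	/* hypothetical: fixed channel, dev-to-mem, physical */

	printf("mode:       %s\n",
	       D40_DT_FLAGS_MODE(flags) ? "physical" : "logical");
	printf("direction:  %s\n",
	       D40_DT_FLAGS_DIR(flags) ? "DMA_DEV_TO_MEM" : "DMA_MEM_TO_DEV");
	printf("big endian: %u\n", D40_DT_FLAGS_BIG_ENDIAN(flags));
	printf("fixed chan: %u\n", D40_DT_FLAGS_FIXED_CHAN(flags));
	return 0;
}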
/* DMA ENGINE functions */
@@ -2435,23 +2477,21 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
}
pm_runtime_get_sync(d40c->base->dev);
- /* Fill in basic CFG register values */
- d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
- &d40c->dst_def_cfg, chan_is_logical(d40c));
d40_set_prio_realtime(d40c);
if (chan_is_logical(d40c)) {
- d40_log_cfg(&d40c->dma_cfg,
- &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
-
- if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
+ if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
d40c->lcpa = d40c->base->lcpa_base +
- d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
+ d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
else
d40c->lcpa = d40c->base->lcpa_base +
- d40c->dma_cfg.dst_dev_type *
+ d40c->dma_cfg.dev_type *
D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
+
+ /* Unmask the Global Interrupt Mask. */
+ d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
+ d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
}
dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
@@ -2641,33 +2681,10 @@ static void d40_terminate_all(struct dma_chan *chan)
static int
dma40_config_to_halfchannel(struct d40_chan *d40c,
struct stedma40_half_channel_info *info,
- enum dma_slave_buswidth width,
u32 maxburst)
{
- enum stedma40_periph_data_width addr_width;
int psize;
- switch (width) {
- case DMA_SLAVE_BUSWIDTH_1_BYTE:
- addr_width = STEDMA40_BYTE_WIDTH;
- break;
- case DMA_SLAVE_BUSWIDTH_2_BYTES:
- addr_width = STEDMA40_HALFWORD_WIDTH;
- break;
- case DMA_SLAVE_BUSWIDTH_4_BYTES:
- addr_width = STEDMA40_WORD_WIDTH;
- break;
- case DMA_SLAVE_BUSWIDTH_8_BYTES:
- addr_width = STEDMA40_DOUBLEWORD_WIDTH;
- break;
- default:
- dev_err(d40c->base->dev,
- "illegal peripheral address width "
- "requested (%d)\n",
- width);
- return -EINVAL;
- }
-
if (chan_is_logical(d40c)) {
if (maxburst >= 16)
psize = STEDMA40_PSIZE_LOG_16;
@@ -2688,7 +2705,6 @@ dma40_config_to_halfchannel(struct d40_chan *d40c,
psize = STEDMA40_PSIZE_PHY_1;
}
- info->data_width = addr_width;
info->psize = psize;
info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
@@ -2712,21 +2728,14 @@ static int d40_set_runtime_config(struct dma_chan *chan,
dst_maxburst = config->dst_maxburst;
if (config->direction == DMA_DEV_TO_MEM) {
- dma_addr_t dev_addr_rx =
- d40c->base->plat_data->dev_rx[cfg->src_dev_type];
-
config_addr = config->src_addr;
- if (dev_addr_rx)
- dev_dbg(d40c->base->dev,
- "channel has a pre-wired RX address %08x "
- "overriding with %08x\n",
- dev_addr_rx, config_addr);
- if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
+
+ if (cfg->dir != DMA_DEV_TO_MEM)
dev_dbg(d40c->base->dev,
"channel was not configured for peripheral "
"to memory transfer (%d) overriding\n",
cfg->dir);
- cfg->dir = STEDMA40_PERIPH_TO_MEM;
+ cfg->dir = DMA_DEV_TO_MEM;
/* Configure the memory side */
if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
@@ -2735,21 +2744,14 @@ static int d40_set_runtime_config(struct dma_chan *chan,
dst_maxburst = src_maxburst;
} else if (config->direction == DMA_MEM_TO_DEV) {
- dma_addr_t dev_addr_tx =
- d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
-
config_addr = config->dst_addr;
- if (dev_addr_tx)
- dev_dbg(d40c->base->dev,
- "channel has a pre-wired TX address %08x "
- "overriding with %08x\n",
- dev_addr_tx, config_addr);
- if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
+
+ if (cfg->dir != DMA_MEM_TO_DEV)
dev_dbg(d40c->base->dev,
"channel was not configured for memory "
"to peripheral transfer (%d) overriding\n",
cfg->dir);
- cfg->dir = STEDMA40_MEM_TO_PERIPH;
+ cfg->dir = DMA_MEM_TO_DEV;
/* Configure the memory side */
if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
@@ -2763,6 +2765,11 @@ static int d40_set_runtime_config(struct dma_chan *chan,
return -EINVAL;
}
+ if (!config_addr) {
+ dev_err(d40c->base->dev, "no address supplied\n");
+ return -EINVAL;
+ }
+
if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
dev_err(d40c->base->dev,
"src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
@@ -2781,14 +2788,24 @@ static int d40_set_runtime_config(struct dma_chan *chan,
src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
}
+ /* Only valid widths are 1, 2, 4 and 8. */
+ if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
+ src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
+ dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
+ dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
+ ((src_addr_width > 1) && (src_addr_width & 1)) ||
+ ((dst_addr_width > 1) && (dst_addr_width & 1)))
+ return -EINVAL;
+
+ cfg->src_info.data_width = src_addr_width;
+ cfg->dst_info.data_width = dst_addr_width;
+
ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
- src_addr_width,
src_maxburst);
if (ret)
return ret;
ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
- dst_addr_width,
dst_maxburst);
if (ret)
return ret;
@@ -2797,8 +2814,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
if (chan_is_logical(d40c))
d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
else
- d40_phy_cfg(cfg, &d40c->src_def_cfg,
- &d40c->dst_def_cfg, false);
+ d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
/* These settings will take precedence later */
d40c->runtime_addr = config_addr;
@@ -2929,7 +2945,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
}
d40_chan_init(base, &base->dma_memcpy, base->log_chans,
- base->num_log_chans, base->plat_data->memcpy_len);
+ base->num_log_chans, base->num_memcpy_chans);
dma_cap_zero(base->dma_memcpy.cap_mask);
dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
@@ -3123,13 +3139,14 @@ static int __init d40_phy_res_init(struct d40_base *base)
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
- struct stedma40_platform_data *plat_data;
+ struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
struct clk *clk = NULL;
void __iomem *virtbase = NULL;
struct resource *res = NULL;
struct d40_base *base = NULL;
int num_log_chans = 0;
int num_phy_chans;
+ int num_memcpy_chans;
int clk_ret = -EINVAL;
int i;
u32 pid;
@@ -3189,8 +3206,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
* DB8540v1 has revision 4
*/
rev = AMBA_REV_BITS(pid);
-
- plat_data = pdev->dev.platform_data;
+ if (rev < 2) {
+ d40_err(&pdev->dev, "hardware revision: %d is not supported", rev);
+ goto failure;
+ }
/* The number of physical channels on this HW */
if (plat_data->num_of_phy_chans)
@@ -3198,26 +3217,20 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
else
num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
- dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n",
- rev, res->start, num_phy_chans);
-
- if (rev < 2) {
- d40_err(&pdev->dev, "hardware revision: %d is not supported",
- rev);
- goto failure;
- }
+ /* The number of channels used for memcpy */
+ if (plat_data->num_of_memcpy_chans)
+ num_memcpy_chans = plat_data->num_of_memcpy_chans;
+ else
+ num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);
- /* Count the number of logical channels in use */
- for (i = 0; i < plat_data->dev_len; i++)
- if (plat_data->dev_rx[i] != 0)
- num_log_chans++;
+ num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
- for (i = 0; i < plat_data->dev_len; i++)
- if (plat_data->dev_tx[i] != 0)
- num_log_chans++;
+ dev_info(&pdev->dev,
+ "hardware rev: %d @ 0x%x with %d physical and %d logical channels\n",
+ rev, res->start, num_phy_chans, num_log_chans);
base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
- (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
+ (num_phy_chans + num_log_chans + num_memcpy_chans) *
sizeof(struct d40_chan), GFP_KERNEL);
if (base == NULL) {
@@ -3227,6 +3240,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
base->rev = rev;
base->clk = clk;
+ base->num_memcpy_chans = num_memcpy_chans;
base->num_phy_chans = num_phy_chans;
base->num_log_chans = num_log_chans;
base->phy_start = res->start;
@@ -3278,17 +3292,11 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
if (!base->lookup_phy_chans)
goto failure;
- if (num_log_chans + plat_data->memcpy_len) {
- /*
- * The max number of logical channels are event lines for all
- * src devices and dst devices
- */
- base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
- sizeof(struct d40_chan *),
- GFP_KERNEL);
- if (!base->lookup_log_chans)
- goto failure;
- }
+ base->lookup_log_chans = kzalloc(num_log_chans *
+ sizeof(struct d40_chan *),
+ GFP_KERNEL);
+ if (!base->lookup_log_chans)
+ goto failure;
base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
sizeof(d40_backup_regs_chan),
@@ -3472,17 +3480,82 @@ failure:
return ret;
}
+static int __init d40_of_probe(struct platform_device *pdev,
+ struct device_node *np)
+{
+ struct stedma40_platform_data *pdata;
+ int num_phy = 0, num_memcpy = 0, num_disabled = 0;
+ const __be32 *list;
+
+ pdata = devm_kzalloc(&pdev->dev,
+ sizeof(struct stedma40_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ /* If absent, this value will be obtained from h/w. */
+ of_property_read_u32(np, "dma-channels", &num_phy);
+ if (num_phy > 0)
+ pdata->num_of_phy_chans = num_phy;
+
+ list = of_get_property(np, "memcpy-channels", &num_memcpy);
+ num_memcpy /= sizeof(*list);
+
+ if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
+ d40_err(&pdev->dev,
+ "Invalid number of memcpy channels specified (%d)\n",
+ num_memcpy);
+ return -EINVAL;
+ }
+ pdata->num_of_memcpy_chans = num_memcpy;
+
+ of_property_read_u32_array(np, "memcpy-channels",
+ dma40_memcpy_channels,
+ num_memcpy);
+
+ list = of_get_property(np, "disabled-channels", &num_disabled);
+ num_disabled /= sizeof(*list);
+
+ if (num_disabled > STEDMA40_MAX_PHYS || num_disabled < 0) {
+ d40_err(&pdev->dev,
+ "Invalid number of disabled channels specified (%d)\n",
+ num_disabled);
+ return -EINVAL;
+ }
+
+ of_property_read_u32_array(np, "disabled-channels",
+ pdata->disabled_channels,
+ num_disabled);
+ pdata->disabled_channels[num_disabled] = -1;
+
+ pdev->dev.platform_data = pdata;
+
+ return 0;
+}
+
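of_get_property() returns the raw property and reports its length in bytes, so dividing by sizeof(__be32) gives the cell counts used above. A standalone imitation (the property contents are hypothetical):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t __be32;	/* stand-in for the kernel type */

int main(void)
{
	/* Hypothetical "memcpy-channels" property: five 32-bit cells. */
	__be32 prop[] = { 56, 57, 58, 59, 60 };
	int num_memcpy = sizeof(prop);	/* of_get_property() reports bytes */

	num_memcpy /= sizeof(*prop);
	printf("%d memcpy channels\n", num_memcpy);	/* prints 5 */
	return 0;
}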
static int __init d40_probe(struct platform_device *pdev)
{
- int err;
+ struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
+ struct device_node *np = pdev->dev.of_node;
int ret = -ENOENT;
- struct d40_base *base;
+ struct d40_base *base = NULL;
struct resource *res = NULL;
int num_reserved_chans;
u32 val;
- base = d40_hw_detect_init(pdev);
+ if (!plat_data) {
+ if (np) {
+ if (d40_of_probe(pdev, np)) {
+ ret = -ENOMEM;
+ goto failure;
+ }
+ } else {
+ d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
+ goto failure;
+ }
+ }
+ base = d40_hw_detect_init(pdev);
if (!base)
goto failure;
@@ -3575,6 +3648,7 @@ static int __init d40_probe(struct platform_device *pdev)
base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
if (IS_ERR(base->lcpa_regulator)) {
d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
+ ret = PTR_ERR(base->lcpa_regulator);
base->lcpa_regulator = NULL;
goto failure;
}
@@ -3590,19 +3664,26 @@ static int __init d40_probe(struct platform_device *pdev)
}
base->initialized = true;
- err = d40_dmaengine_init(base, num_reserved_chans);
- if (err)
+ ret = d40_dmaengine_init(base, num_reserved_chans);
+ if (ret)
goto failure;
base->dev->dma_parms = &base->dma_parms;
- err = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
- if (err) {
+ ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
+ if (ret) {
d40_err(&pdev->dev, "Failed to set dma max seg size\n");
goto failure;
}
d40_hw_init(base);
+ if (np) {
+ ret = of_dma_controller_register(np, d40_xlate, NULL);
+ if (ret)
+ dev_err(&pdev->dev,
+ "could not register of_dma_controller\n");
+ }
+
dev_info(base->dev, "initialized\n");
return 0;
@@ -3656,11 +3737,17 @@ failure:
return ret;
}
+static const struct of_device_id d40_match[] = {
+ { .compatible = "stericsson,dma40", },
+ {}
+};
+
static struct platform_driver d40_driver = {
.driver = {
.owner = THIS_MODULE,
.name = D40_NAME,
.pm = DMA40_PM_OPS,
+ .of_match_table = d40_match,
},
};
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 7180e0d41722..27b818dee7c7 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -10,6 +10,18 @@
#include "ste_dma40_ll.h"
+u8 d40_width_to_bits(enum dma_slave_buswidth width)
+{
+ if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+ return STEDMA40_ESIZE_8_BIT;
+ else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
+ return STEDMA40_ESIZE_16_BIT;
+ else if (width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+ return STEDMA40_ESIZE_64_BIT;
+ else
+ return STEDMA40_ESIZE_32_BIT;
+}
+
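Note the fallback: every width other than 1, 2 and 8 bytes is mapped to the 32-bit encoding. A sketch of that behaviour, assuming (not shown in this diff) that the STEDMA40_ESIZE_* constants are the usual 0..3 log2 encodings:

#include <assert.h>

/* Assumed encodings; the real values live in ste_dma40_ll.h. */
enum { ESIZE_8_BIT, ESIZE_16_BIT, ESIZE_32_BIT, ESIZE_64_BIT };

static int width_to_bits(int width_bytes)
{
	if (width_bytes == 1)
		return ESIZE_8_BIT;
	else if (width_bytes == 2)
		return ESIZE_16_BIT;
	else if (width_bytes == 8)
		return ESIZE_64_BIT;
	else
		return ESIZE_32_BIT;	/* 4 bytes, and anything else */
}

int main(void)
{
	assert(width_to_bits(4) == ESIZE_32_BIT);
	assert(width_to_bits(3) == ESIZE_32_BIT);	/* fallback path */
	return 0;
}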
/* Sets up proper LCSP1 and LCSP3 register for a logical channel */
void d40_log_cfg(struct stedma40_chan_cfg *cfg,
u32 *lcsp1, u32 *lcsp3)
@@ -18,106 +30,100 @@ void d40_log_cfg(struct stedma40_chan_cfg *cfg,
u32 l1 = 0; /* src */
/* src is mem? -> increase address pos */
- if (cfg->dir == STEDMA40_MEM_TO_PERIPH ||
- cfg->dir == STEDMA40_MEM_TO_MEM)
- l1 |= 1 << D40_MEM_LCSP1_SCFG_INCR_POS;
+ if (cfg->dir == DMA_MEM_TO_DEV ||
+ cfg->dir == DMA_MEM_TO_MEM)
+ l1 |= BIT(D40_MEM_LCSP1_SCFG_INCR_POS);
/* dst is mem? -> increase address pos */
- if (cfg->dir == STEDMA40_PERIPH_TO_MEM ||
- cfg->dir == STEDMA40_MEM_TO_MEM)
- l3 |= 1 << D40_MEM_LCSP3_DCFG_INCR_POS;
+ if (cfg->dir == DMA_DEV_TO_MEM ||
+ cfg->dir == DMA_MEM_TO_MEM)
+ l3 |= BIT(D40_MEM_LCSP3_DCFG_INCR_POS);
/* src is hw? -> master port 1 */
- if (cfg->dir == STEDMA40_PERIPH_TO_MEM ||
- cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
- l1 |= 1 << D40_MEM_LCSP1_SCFG_MST_POS;
+ if (cfg->dir == DMA_DEV_TO_MEM ||
+ cfg->dir == DMA_DEV_TO_DEV)
+ l1 |= BIT(D40_MEM_LCSP1_SCFG_MST_POS);
/* dst is hw? -> master port 1 */
- if (cfg->dir == STEDMA40_MEM_TO_PERIPH ||
- cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
- l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS;
+ if (cfg->dir == DMA_MEM_TO_DEV ||
+ cfg->dir == DMA_DEV_TO_DEV)
+ l3 |= BIT(D40_MEM_LCSP3_DCFG_MST_POS);
- l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS;
+ l3 |= BIT(D40_MEM_LCSP3_DCFG_EIM_POS);
l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
- l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS;
+ l3 |= d40_width_to_bits(cfg->dst_info.data_width)
+ << D40_MEM_LCSP3_DCFG_ESIZE_POS;
- l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS;
+ l1 |= BIT(D40_MEM_LCSP1_SCFG_EIM_POS);
l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
- l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS;
+ l1 |= d40_width_to_bits(cfg->src_info.data_width)
+ << D40_MEM_LCSP1_SCFG_ESIZE_POS;
*lcsp1 = l1;
*lcsp3 = l3;
}
-/* Sets up SRC and DST CFG register for both logical and physical channels */
-void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
- u32 *src_cfg, u32 *dst_cfg, bool is_log)
+void d40_phy_cfg(struct stedma40_chan_cfg *cfg, u32 *src_cfg, u32 *dst_cfg)
{
u32 src = 0;
u32 dst = 0;
- if (!is_log) {
- /* Physical channel */
- if ((cfg->dir == STEDMA40_PERIPH_TO_MEM) ||
- (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
- /* Set master port to 1 */
- src |= 1 << D40_SREG_CFG_MST_POS;
- src |= D40_TYPE_TO_EVENT(cfg->src_dev_type);
-
- if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
- src |= 1 << D40_SREG_CFG_PHY_TM_POS;
- else
- src |= 3 << D40_SREG_CFG_PHY_TM_POS;
- }
- if ((cfg->dir == STEDMA40_MEM_TO_PERIPH) ||
- (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
- /* Set master port to 1 */
- dst |= 1 << D40_SREG_CFG_MST_POS;
- dst |= D40_TYPE_TO_EVENT(cfg->dst_dev_type);
-
- if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
- dst |= 1 << D40_SREG_CFG_PHY_TM_POS;
- else
- dst |= 3 << D40_SREG_CFG_PHY_TM_POS;
- }
- /* Interrupt on end of transfer for destination */
- dst |= 1 << D40_SREG_CFG_TIM_POS;
-
- /* Generate interrupt on error */
- src |= 1 << D40_SREG_CFG_EIM_POS;
- dst |= 1 << D40_SREG_CFG_EIM_POS;
-
- /* PSIZE */
- if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) {
- src |= 1 << D40_SREG_CFG_PHY_PEN_POS;
- src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS;
- }
- if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) {
- dst |= 1 << D40_SREG_CFG_PHY_PEN_POS;
- dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS;
- }
-
- /* Element size */
- src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
- dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;
-
- /* Set the priority bit to high for the physical channel */
- if (cfg->high_priority) {
- src |= 1 << D40_SREG_CFG_PRI_POS;
- dst |= 1 << D40_SREG_CFG_PRI_POS;
- }
-
- } else {
- /* Logical channel */
- dst |= 1 << D40_SREG_CFG_LOG_GIM_POS;
- src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
+ if ((cfg->dir == DMA_DEV_TO_MEM) ||
+ (cfg->dir == DMA_DEV_TO_DEV)) {
+ /* Set master port to 1 */
+ src |= BIT(D40_SREG_CFG_MST_POS);
+ src |= D40_TYPE_TO_EVENT(cfg->dev_type);
+
+ if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
+ src |= BIT(D40_SREG_CFG_PHY_TM_POS);
+ else
+ src |= 3 << D40_SREG_CFG_PHY_TM_POS;
+ }
+ if ((cfg->dir == DMA_MEM_TO_DEV) ||
+ (cfg->dir == DMA_DEV_TO_DEV)) {
+ /* Set master port to 1 */
+ dst |= BIT(D40_SREG_CFG_MST_POS);
+ dst |= D40_TYPE_TO_EVENT(cfg->dev_type);
+
+ if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
+ dst |= BIT(D40_SREG_CFG_PHY_TM_POS);
+ else
+ dst |= 3 << D40_SREG_CFG_PHY_TM_POS;
+ }
+ /* Interrupt on end of transfer for destination */
+ dst |= BIT(D40_SREG_CFG_TIM_POS);
+
+ /* Generate interrupt on error */
+ src |= BIT(D40_SREG_CFG_EIM_POS);
+ dst |= BIT(D40_SREG_CFG_EIM_POS);
+
+ /* PSIZE */
+ if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) {
+ src |= BIT(D40_SREG_CFG_PHY_PEN_POS);
+ src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS;
+ }
+ if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) {
+ dst |= BIT(D40_SREG_CFG_PHY_PEN_POS);
+ dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS;
+ }
+
+ /* Element size */
+ src |= d40_width_to_bits(cfg->src_info.data_width)
+ << D40_SREG_CFG_ESIZE_POS;
+ dst |= d40_width_to_bits(cfg->dst_info.data_width)
+ << D40_SREG_CFG_ESIZE_POS;
+
+ /* Set the priority bit to high for the physical channel */
+ if (cfg->high_priority) {
+ src |= BIT(D40_SREG_CFG_PRI_POS);
+ dst |= BIT(D40_SREG_CFG_PRI_POS);
}
if (cfg->src_info.big_endian)
- src |= 1 << D40_SREG_CFG_LBE_POS;
+ src |= BIT(D40_SREG_CFG_LBE_POS);
if (cfg->dst_info.big_endian)
- dst |= 1 << D40_SREG_CFG_LBE_POS;
+ dst |= BIT(D40_SREG_CFG_LBE_POS);
*src_cfg = src;
*dst_cfg = dst;
@@ -143,23 +149,22 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli,
num_elems = 2 << psize;
/* Must be aligned */
- if (!IS_ALIGNED(data, 0x1 << data_width))
+ if (!IS_ALIGNED(data, data_width))
return -EINVAL;
/* Transfer size can't be smaller than (num_elms * elem_size) */
- if (data_size < num_elems * (0x1 << data_width))
+ if (data_size < num_elems * data_width)
return -EINVAL;
/* The number of elements, i.e. how many chunks */
- lli->reg_elt = (data_size >> data_width) << D40_SREG_ELEM_PHY_ECNT_POS;
+ lli->reg_elt = (data_size / data_width) << D40_SREG_ELEM_PHY_ECNT_POS;
/*
* Distance to next element sized entry.
* Usually the size of the element unless you want gaps.
*/
if (addr_inc)
- lli->reg_elt |= (0x1 << data_width) <<
- D40_SREG_ELEM_PHY_EIDX_POS;
+ lli->reg_elt |= data_width << D40_SREG_ELEM_PHY_EIDX_POS;
/* Where the data is */
lli->reg_ptr = data;
@@ -167,18 +172,20 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli,
/* If this scatter list entry is the last one, no next link */
if (next_lli == 0)
- lli->reg_lnk = 0x1 << D40_SREG_LNK_PHY_TCP_POS;
+ lli->reg_lnk = BIT(D40_SREG_LNK_PHY_TCP_POS);
else
lli->reg_lnk = next_lli;
/* Set/clear interrupt generation on this link item.*/
if (term_int)
- lli->reg_cfg |= 0x1 << D40_SREG_CFG_TIM_POS;
+ lli->reg_cfg |= BIT(D40_SREG_CFG_TIM_POS);
else
- lli->reg_cfg &= ~(0x1 << D40_SREG_CFG_TIM_POS);
+ lli->reg_cfg &= ~BIT(D40_SREG_CFG_TIM_POS);
- /* Post link */
- lli->reg_lnk |= 0 << D40_SREG_LNK_PHY_PRE_POS;
+ /*
+ * Post link - D40_SREG_LNK_PHY_PRE_POS = 0
+ * Relink happens after transfer completion.
+ */
return 0;
}
@@ -187,16 +194,16 @@ static int d40_seg_size(int size, int data_width1, int data_width2)
{
u32 max_w = max(data_width1, data_width2);
u32 min_w = min(data_width1, data_width2);
- u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
+ u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);
if (seg_max > STEDMA40_MAX_SEG_SIZE)
- seg_max -= (1 << max_w);
+ seg_max -= max_w;
if (size <= seg_max)
return size;
if (size <= 2 * seg_max)
- return ALIGN(size / 2, 1 << max_w);
+ return ALIGN(size / 2, max_w);
return seg_max;
}
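A worked example of the byte-based segment splitting, in a standalone harness; STEDMA40_MAX_SEG_SIZE is assumed to be the 16-bit element-count limit 0xFFFF (it is defined outside this diff), and ALIGN() is a local stand-in for the kernel macro:

#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define STEDMA40_MAX_SEG_SIZE	0xFFFF	/* assumption, see above */

static int d40_seg_size(int size, int data_width1, int data_width2)
{
	int max_w = data_width1 > data_width2 ? data_width1 : data_width2;
	int min_w = data_width1 < data_width2 ? data_width1 : data_width2;
	int seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= max_w;
	if (size <= seg_max)
		return size;
	if (size <= 2 * seg_max)
		return ALIGN(size / 2, max_w);
	return seg_max;
}

int main(void)
{
	/* Hypothetical: 1-byte source, 4-byte destination elements. */
	printf("%d\n", d40_seg_size(200000, 1, 4));	/* prints 65532 */
	return 0;
}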
@@ -362,10 +369,10 @@ static void d40_log_fill_lli(struct d40_log_lli *lli,
lli->lcsp13 = reg_cfg;
/* The number of elements to transfer */
- lli->lcsp02 = ((data_size >> data_width) <<
+ lli->lcsp02 = ((data_size / data_width) <<
D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;
- BUG_ON((data_size >> data_width) > STEDMA40_MAX_SEG_SIZE);
+ BUG_ON((data_size / data_width) > STEDMA40_MAX_SEG_SIZE);
/* 16 LSBs address of the current element */
lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index fdde8ef77542..1b47312bc574 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -432,8 +432,7 @@ enum d40_lli_flags {
void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
u32 *src_cfg,
- u32 *dst_cfg,
- bool is_log);
+ u32 *dst_cfg);
void d40_log_cfg(struct stedma40_chan_cfg *cfg,
u32 *lcsp1,
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 33f59ecd256e..f137914d7b16 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1191,6 +1191,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
INIT_LIST_HEAD(&tdc->cb_desc);
tdc->config_init = false;
+ tdc->isr_handler = NULL;
spin_unlock_irqrestore(&tdc->lock, flags);
while (!list_empty(&dma_desc_list)) {
@@ -1334,7 +1335,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"request_irq failed with err %d channel %d\n",
- i, ret);
+ ret, i);
goto err_irq;
}
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 26107ba6edb3..0ef43c136aa7 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -811,8 +811,6 @@ static int td_remove(struct platform_device *pdev)
kfree(td);
release_mem_region(iomem->start, resource_size(iomem));
- platform_set_drvdata(pdev, NULL);
-
dev_dbg(&pdev->dev, "Removed...\n");
return 0;
}