Diffstat (limited to 'drivers/dma')
 -rw-r--r--  drivers/dma/acpi-dma.c       |   5
 -rw-r--r--  drivers/dma/at_xdmac.c       |  20
 -rw-r--r--  drivers/dma/dma-axi-dmac.c   |   8
 -rw-r--r--  drivers/dma/dmaengine.c      | 178
 -rw-r--r--  drivers/dma/dw/platform.c    |   7
 -rw-r--r--  drivers/dma/edma.c           |  10
 -rw-r--r--  drivers/dma/fsl-edma.c       |  85
 -rw-r--r--  drivers/dma/hsu/hsu.c        |  17
 -rw-r--r--  drivers/dma/hsu/hsu.h        |   1
 -rw-r--r--  drivers/dma/idma64.c         |  22
 -rw-r--r--  drivers/dma/idma64.h         |   3
 -rw-r--r--  drivers/dma/img-mdc-dma.c    |  78
 -rw-r--r--  drivers/dma/ioat/dca.c       |   2
 -rw-r--r--  drivers/dma/ioat/dma.h       |  34
 -rw-r--r--  drivers/dma/ioat/registers.h |  16
 -rw-r--r--  drivers/dma/omap-dma.c       |  82
 -rw-r--r--  drivers/dma/pxa_dma.c        |   1
 -rw-r--r--  drivers/dma/sh/Kconfig       |   6
 -rw-r--r--  drivers/dma/sh/Makefile      |   1
 -rw-r--r--  drivers/dma/sh/rcar-hpbdma.c | 669
 -rw-r--r--  drivers/dma/sh/usb-dmac.c    |   4
 -rw-r--r--  drivers/dma/ste_dma40.c      |  87
 -rw-r--r--  drivers/dma/virt-dma.c       |  46
 -rw-r--r--  drivers/dma/virt-dma.h       |  25
 24 files changed, 448 insertions(+), 959 deletions(-)
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 16d0daa058a5..eed6bda01790 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -72,7 +73,9 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
si = (const struct acpi_csrt_shared_info *)&grp[1];
/* Match device by MMIO and IRQ */
- if (si->mmio_base_low != mem || si->gsi_interrupt != irq)
+ if (si->mmio_base_low != lower_32_bits(mem) ||
+ si->mmio_base_high != upper_32_bits(mem) ||
+ si->gsi_interrupt != irq)
return 0;
dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
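Note: lower_32_bits()/upper_32_bits() come from <linux/kernel.h>, which is why the include is added; together they compare the full 64-bit MMIO base against the two 32-bit CSRT fields. A minimal sketch of the same check (the helper name is made up):

	static bool csrt_mmio_matches(const struct acpi_csrt_shared_info *si,
				      u64 mem)
	{
		return si->mmio_base_low == lower_32_bits(mem) &&
		       si->mmio_base_high == upper_32_bits(mem);
	}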
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b5e132d4bae5..d0ae4613b87e 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -863,8 +863,12 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
* access. Hopefully we can access DDR through both ports (at least on
* SAMA5D4x), so we can use the same interface for source and dest,
* that solves the fact we don't know the direction.
+ * ERRATA: Even though it is unused for memory transfers, the PERID must
+ * not match that of another channel, otherwise it could lead to spurious
+ * flag status.
*/
- u32 chan_cc = AT_XDMAC_CC_DIF(0)
+ u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
+ | AT_XDMAC_CC_DIF(0)
| AT_XDMAC_CC_SIF(0)
| AT_XDMAC_CC_MBSIZE_SIXTEEN
| AT_XDMAC_CC_TYPE_MEM_TRAN;
@@ -1039,8 +1043,12 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
* access DDR through both ports (at least on SAMA5D4x), so we can use
* the same interface for source and dest, that solves the fact we
* don't know the direction.
+ * ERRATA: Even though it is unused for memory transfers, the PERID must
+ * not match that of another channel, otherwise it could lead to spurious
+ * flag status.
*/
- u32 chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM
+ u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
+ | AT_XDMAC_CC_DAM_INCREMENTED_AM
| AT_XDMAC_CC_SAM_INCREMENTED_AM
| AT_XDMAC_CC_DIF(0)
| AT_XDMAC_CC_SIF(0)
@@ -1140,8 +1148,12 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
* access. Hopefully we can access DDR through both ports (at least on
* SAMA5D4x), so we can use the same interface for source and dest,
* that solves the fact we don't know the direction.
+ * ERRATA: Even though it is unused for memory transfers, the PERID must
+ * not match that of another channel, otherwise it could lead to spurious
+ * flag status.
*/
- u32 chan_cc = AT_XDMAC_CC_DAM_UBS_AM
+ u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
+ | AT_XDMAC_CC_DAM_UBS_AM
| AT_XDMAC_CC_SAM_INCREMENTED_AM
| AT_XDMAC_CC_DIF(0)
| AT_XDMAC_CC_SIF(0)
@@ -1995,8 +2007,6 @@ static int at_xdmac_remove(struct platform_device *pdev)
dma_async_device_unregister(&atxdmac->dma);
clk_disable_unprepare(atxdmac->clk);
- synchronize_irq(atxdmac->irq);
-
free_irq(atxdmac->irq, atxdmac->dma.dev);
for (i = 0; i < atxdmac->dma.chancnt; i++) {
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 5b2395e7e04d..c3468094393e 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -307,6 +307,13 @@ static int axi_dmac_terminate_all(struct dma_chan *c)
return 0;
}
+static void axi_dmac_synchronize(struct dma_chan *c)
+{
+ struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+
+ vchan_synchronize(&chan->vchan);
+}
+
static void axi_dmac_issue_pending(struct dma_chan *c)
{
struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
@@ -613,6 +620,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
dma_dev->device_terminate_all = axi_dmac_terminate_all;
+ dma_dev->device_synchronize = axi_dmac_synchronize;
dma_dev->dev = &pdev->dev;
dma_dev->chancnt = 1;
dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
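Note: the new callback simply forwards to vchan_synchronize(), which is what dmaengine_synchronize() reaches through device_synchronize when a client tears a channel down. A hedged sketch of the client-side pattern this enables (hypothetical driver, channel obtained elsewhere):

	/* Sketch: ensure no completion callback is still running. */
	static void example_client_teardown(struct dma_chan *chan)
	{
		dmaengine_terminate_async(chan);
		dmaengine_synchronize(chan);
		dma_release_channel(chan);
	}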
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 3ecec1445adf..c50a247be2e0 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -43,6 +43,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -265,8 +266,11 @@ static void dma_chan_put(struct dma_chan *chan)
module_put(dma_chan_to_owner(chan));
/* This channel is not in use anymore, free it */
- if (!chan->client_count && chan->device->device_free_chan_resources)
+ if (!chan->client_count && chan->device->device_free_chan_resources) {
+ /* Make sure all operations have completed */
+ dmaengine_synchronize(chan);
chan->device->device_free_chan_resources(chan);
+ }
/* If the channel is used via a DMA request router, free the mapping */
if (chan->router && chan->router->route_free) {
@@ -493,6 +497,7 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
caps->dst_addr_widths = device->dst_addr_widths;
caps->directions = device->directions;
caps->residue_granularity = device->residue_granularity;
+ caps->descriptor_reuse = device->descriptor_reuse;
/*
* Some devices implement only pause (e.g. to get residuum) but no
@@ -511,7 +516,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
{
struct dma_chan *chan;
- if (!__dma_device_satisfies_mask(dev, mask)) {
+ if (mask && !__dma_device_satisfies_mask(dev, mask)) {
pr_debug("%s: wrong capabilities\n", __func__);
return NULL;
}
@@ -542,6 +547,42 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
return NULL;
}
+static struct dma_chan *find_candidate(struct dma_device *device,
+ const dma_cap_mask_t *mask,
+ dma_filter_fn fn, void *fn_param)
+{
+ struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
+ int err;
+
+ if (chan) {
+ /* Found a suitable channel, try to grab, prep, and return it.
+ * We first set DMA_PRIVATE to disable balance_ref_count as this
+ * channel will not be published in the general-purpose
+ * allocator
+ */
+ dma_cap_set(DMA_PRIVATE, device->cap_mask);
+ device->privatecnt++;
+ err = dma_chan_get(chan);
+
+ if (err) {
+ if (err == -ENODEV) {
+ pr_debug("%s: %s module removed\n", __func__,
+ dma_chan_name(chan));
+ list_del_rcu(&device->global_node);
+ } else
+ pr_debug("%s: failed to get %s: (%d)\n",
+ __func__, dma_chan_name(chan), err);
+
+ if (--device->privatecnt == 0)
+ dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+
+ chan = ERR_PTR(err);
+ }
+ }
+
+ return chan ? chan : ERR_PTR(-EPROBE_DEFER);
+}
+
/**
* dma_get_slave_channel - try to get specific channel exclusively
* @chan: target channel
@@ -580,7 +621,6 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
dma_cap_mask_t mask;
struct dma_chan *chan;
- int err;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -588,23 +628,11 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
/* lock against __dma_request_channel */
mutex_lock(&dma_list_mutex);
- chan = private_candidate(&mask, device, NULL, NULL);
- if (chan) {
- dma_cap_set(DMA_PRIVATE, device->cap_mask);
- device->privatecnt++;
- err = dma_chan_get(chan);
- if (err) {
- pr_debug("%s: failed to get %s: (%d)\n",
- __func__, dma_chan_name(chan), err);
- chan = NULL;
- if (--device->privatecnt == 0)
- dma_cap_clear(DMA_PRIVATE, device->cap_mask);
- }
- }
+ chan = find_candidate(device, &mask, NULL, NULL);
mutex_unlock(&dma_list_mutex);
- return chan;
+ return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
@@ -621,35 +649,15 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
{
struct dma_device *device, *_d;
struct dma_chan *chan = NULL;
- int err;
/* Find a channel */
mutex_lock(&dma_list_mutex);
list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
- chan = private_candidate(mask, device, fn, fn_param);
- if (chan) {
- /* Found a suitable channel, try to grab, prep, and
- * return it. We first set DMA_PRIVATE to disable
- * balance_ref_count as this channel will not be
- * published in the general-purpose allocator
- */
- dma_cap_set(DMA_PRIVATE, device->cap_mask);
- device->privatecnt++;
- err = dma_chan_get(chan);
+ chan = find_candidate(device, mask, fn, fn_param);
+ if (!IS_ERR(chan))
+ break;
- if (err == -ENODEV) {
- pr_debug("%s: %s module removed\n",
- __func__, dma_chan_name(chan));
- list_del_rcu(&device->global_node);
- } else if (err)
- pr_debug("%s: failed to get %s: (%d)\n",
- __func__, dma_chan_name(chan), err);
- else
- break;
- if (--device->privatecnt == 0)
- dma_cap_clear(DMA_PRIVATE, device->cap_mask);
- chan = NULL;
- }
+ chan = NULL;
}
mutex_unlock(&dma_list_mutex);
@@ -662,27 +670,73 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
+static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
+ const char *name,
+ struct device *dev)
+{
+ int i;
+
+ if (!device->filter.mapcnt)
+ return NULL;
+
+ for (i = 0; i < device->filter.mapcnt; i++) {
+ const struct dma_slave_map *map = &device->filter.map[i];
+
+ if (!strcmp(map->devname, dev_name(dev)) &&
+ !strcmp(map->slave, name))
+ return map;
+ }
+
+ return NULL;
+}
+
/**
- * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
+ * dma_request_chan - try to allocate an exclusive slave channel
* @dev: pointer to client device structure
* @name: slave channel name
*
* Returns pointer to appropriate DMA channel on success or an error pointer.
*/
-struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
- const char *name)
+struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
+ struct dma_device *d, *_d;
+ struct dma_chan *chan = NULL;
+
/* If device-tree is present get slave info from here */
if (dev->of_node)
- return of_dma_request_slave_channel(dev->of_node, name);
+ chan = of_dma_request_slave_channel(dev->of_node, name);
/* If device was enumerated by ACPI get slave info from here */
- if (ACPI_HANDLE(dev))
- return acpi_dma_request_slave_chan_by_name(dev, name);
+ if (has_acpi_companion(dev) && !chan)
+ chan = acpi_dma_request_slave_chan_by_name(dev, name);
+
+ if (chan) {
+ /* Valid channel found or requester needs to be deferred */
+ if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
+ return chan;
+ }
- return ERR_PTR(-ENODEV);
+ /* Try to find the channel via the DMA filter map(s) */
+ mutex_lock(&dma_list_mutex);
+ list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
+ dma_cap_mask_t mask;
+ const struct dma_slave_map *map = dma_filter_match(d, name, dev);
+
+ if (!map)
+ continue;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ chan = find_candidate(d, &mask, d->filter.fn, map->param);
+ if (!IS_ERR(chan))
+ break;
+ }
+ mutex_unlock(&dma_list_mutex);
+
+ return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
-EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
+EXPORT_SYMBOL_GPL(dma_request_chan);
/**
* dma_request_slave_channel - try to allocate an exclusive slave channel
@@ -694,17 +748,35 @@ EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
struct dma_chan *dma_request_slave_channel(struct device *dev,
const char *name)
{
- struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
+ struct dma_chan *ch = dma_request_chan(dev, name);
if (IS_ERR(ch))
return NULL;
- dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
- ch->device->privatecnt++;
-
return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
+/**
+ * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
+ * @mask: capabilities that the channel must satisfy
+ *
+ * Returns pointer to appropriate DMA channel on success or an error pointer.
+ */
+struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
+{
+ struct dma_chan *chan;
+
+ if (!mask)
+ return ERR_PTR(-ENODEV);
+
+ chan = __dma_request_channel(mask, NULL, NULL);
+ if (!chan)
+ chan = ERR_PTR(-ENODEV);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
+
void dma_release_channel(struct dma_chan *chan)
{
mutex_lock(&dma_list_mutex);
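Note: a minimal sketch of how a client might use the renamed dma_request_chan() and the new dma_request_chan_by_mask() (hypothetical consumer, not taken from this series):

	/* Sketch: request a named slave channel, fall back to any memcpy channel. */
	static struct dma_chan *example_get_chan(struct device *dev)
	{
		struct dma_chan *chan;
		dma_cap_mask_t mask;

		chan = dma_request_chan(dev, "rx");
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			return chan;	/* got a channel, or must defer probing */

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);
		return dma_request_chan_by_mask(&mask);
	}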
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 68a4815750b5..5a417bbdfbd7 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -103,18 +103,21 @@ dw_dma_parse_dt(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct dw_dma_platform_data *pdata;
u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
+ u32 nr_channels;
if (!np) {
dev_err(&pdev->dev, "Missing DT data\n");
return NULL;
}
+ if (of_property_read_u32(np, "dma-channels", &nr_channels))
+ return NULL;
+
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
- if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels))
- return NULL;
+ pdata->nr_channels = nr_channels;
if (of_property_read_bool(np, "is_private"))
pdata->is_private = true;
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 6b03e4e84e6b..6b3e9d991010 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -2297,6 +2297,10 @@ static int edma_probe(struct platform_device *pdev)
edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
}
+ ecc->dma_slave.filter.map = info->slave_map;
+ ecc->dma_slave.filter.mapcnt = info->slavecnt;
+ ecc->dma_slave.filter.fn = edma_filter_fn;
+
ret = dma_async_device_register(&ecc->dma_slave);
if (ret) {
dev_err(dev, "slave ddev registration failed (%d)\n", ret);
@@ -2404,7 +2408,13 @@ static struct platform_driver edma_driver = {
},
};
+static int edma_tptc_probe(struct platform_device *pdev)
+{
+ return 0;
+}
+
static struct platform_driver edma_tptc_driver = {
+ .probe = edma_tptc_probe,
.driver = {
.name = "edma3-tptc",
.of_match_table = edma_tptc_of_ids,
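Note: the filter.map/filter.mapcnt/filter.fn fields let dma_request_chan() resolve a (device name, slave name) pair on platforms without DT or ACPI. A hedged sketch of what such a table looks like in platform data (device names and filter parameters are made up; the parameter is whatever the controller's filter function expects):

	static u32 example_uart0_rx_req = 1;	/* controller-specific cookie */
	static u32 example_uart0_tx_req = 2;

	static const struct dma_slave_map example_map[] = {
		{ "example-uart.0", "rx", &example_uart0_rx_req },
		{ "example-uart.0", "tx", &example_uart0_tx_req },
	};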
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 915eec3cc279..be2e62b87948 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -116,6 +116,10 @@
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
+enum fsl_edma_pm_state {
+ RUNNING = 0,
+ SUSPENDED,
+};
struct fsl_edma_hw_tcd {
__le32 saddr;
@@ -147,6 +151,9 @@ struct fsl_edma_slave_config {
struct fsl_edma_chan {
struct virt_dma_chan vchan;
enum dma_status status;
+ enum fsl_edma_pm_state pm_state;
+ bool idle;
+ u32 slave_id;
struct fsl_edma_engine *edma;
struct fsl_edma_desc *edesc;
struct fsl_edma_slave_config fsc;
@@ -298,6 +305,7 @@ static int fsl_edma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
fsl_edma_disable_request(fsl_chan);
fsl_chan->edesc = NULL;
+ fsl_chan->idle = true;
vchan_get_all_descriptors(&fsl_chan->vchan, &head);
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
@@ -313,6 +321,7 @@ static int fsl_edma_pause(struct dma_chan *chan)
if (fsl_chan->edesc) {
fsl_edma_disable_request(fsl_chan);
fsl_chan->status = DMA_PAUSED;
+ fsl_chan->idle = true;
}
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return 0;
@@ -327,6 +336,7 @@ static int fsl_edma_resume(struct dma_chan *chan)
if (fsl_chan->edesc) {
fsl_edma_enable_request(fsl_chan);
fsl_chan->status = DMA_IN_PROGRESS;
+ fsl_chan->idle = false;
}
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return 0;
@@ -648,6 +658,7 @@ static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
fsl_edma_enable_request(fsl_chan);
fsl_chan->status = DMA_IN_PROGRESS;
+ fsl_chan->idle = false;
}
static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
@@ -676,6 +687,7 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
vchan_cookie_complete(&fsl_chan->edesc->vdesc);
fsl_chan->edesc = NULL;
fsl_chan->status = DMA_COMPLETE;
+ fsl_chan->idle = true;
} else {
vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
}
@@ -704,6 +716,7 @@ static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
edma_writeb(fsl_edma, EDMA_CERR_CERR(ch),
fsl_edma->membase + EDMA_CERR);
fsl_edma->chans[ch].status = DMA_ERROR;
+ fsl_edma->chans[ch].idle = true;
}
}
return IRQ_HANDLED;
@@ -724,6 +737,12 @@ static void fsl_edma_issue_pending(struct dma_chan *chan)
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ if (unlikely(fsl_chan->pm_state != RUNNING)) {
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ /* cannot submit due to suspend */
+ return;
+ }
+
if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
fsl_edma_xfer_desc(fsl_chan);
@@ -735,6 +754,7 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
{
struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
struct dma_chan *chan, *_chan;
+ struct fsl_edma_chan *fsl_chan;
unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR;
if (dma_spec->args_count != 2)
@@ -748,8 +768,10 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
chan = dma_get_slave_channel(chan);
if (chan) {
chan->device->privatecnt++;
- fsl_edma_chan_mux(to_fsl_edma_chan(chan),
- dma_spec->args[1], true);
+ fsl_chan = to_fsl_edma_chan(chan);
+ fsl_chan->slave_id = dma_spec->args[1];
+ fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id,
+ true);
mutex_unlock(&fsl_edma->fsl_edma_mutex);
return chan;
}
@@ -888,7 +910,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
fsl_chan->edma = fsl_edma;
-
+ fsl_chan->pm_state = RUNNING;
+ fsl_chan->slave_id = 0;
+ fsl_chan->idle = true;
fsl_chan->vchan.desc_free = fsl_edma_free_desc;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
@@ -959,6 +983,60 @@ static int fsl_edma_remove(struct platform_device *pdev)
return 0;
}
+static int fsl_edma_suspend_late(struct device *dev)
+{
+ struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
+ struct fsl_edma_chan *fsl_chan;
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ fsl_chan = &fsl_edma->chans[i];
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ /* Make sure the channel is idle, otherwise force-disable it. */
+ if (unlikely(!fsl_chan->idle)) {
+ dev_warn(dev, "WARN: There is a non-idle channel.");
+ fsl_edma_disable_request(fsl_chan);
+ fsl_edma_chan_mux(fsl_chan, 0, false);
+ }
+
+ fsl_chan->pm_state = SUSPENDED;
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ }
+
+ return 0;
+}
+
+static int fsl_edma_resume_early(struct device *dev)
+{
+ struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
+ struct fsl_edma_chan *fsl_chan;
+ int i;
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ fsl_chan = &fsl_edma->chans[i];
+ fsl_chan->pm_state = RUNNING;
+ edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
+ if (fsl_chan->slave_id != 0)
+ fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
+ }
+
+ edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA,
+ fsl_edma->membase + EDMA_CR);
+
+ return 0;
+}
+
+/*
+ * eDMA provides services to other devices, so it should suspend late
+ * and resume early. By the time eDMA suspends, all of its clients should
+ * have stopped their DMA transfers and left the channels idle.
+ */
+static const struct dev_pm_ops fsl_edma_pm_ops = {
+ .suspend_late = fsl_edma_suspend_late,
+ .resume_early = fsl_edma_resume_early,
+};
+
static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,vf610-edma", },
{ /* sentinel */ }
@@ -969,6 +1047,7 @@ static struct platform_driver fsl_edma_driver = {
.driver = {
.name = "fsl-edma",
.of_match_table = fsl_edma_dt_ids,
+ .pm = &fsl_edma_pm_ops,
},
.probe = fsl_edma_probe,
.remove = fsl_edma_remove,
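Note: the explicit .suspend_late/.resume_early pair above could also be spelled with the SET_LATE_SYSTEM_SLEEP_PM_OPS() helper from <linux/pm.h>, which additionally wires up the hibernate variants; a sketch:

	static const struct dev_pm_ops fsl_edma_pm_ops = {
		SET_LATE_SYSTEM_SLEEP_PM_OPS(fsl_edma_suspend_late,
					     fsl_edma_resume_early)
	};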
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 823ad728aecf..eef145edb936 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -228,6 +228,8 @@ static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
for_each_sg(sgl, sg, sg_len, i) {
desc->sg[i].addr = sg_dma_address(sg);
desc->sg[i].len = sg_dma_len(sg);
+
+ desc->length += sg_dma_len(sg);
}
desc->nents = sg_len;
@@ -249,21 +251,10 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
}
-static size_t hsu_dma_desc_size(struct hsu_dma_desc *desc)
-{
- size_t bytes = 0;
- unsigned int i;
-
- for (i = desc->active; i < desc->nents; i++)
- bytes += desc->sg[i].len;
-
- return bytes;
-}
-
static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
{
struct hsu_dma_desc *desc = hsuc->desc;
- size_t bytes = hsu_dma_desc_size(desc);
+ size_t bytes = desc->length;
int i;
i = desc->active % HSU_DMA_CHAN_NR_DESC;
@@ -294,7 +285,7 @@ static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
dma_set_residue(state, bytes);
status = hsuc->desc->status;
} else if (vdesc) {
- bytes = hsu_dma_desc_size(to_hsu_dma_desc(vdesc));
+ bytes = to_hsu_dma_desc(vdesc)->length;
dma_set_residue(state, bytes);
}
spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index f06579c6d548..578a8ee8cd05 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -65,6 +65,7 @@ struct hsu_dma_desc {
enum dma_transfer_direction direction;
struct hsu_dma_sg *sg;
unsigned int nents;
+ size_t length;
unsigned int active;
enum dma_status status;
};
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 7d56b47e4fcf..3cb7b2c78197 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -178,20 +178,12 @@ static irqreturn_t idma64_irq(int irq, void *dev)
if (!status)
return IRQ_NONE;
- /* Disable interrupts */
- channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask);
- channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
-
status_xfer = dma_readl(idma64, RAW(XFER));
status_err = dma_readl(idma64, RAW(ERROR));
for (i = 0; i < idma64->dma.chancnt; i++)
idma64_chan_irq(idma64, i, status_err, status_xfer);
- /* Re-enable interrupts */
- channel_set_bit(idma64, MASK(XFER), idma64->all_chan_mask);
- channel_set_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
-
return IRQ_HANDLED;
}
@@ -239,7 +231,7 @@ static void idma64_vdesc_free(struct virt_dma_desc *vdesc)
idma64_desc_free(idma64c, to_idma64_desc(vdesc));
}
-static u64 idma64_hw_desc_fill(struct idma64_hw_desc *hw,
+static void idma64_hw_desc_fill(struct idma64_hw_desc *hw,
struct dma_slave_config *config,
enum dma_transfer_direction direction, u64 llp)
{
@@ -276,26 +268,26 @@ static u64 idma64_hw_desc_fill(struct idma64_hw_desc *hw,
IDMA64C_CTLL_SRC_WIDTH(src_width);
lli->llp = llp;
- return hw->llp;
}
static void idma64_desc_fill(struct idma64_chan *idma64c,
struct idma64_desc *desc)
{
struct dma_slave_config *config = &idma64c->config;
- struct idma64_hw_desc *hw = &desc->hw[desc->ndesc - 1];
+ unsigned int i = desc->ndesc;
+ struct idma64_hw_desc *hw = &desc->hw[i - 1];
struct idma64_lli *lli = hw->lli;
u64 llp = 0;
- unsigned int i = desc->ndesc;
/* Fill the hardware descriptors and link them to a list */
do {
hw = &desc->hw[--i];
- llp = idma64_hw_desc_fill(hw, config, desc->direction, llp);
+ idma64_hw_desc_fill(hw, config, desc->direction, llp);
+ llp = hw->llp;
desc->length += hw->len;
} while (i);
- /* Trigger interrupt after last block */
+ /* Trigger an interrupt after the last block is transferred */
lli->ctllo |= IDMA64C_CTLL_INT_EN;
}
@@ -596,6 +588,8 @@ static int idma64_probe(struct idma64_chip *chip)
idma64->dma.dev = chip->dev;
+ dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
+
ret = dma_async_device_register(&idma64->dma);
if (ret)
return ret;
diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h
index f6aeff0af8a5..8423f13ed0da 100644
--- a/drivers/dma/idma64.h
+++ b/drivers/dma/idma64.h
@@ -54,7 +54,8 @@
#define IDMA64C_CTLL_LLP_S_EN (1 << 28) /* src block chain */
/* Bitfields in CTL_HI */
-#define IDMA64C_CTLH_BLOCK_TS(x) ((x) & ((1 << 17) - 1))
+#define IDMA64C_CTLH_BLOCK_TS_MASK ((1 << 17) - 1)
+#define IDMA64C_CTLH_BLOCK_TS(x) ((x) & IDMA64C_CTLH_BLOCK_TS_MASK)
#define IDMA64C_CTLH_DONE (1 << 17)
/* Bitfields in CFG_LO */
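Note: dma_set_max_seg_size() advertises the largest contiguous segment the controller can program (here the width of the BLOCK_TS field), so clients and the DMA-mapping core can split their buffers accordingly. A hedged client-side sketch (the helper name is made up):

	/* Sketch: cap a transfer chunk to the advertised segment limit. */
	static size_t example_max_chunk(struct dma_chan *chan, size_t len)
	{
		return min_t(size_t, len, dma_get_max_seg_size(chan->device->dev));
	}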
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 9ca56830cc63..a4c53be482cf 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -651,6 +651,48 @@ static enum dma_status mdc_tx_status(struct dma_chan *chan,
return ret;
}
+static unsigned int mdc_get_new_events(struct mdc_chan *mchan)
+{
+ u32 val, processed, done1, done2;
+ unsigned int ret;
+
+ val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
+ processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
+ MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
+ /*
+ * CMDS_DONE may have incremented between reading CMDS_PROCESSED
+ * and clearing INT_ACTIVE. Re-read CMDS_PROCESSED to ensure we
+ * didn't miss a command completion.
+ */
+ do {
+ val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
+
+ done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
+ MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
+
+ val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK <<
+ MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) |
+ MDC_CMDS_PROCESSED_INT_ACTIVE);
+
+ val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT;
+
+ mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED);
+
+ val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
+
+ done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
+ MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
+ } while (done1 != done2);
+
+ if (done1 >= processed)
+ ret = done1 - processed;
+ else
+ ret = ((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1) -
+ processed) + done1;
+
+ return ret;
+}
+
static int mdc_terminate_all(struct dma_chan *chan)
{
struct mdc_chan *mchan = to_mdc_chan(chan);
@@ -667,6 +709,8 @@ static int mdc_terminate_all(struct dma_chan *chan)
mchan->desc = NULL;
vchan_get_all_descriptors(&mchan->vc, &head);
+ mdc_get_new_events(mchan);
+
spin_unlock_irqrestore(&mchan->vc.lock, flags);
if (mdesc)
@@ -703,35 +747,17 @@ static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
{
struct mdc_chan *mchan = (struct mdc_chan *)dev_id;
struct mdc_tx_desc *mdesc;
- u32 val, processed, done1, done2;
- unsigned int i;
+ unsigned int i, new_events;
spin_lock(&mchan->vc.lock);
- val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
- processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
- MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
- /*
- * CMDS_DONE may have incremented between reading CMDS_PROCESSED
- * and clearing INT_ACTIVE. Re-read CMDS_PROCESSED to ensure we
- * didn't miss a command completion.
- */
- do {
- val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
- done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
- MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
- val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK <<
- MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) |
- MDC_CMDS_PROCESSED_INT_ACTIVE);
- val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT;
- mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED);
- val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
- done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
- MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
- } while (done1 != done2);
-
dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr);
+ new_events = mdc_get_new_events(mchan);
+
+ if (!new_events)
+ goto out;
+
mdesc = mchan->desc;
if (!mdesc) {
dev_warn(mdma2dev(mchan->mdma),
@@ -740,8 +766,7 @@ static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
goto out;
}
- for (i = processed; i != done1;
- i = (i + 1) % (MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1)) {
+ for (i = 0; i < new_events; i++) {
/*
* The first interrupt in a transfer indicates that the
* command list has been loaded, not that a command has
@@ -979,7 +1004,6 @@ static int mdc_dma_remove(struct platform_device *pdev)
vc.chan.device_node) {
list_del(&mchan->vc.chan.device_node);
- synchronize_irq(mchan->irq);
devm_free_irq(&pdev->dev, mchan->irq, mchan);
tasklet_kill(&mchan->vc.task);
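Note on the wrap-around arithmetic in mdc_get_new_events(): assuming, purely for illustration, a 5-bit command counter (mask 0x1f), processed == 30 and done1 == 2 gives (0x1f + 1 - 30) + 2 = 4 newly completed commands.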
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 2cb7c308d5c7..0b9b6b07db9e 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -224,7 +224,7 @@ static u8 ioat_dca_get_tag(struct dca_provider *dca,
return tag;
}
-static struct dca_ops ioat_dca_ops = {
+static const struct dca_ops ioat_dca_ops = {
.add_requester = ioat_dca_add_requester,
.remove_requester = ioat_dca_remove_requester,
.get_tag = ioat_dca_get_tag,
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 8f4e607d5817..b8f48074789f 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -235,43 +235,11 @@ ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
return ioat_dma->idx[index];
}
-static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan)
-{
- u8 ver = ioat_chan->ioat_dma->version;
- u64 status;
- u32 status_lo;
-
- /* We need to read the low address first as this causes the
- * chipset to latch the upper bits for the subsequent read
- */
- status_lo = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
- status = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
- status <<= 32;
- status |= status_lo;
-
- return status;
-}
-
-#if BITS_PER_LONG == 64
-
static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
{
- u8 ver = ioat_chan->ioat_dma->version;
- u64 status;
-
- /* With IOAT v3.3 the status register is 64bit. */
- if (ver >= IOAT_VER_3_3)
- status = readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
- else
- status = ioat_chansts_32(ioat_chan);
-
- return status;
+ return readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET);
}
-#else
-#define ioat_chansts ioat_chansts_32
-#endif
-
static inline u64 ioat_chansts_to_addr(u64 status)
{
return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 909352f74c89..4994a3623aee 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -99,19 +99,9 @@
#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */
#define IOAT_DMA_COMP_V2 0x0002 /* Compatibility with DMA version 2 */
-
-#define IOAT1_CHANSTS_OFFSET 0x04 /* 64-bit Channel Status Register */
-#define IOAT2_CHANSTS_OFFSET 0x08 /* 64-bit Channel Status Register */
-#define IOAT_CHANSTS_OFFSET(ver) ((ver) < IOAT_VER_2_0 \
- ? IOAT1_CHANSTS_OFFSET : IOAT2_CHANSTS_OFFSET)
-#define IOAT1_CHANSTS_OFFSET_LOW 0x04
-#define IOAT2_CHANSTS_OFFSET_LOW 0x08
-#define IOAT_CHANSTS_OFFSET_LOW(ver) ((ver) < IOAT_VER_2_0 \
- ? IOAT1_CHANSTS_OFFSET_LOW : IOAT2_CHANSTS_OFFSET_LOW)
-#define IOAT1_CHANSTS_OFFSET_HIGH 0x08
-#define IOAT2_CHANSTS_OFFSET_HIGH 0x0C
-#define IOAT_CHANSTS_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \
- ? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH)
+/* IOAT1 define retained so that the i7300_idle driver still compiles */
+#define IOAT1_CHANSTS_OFFSET 0x04
+#define IOAT_CHANSTS_OFFSET 0x08 /* 64-bit Channel Status Register */
#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL)
#define IOAT_CHANSTS_SOFT_ERR 0x10ULL
#define IOAT_CHANSTS_UNAFFILIATED_ERR 0x8ULL
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 1dfc71c90123..9794b073d7d7 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -28,8 +28,6 @@
struct omap_dmadev {
struct dma_device ddev;
spinlock_t lock;
- struct tasklet_struct task;
- struct list_head pending;
void __iomem *base;
const struct omap_dma_reg *reg_map;
struct omap_system_dma_plat_info *plat;
@@ -42,7 +40,6 @@ struct omap_dmadev {
struct omap_chan {
struct virt_dma_chan vc;
- struct list_head node;
void __iomem *channel_base;
const struct omap_dma_reg *reg_map;
uint32_t ccr;
@@ -454,33 +451,6 @@ static void omap_dma_callback(int ch, u16 status, void *data)
spin_unlock_irqrestore(&c->vc.lock, flags);
}
-/*
- * This callback schedules all pending channels. We could be more
- * clever here by postponing allocation of the real DMA channels to
- * this point, and freeing them when our virtual channel becomes idle.
- *
- * We would then need to deal with 'all channels in-use'
- */
-static void omap_dma_sched(unsigned long data)
-{
- struct omap_dmadev *d = (struct omap_dmadev *)data;
- LIST_HEAD(head);
-
- spin_lock_irq(&d->lock);
- list_splice_tail_init(&d->pending, &head);
- spin_unlock_irq(&d->lock);
-
- while (!list_empty(&head)) {
- struct omap_chan *c = list_first_entry(&head,
- struct omap_chan, node);
-
- spin_lock_irq(&c->vc.lock);
- list_del_init(&c->node);
- omap_dma_start_desc(c);
- spin_unlock_irq(&c->vc.lock);
- }
-}
-
static irqreturn_t omap_dma_irq(int irq, void *devid)
{
struct omap_dmadev *od = devid;
@@ -703,8 +673,14 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
struct omap_chan *c = to_omap_dma_chan(chan);
struct virt_dma_desc *vd;
enum dma_status ret;
+ uint32_t ccr;
unsigned long flags;
+ ccr = omap_dma_chan_read(c, CCR);
+ /* The channel is no longer active, handle the completion right away */
+ if (!(ccr & CCR_ENABLE))
+ omap_dma_callback(c->dma_ch, 0, c);
+
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_COMPLETE || !txstate)
return ret;
@@ -719,7 +695,7 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
if (d->dir == DMA_MEM_TO_DEV)
pos = omap_dma_get_src_pos(c);
- else if (d->dir == DMA_DEV_TO_MEM)
+ else if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM)
pos = omap_dma_get_dst_pos(c);
else
pos = 0;
@@ -739,22 +715,8 @@ static void omap_dma_issue_pending(struct dma_chan *chan)
unsigned long flags;
spin_lock_irqsave(&c->vc.lock, flags);
- if (vchan_issue_pending(&c->vc) && !c->desc) {
- /*
- * c->cyclic is used only by audio and in this case the DMA need
- * to be started without delay.
- */
- if (!c->cyclic) {
- struct omap_dmadev *d = to_omap_dma_dev(chan->device);
- spin_lock(&d->lock);
- if (list_empty(&c->node))
- list_add_tail(&c->node, &d->pending);
- spin_unlock(&d->lock);
- tasklet_schedule(&d->task);
- } else {
- omap_dma_start_desc(c);
- }
- }
+ if (vchan_issue_pending(&c->vc) && !c->desc)
+ omap_dma_start_desc(c);
spin_unlock_irqrestore(&c->vc.lock, flags);
}
@@ -768,7 +730,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
struct scatterlist *sgent;
struct omap_desc *d;
dma_addr_t dev_addr;
- unsigned i, j = 0, es, en, frame_bytes;
+ unsigned i, es, en, frame_bytes;
u32 burst;
if (dir == DMA_DEV_TO_MEM) {
@@ -845,13 +807,12 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
en = burst;
frame_bytes = es_bytes[es] * en;
for_each_sg(sgl, sgent, sglen, i) {
- d->sg[j].addr = sg_dma_address(sgent);
- d->sg[j].en = en;
- d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
- j++;
+ d->sg[i].addr = sg_dma_address(sgent);
+ d->sg[i].en = en;
+ d->sg[i].fn = sg_dma_len(sgent) / frame_bytes;
}
- d->sglen = j;
+ d->sglen = sglen;
return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
@@ -1018,17 +979,11 @@ static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config
static int omap_dma_terminate_all(struct dma_chan *chan)
{
struct omap_chan *c = to_omap_dma_chan(chan);
- struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&c->vc.lock, flags);
- /* Prevent this channel being scheduled */
- spin_lock(&d->lock);
- list_del_init(&c->node);
- spin_unlock(&d->lock);
-
/*
* Stop DMA activity: we assume the callback will not be called
* after omap_dma_stop() returns (even if it does, it will see
@@ -1102,14 +1057,12 @@ static int omap_dma_chan_init(struct omap_dmadev *od)
c->reg_map = od->reg_map;
c->vc.desc_free = omap_dma_desc_free;
vchan_init(&c->vc, &od->ddev);
- INIT_LIST_HEAD(&c->node);
return 0;
}
static void omap_dma_free(struct omap_dmadev *od)
{
- tasklet_kill(&od->task);
while (!list_empty(&od->ddev.channels)) {
struct omap_chan *c = list_first_entry(&od->ddev.channels,
struct omap_chan, vc.chan.device_node);
@@ -1165,12 +1118,9 @@ static int omap_dma_probe(struct platform_device *pdev)
od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
od->ddev.dev = &pdev->dev;
INIT_LIST_HEAD(&od->ddev.channels);
- INIT_LIST_HEAD(&od->pending);
spin_lock_init(&od->lock);
spin_lock_init(&od->irq_lock);
- tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);
-
od->dma_requests = OMAP_SDMA_REQUESTS;
if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
"dma-requests",
@@ -1203,6 +1153,10 @@ static int omap_dma_probe(struct platform_device *pdev)
return rc;
}
+ od->ddev.filter.map = od->plat->slave_map;
+ od->ddev.filter.mapcnt = od->plat->slavecnt;
+ od->ddev.filter.fn = omap_dma_filter_fn;
+
rc = dma_async_device_register(&od->ddev);
if (rc) {
pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index fc4156afa070..f2a0310ae771 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -1414,6 +1414,7 @@ static int pxad_probe(struct platform_device *op)
pdev->slave.dst_addr_widths = widths;
pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ pdev->slave.descriptor_reuse = true;
pdev->slave.dev = &op->dev;
ret = pxad_init_dmadev(op, pdev, dma_channels);
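Note: descriptor_reuse is now also reported through dma_get_slave_caps() (see the dmaengine.c hunk above), so a client can check the capability before recycling descriptors. A hedged sketch:

	/* Sketch: only ask for descriptor reuse if the channel supports it. */
	static bool example_can_reuse_desc(struct dma_chan *chan)
	{
		struct dma_slave_caps caps;

		if (dma_get_slave_caps(chan, &caps))
			return false;
		return caps.descriptor_reuse;
	}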
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 9fda65af841e..f32c430eb16c 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -47,12 +47,6 @@ config RCAR_DMAC
This driver supports the general purpose DMA controller found in the
Renesas R-Car second generation SoCs.
-config RCAR_HPB_DMAE
- tristate "Renesas R-Car HPB DMAC support"
- depends on SH_DMAE_BASE
- help
- Enable support for the Renesas R-Car series DMA controllers.
-
config RENESAS_USB_DMAC
tristate "Renesas USB-DMA Controller"
depends on ARCH_SHMOBILE || COMPILE_TEST
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index 0133e4658196..f1e2fd64f279 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -14,6 +14,5 @@ shdma-objs := $(shdma-y)
obj-$(CONFIG_SH_DMAE) += shdma.o
obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
-obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
obj-$(CONFIG_SUDMAC) += sudmac.o
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
deleted file mode 100644
index 749f26ecd3b3..000000000000
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ /dev/null
@@ -1,669 +0,0 @@
-/*
- * Copyright (C) 2011-2013 Renesas Electronics Corporation
- * Copyright (C) 2013 Cogent Embedded, Inc.
- *
- * This file is based on the drivers/dma/sh/shdma.c
- *
- * Renesas SuperH DMA Engine support
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * - DMA of SuperH does not have Hardware DMA chain mode.
- * - max DMA size is 16MB.
- *
- */
-
-#include <linux/dmaengine.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/platform_data/dma-rcar-hpbdma.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/shdma-base.h>
-#include <linux/slab.h>
-
-/* DMA channel registers */
-#define HPB_DMAE_DSAR0 0x00
-#define HPB_DMAE_DDAR0 0x04
-#define HPB_DMAE_DTCR0 0x08
-#define HPB_DMAE_DSAR1 0x0C
-#define HPB_DMAE_DDAR1 0x10
-#define HPB_DMAE_DTCR1 0x14
-#define HPB_DMAE_DSASR 0x18
-#define HPB_DMAE_DDASR 0x1C
-#define HPB_DMAE_DTCSR 0x20
-#define HPB_DMAE_DPTR 0x24
-#define HPB_DMAE_DCR 0x28
-#define HPB_DMAE_DCMDR 0x2C
-#define HPB_DMAE_DSTPR 0x30
-#define HPB_DMAE_DSTSR 0x34
-#define HPB_DMAE_DDBGR 0x38
-#define HPB_DMAE_DDBGR2 0x3C
-#define HPB_DMAE_CHAN(n) (0x40 * (n))
-
-/* DMA command register (DCMDR) bits */
-#define HPB_DMAE_DCMDR_BDOUT BIT(7)
-#define HPB_DMAE_DCMDR_DQSPD BIT(6)
-#define HPB_DMAE_DCMDR_DQSPC BIT(5)
-#define HPB_DMAE_DCMDR_DMSPD BIT(4)
-#define HPB_DMAE_DCMDR_DMSPC BIT(3)
-#define HPB_DMAE_DCMDR_DQEND BIT(2)
-#define HPB_DMAE_DCMDR_DNXT BIT(1)
-#define HPB_DMAE_DCMDR_DMEN BIT(0)
-
-/* DMA forced stop register (DSTPR) bits */
-#define HPB_DMAE_DSTPR_DMSTP BIT(0)
-
-/* DMA status register (DSTSR) bits */
-#define HPB_DMAE_DSTSR_DQSTS BIT(2)
-#define HPB_DMAE_DSTSR_DMSTS BIT(0)
-
-/* DMA common registers */
-#define HPB_DMAE_DTIMR 0x00
-#define HPB_DMAE_DINTSR0 0x0C
-#define HPB_DMAE_DINTSR1 0x10
-#define HPB_DMAE_DINTCR0 0x14
-#define HPB_DMAE_DINTCR1 0x18
-#define HPB_DMAE_DINTMR0 0x1C
-#define HPB_DMAE_DINTMR1 0x20
-#define HPB_DMAE_DACTSR0 0x24
-#define HPB_DMAE_DACTSR1 0x28
-#define HPB_DMAE_HSRSTR(n) (0x40 + (n) * 4)
-#define HPB_DMAE_HPB_DMASPR(n) (0x140 + (n) * 4)
-#define HPB_DMAE_HPB_DMLVLR0 0x160
-#define HPB_DMAE_HPB_DMLVLR1 0x164
-#define HPB_DMAE_HPB_DMSHPT0 0x168
-#define HPB_DMAE_HPB_DMSHPT1 0x16C
-
-#define HPB_DMA_SLAVE_NUMBER 256
-#define HPB_DMA_TCR_MAX 0x01000000 /* 16 MiB */
-
-struct hpb_dmae_chan {
- struct shdma_chan shdma_chan;
- int xfer_mode; /* DMA transfer mode */
-#define XFER_SINGLE 1
-#define XFER_DOUBLE 2
- unsigned plane_idx; /* current DMA information set */
- bool first_desc; /* first/next transfer */
- int xmit_shift; /* log_2(bytes_per_xfer) */
- void __iomem *base;
- const struct hpb_dmae_slave_config *cfg;
- char dev_id[16]; /* unique name per DMAC of channel */
- dma_addr_t slave_addr;
-};
-
-struct hpb_dmae_device {
- struct shdma_dev shdma_dev;
- spinlock_t reg_lock; /* comm_reg operation lock */
- struct hpb_dmae_pdata *pdata;
- void __iomem *chan_reg;
- void __iomem *comm_reg;
- void __iomem *reset_reg;
- void __iomem *mode_reg;
-};
-
-struct hpb_dmae_regs {
- u32 sar; /* SAR / source address */
- u32 dar; /* DAR / destination address */
- u32 tcr; /* TCR / transfer count */
-};
-
-struct hpb_desc {
- struct shdma_desc shdma_desc;
- struct hpb_dmae_regs hw;
- unsigned plane_idx;
-};
-
-#define to_chan(schan) container_of(schan, struct hpb_dmae_chan, shdma_chan)
-#define to_desc(sdesc) container_of(sdesc, struct hpb_desc, shdma_desc)
-#define to_dev(sc) container_of(sc->shdma_chan.dma_chan.device, \
- struct hpb_dmae_device, shdma_dev.dma_dev)
-
-static void ch_reg_write(struct hpb_dmae_chan *hpb_dc, u32 data, u32 reg)
-{
- iowrite32(data, hpb_dc->base + reg);
-}
-
-static u32 ch_reg_read(struct hpb_dmae_chan *hpb_dc, u32 reg)
-{
- return ioread32(hpb_dc->base + reg);
-}
-
-static void dcmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
-{
- iowrite32(data, hpbdev->chan_reg + HPB_DMAE_DCMDR);
-}
-
-static void hsrstr_write(struct hpb_dmae_device *hpbdev, u32 ch)
-{
- iowrite32(0x1, hpbdev->comm_reg + HPB_DMAE_HSRSTR(ch));
-}
-
-static u32 dintsr_read(struct hpb_dmae_device *hpbdev, u32 ch)
-{
- u32 v;
-
- if (ch < 32)
- v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR0) >> ch;
- else
- v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR1) >> (ch - 32);
- return v & 0x1;
-}
-
-static void dintcr_write(struct hpb_dmae_device *hpbdev, u32 ch)
-{
- if (ch < 32)
- iowrite32((0x1 << ch), hpbdev->comm_reg + HPB_DMAE_DINTCR0);
- else
- iowrite32((0x1 << (ch - 32)),
- hpbdev->comm_reg + HPB_DMAE_DINTCR1);
-}
-
-static void asyncmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
-{
- iowrite32(data, hpbdev->mode_reg);
-}
-
-static u32 asyncmdr_read(struct hpb_dmae_device *hpbdev)
-{
- return ioread32(hpbdev->mode_reg);
-}
-
-static void hpb_dmae_enable_int(struct hpb_dmae_device *hpbdev, u32 ch)
-{
- u32 intreg;
-
- spin_lock_irq(&hpbdev->reg_lock);
- if (ch < 32) {
- intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR0);
- iowrite32(BIT(ch) | intreg,
- hpbdev->comm_reg + HPB_DMAE_DINTMR0);
- } else {
- intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR1);
- iowrite32(BIT(ch - 32) | intreg,
- hpbdev->comm_reg + HPB_DMAE_DINTMR1);
- }
- spin_unlock_irq(&hpbdev->reg_lock);
-}
-
-static void hpb_dmae_async_reset(struct hpb_dmae_device *hpbdev, u32 data)
-{
- u32 rstr;
- int timeout = 10000; /* 100 ms */
-
- spin_lock(&hpbdev->reg_lock);
- rstr = ioread32(hpbdev->reset_reg);
- rstr |= data;
- iowrite32(rstr, hpbdev->reset_reg);
- do {
- rstr = ioread32(hpbdev->reset_reg);
- if ((rstr & data) == data)
- break;
- udelay(10);
- } while (timeout--);
-
- if (timeout < 0)
- dev_err(hpbdev->shdma_dev.dma_dev.dev,
- "%s timeout\n", __func__);
-
- rstr &= ~data;
- iowrite32(rstr, hpbdev->reset_reg);
- spin_unlock(&hpbdev->reg_lock);
-}
-
-static void hpb_dmae_set_async_mode(struct hpb_dmae_device *hpbdev,
- u32 mask, u32 data)
-{
- u32 mode;
-
- spin_lock_irq(&hpbdev->reg_lock);
- mode = asyncmdr_read(hpbdev);
- mode &= ~mask;
- mode |= data;
- asyncmdr_write(hpbdev, mode);
- spin_unlock_irq(&hpbdev->reg_lock);
-}
-
-static void hpb_dmae_ctl_stop(struct hpb_dmae_device *hpbdev)
-{
- dcmdr_write(hpbdev, HPB_DMAE_DCMDR_DQSPD);
-}
-
-static void hpb_dmae_reset(struct hpb_dmae_device *hpbdev)
-{
- u32 ch;
-
- for (ch = 0; ch < hpbdev->pdata->num_hw_channels; ch++)
- hsrstr_write(hpbdev, ch);
-}
-
-static unsigned int calc_xmit_shift(struct hpb_dmae_chan *hpb_chan)
-{
- struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
- struct hpb_dmae_pdata *pdata = hpbdev->pdata;
- int width = ch_reg_read(hpb_chan, HPB_DMAE_DCR);
- int i;
-
- switch (width & (HPB_DMAE_DCR_SPDS_MASK | HPB_DMAE_DCR_DPDS_MASK)) {
- case HPB_DMAE_DCR_SPDS_8BIT | HPB_DMAE_DCR_DPDS_8BIT:
- default:
- i = XMIT_SZ_8BIT;
- break;
- case HPB_DMAE_DCR_SPDS_16BIT | HPB_DMAE_DCR_DPDS_16BIT:
- i = XMIT_SZ_16BIT;
- break;
- case HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT:
- i = XMIT_SZ_32BIT;
- break;
- }
- return pdata->ts_shift[i];
-}
-
-static void hpb_dmae_set_reg(struct hpb_dmae_chan *hpb_chan,
- struct hpb_dmae_regs *hw, unsigned plane)
-{
- ch_reg_write(hpb_chan, hw->sar,
- plane ? HPB_DMAE_DSAR1 : HPB_DMAE_DSAR0);
- ch_reg_write(hpb_chan, hw->dar,
- plane ? HPB_DMAE_DDAR1 : HPB_DMAE_DDAR0);
- ch_reg_write(hpb_chan, hw->tcr >> hpb_chan->xmit_shift,
- plane ? HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
-}
-
-static void hpb_dmae_start(struct hpb_dmae_chan *hpb_chan, bool next)
-{
- ch_reg_write(hpb_chan, (next ? HPB_DMAE_DCMDR_DNXT : 0) |
- HPB_DMAE_DCMDR_DMEN, HPB_DMAE_DCMDR);
-}
-
-static void hpb_dmae_halt(struct shdma_chan *schan)
-{
- struct hpb_dmae_chan *chan = to_chan(schan);
-
- ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
- ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);
-
- chan->plane_idx = 0;
- chan->first_desc = true;
-}
-
-static const struct hpb_dmae_slave_config *
-hpb_dmae_find_slave(struct hpb_dmae_chan *hpb_chan, int slave_id)
-{
- struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
- struct hpb_dmae_pdata *pdata = hpbdev->pdata;
- int i;
-
- if (slave_id >= HPB_DMA_SLAVE_NUMBER)
- return NULL;
-
- for (i = 0; i < pdata->num_slaves; i++)
- if (pdata->slaves[i].id == slave_id)
- return pdata->slaves + i;
-
- return NULL;
-}
-
-static void hpb_dmae_start_xfer(struct shdma_chan *schan,
- struct shdma_desc *sdesc)
-{
- struct hpb_dmae_chan *chan = to_chan(schan);
- struct hpb_dmae_device *hpbdev = to_dev(chan);
- struct hpb_desc *desc = to_desc(sdesc);
-
- if (chan->cfg->flags & HPB_DMAE_SET_ASYNC_RESET)
- hpb_dmae_async_reset(hpbdev, chan->cfg->rstr);
-
- desc->plane_idx = chan->plane_idx;
- hpb_dmae_set_reg(chan, &desc->hw, chan->plane_idx);
- hpb_dmae_start(chan, !chan->first_desc);
-
- if (chan->xfer_mode == XFER_DOUBLE) {
- chan->plane_idx ^= 1;
- chan->first_desc = false;
- }
-}
-
-static bool hpb_dmae_desc_completed(struct shdma_chan *schan,
- struct shdma_desc *sdesc)
-{
- /*
- * This is correct since we always have at most single
- * outstanding DMA transfer per channel, and by the time
- * we get completion interrupt the transfer is completed.
- * This will change if we ever use alternating DMA
- * information sets and submit two descriptors at once.
- */
- return true;
-}
-
-static bool hpb_dmae_chan_irq(struct shdma_chan *schan, int irq)
-{
- struct hpb_dmae_chan *chan = to_chan(schan);
- struct hpb_dmae_device *hpbdev = to_dev(chan);
- int ch = chan->cfg->dma_ch;
-
- /* Check Complete DMA Transfer */
- if (dintsr_read(hpbdev, ch)) {
- /* Clear Interrupt status */
- dintcr_write(hpbdev, ch);
- return true;
- }
- return false;
-}
-
-static int hpb_dmae_desc_setup(struct shdma_chan *schan,
- struct shdma_desc *sdesc,
- dma_addr_t src, dma_addr_t dst, size_t *len)
-{
- struct hpb_desc *desc = to_desc(sdesc);
-
- if (*len > (size_t)HPB_DMA_TCR_MAX)
- *len = (size_t)HPB_DMA_TCR_MAX;
-
- desc->hw.sar = src;
- desc->hw.dar = dst;
- desc->hw.tcr = *len;
-
- return 0;
-}
-
-static size_t hpb_dmae_get_partial(struct shdma_chan *schan,
- struct shdma_desc *sdesc)
-{
- struct hpb_desc *desc = to_desc(sdesc);
- struct hpb_dmae_chan *chan = to_chan(schan);
- u32 tcr = ch_reg_read(chan, desc->plane_idx ?
- HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
-
- return (desc->hw.tcr - tcr) << chan->xmit_shift;
-}
-
-static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
-{
- struct hpb_dmae_chan *chan = to_chan(schan);
- u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);
-
- if (chan->xfer_mode == XFER_DOUBLE)
- return dstsr & HPB_DMAE_DSTSR_DQSTS;
- else
- return dstsr & HPB_DMAE_DSTSR_DMSTS;
-}
-
-static int
-hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
- const struct hpb_dmae_slave_config *cfg)
-{
- struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
- struct hpb_dmae_pdata *pdata = hpbdev->pdata;
- const struct hpb_dmae_channel *channel = pdata->channels;
- int slave_id = cfg->id;
- int i, err;
-
- for (i = 0; i < pdata->num_channels; i++, channel++) {
- if (channel->s_id == slave_id) {
- struct device *dev = hpb_chan->shdma_chan.dev;
-
- hpb_chan->base = hpbdev->chan_reg +
- HPB_DMAE_CHAN(cfg->dma_ch);
-
- dev_dbg(dev, "Detected Slave device\n");
- dev_dbg(dev, " -- slave_id : 0x%x\n", slave_id);
- dev_dbg(dev, " -- cfg->dma_ch : %d\n", cfg->dma_ch);
- dev_dbg(dev, " -- channel->ch_irq: %d\n",
- channel->ch_irq);
- break;
- }
- }
-
- err = shdma_request_irq(&hpb_chan->shdma_chan, channel->ch_irq,
- IRQF_SHARED, hpb_chan->dev_id);
- if (err) {
- dev_err(hpb_chan->shdma_chan.dev,
- "DMA channel request_irq %d failed with error %d\n",
- channel->ch_irq, err);
- return err;
- }
-
- hpb_chan->plane_idx = 0;
- hpb_chan->first_desc = true;
-
- if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == 0) {
- hpb_chan->xfer_mode = XFER_SINGLE;
- } else if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) ==
- (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) {
- hpb_chan->xfer_mode = XFER_DOUBLE;
- } else {
- dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
- return -EINVAL;
- }
-
- if (cfg->flags & HPB_DMAE_SET_ASYNC_MODE)
- hpb_dmae_set_async_mode(hpbdev, cfg->mdm, cfg->mdr);
- ch_reg_write(hpb_chan, cfg->dcr, HPB_DMAE_DCR);
- ch_reg_write(hpb_chan, cfg->port, HPB_DMAE_DPTR);
- hpb_chan->xmit_shift = calc_xmit_shift(hpb_chan);
- hpb_dmae_enable_int(hpbdev, cfg->dma_ch);
-
- return 0;
-}
-
-static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id,
- dma_addr_t slave_addr, bool try)
-{
- struct hpb_dmae_chan *chan = to_chan(schan);
- const struct hpb_dmae_slave_config *sc =
- hpb_dmae_find_slave(chan, slave_id);
-
- if (!sc)
- return -ENODEV;
- if (try)
- return 0;
- chan->cfg = sc;
- chan->slave_addr = slave_addr ? : sc->addr;
- return hpb_dmae_alloc_chan_resources(chan, sc);
-}
-
-static void hpb_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
-{
-}
-
-static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
-{
- struct hpb_dmae_chan *chan = to_chan(schan);
-
- return chan->slave_addr;
-}
-
-static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
-{
- return &((struct hpb_desc *)buf)[i].shdma_desc;
-}
-
-static const struct shdma_ops hpb_dmae_ops = {
- .desc_completed = hpb_dmae_desc_completed,
- .halt_channel = hpb_dmae_halt,
- .channel_busy = hpb_dmae_channel_busy,
- .slave_addr = hpb_dmae_slave_addr,
- .desc_setup = hpb_dmae_desc_setup,
- .set_slave = hpb_dmae_set_slave,
- .setup_xfer = hpb_dmae_setup_xfer,
- .start_xfer = hpb_dmae_start_xfer,
- .embedded_desc = hpb_dmae_embedded_desc,
- .chan_irq = hpb_dmae_chan_irq,
- .get_partial = hpb_dmae_get_partial,
-};
-
-static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
-{
- struct shdma_dev *sdev = &hpbdev->shdma_dev;
- struct platform_device *pdev =
- to_platform_device(hpbdev->shdma_dev.dma_dev.dev);
- struct hpb_dmae_chan *new_hpb_chan;
- struct shdma_chan *schan;
-
- /* Alloc channel */
- new_hpb_chan = devm_kzalloc(&pdev->dev,
- sizeof(struct hpb_dmae_chan), GFP_KERNEL);
- if (!new_hpb_chan) {
- dev_err(hpbdev->shdma_dev.dma_dev.dev,
- "No free memory for allocating DMA channels!\n");
- return -ENOMEM;
- }
-
- schan = &new_hpb_chan->shdma_chan;
- schan->max_xfer_len = HPB_DMA_TCR_MAX;
-
- shdma_chan_probe(sdev, schan, id);
-
- if (pdev->id >= 0)
- snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
- "hpb-dmae%d.%d", pdev->id, id);
- else
- snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
- "hpb-dma.%d", id);
-
- return 0;
-}
-
-static int hpb_dmae_probe(struct platform_device *pdev)
-{
- const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
- DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES;
- struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
- struct hpb_dmae_device *hpbdev;
- struct dma_device *dma_dev;
- struct resource *chan, *comm, *rest, *mode, *irq_res;
- int err, i;
-
- /* Get platform data */
- if (!pdata || !pdata->num_channels)
- return -ENODEV;
-
- chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- comm = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- rest = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- mode = platform_get_resource(pdev, IORESOURCE_MEM, 3);
-
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq_res)
- return -ENODEV;
-
- hpbdev = devm_kzalloc(&pdev->dev, sizeof(struct hpb_dmae_device),
- GFP_KERNEL);
- if (!hpbdev) {
- dev_err(&pdev->dev, "Not enough memory\n");
- return -ENOMEM;
- }
-
- hpbdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
- if (IS_ERR(hpbdev->chan_reg))
- return PTR_ERR(hpbdev->chan_reg);
-
- hpbdev->comm_reg = devm_ioremap_resource(&pdev->dev, comm);
- if (IS_ERR(hpbdev->comm_reg))
- return PTR_ERR(hpbdev->comm_reg);
-
- hpbdev->reset_reg = devm_ioremap_resource(&pdev->dev, rest);
- if (IS_ERR(hpbdev->reset_reg))
- return PTR_ERR(hpbdev->reset_reg);
-
- hpbdev->mode_reg = devm_ioremap_resource(&pdev->dev, mode);
- if (IS_ERR(hpbdev->mode_reg))
- return PTR_ERR(hpbdev->mode_reg);
-
- dma_dev = &hpbdev->shdma_dev.dma_dev;
-
- spin_lock_init(&hpbdev->reg_lock);
-
- /* Platform data */
- hpbdev->pdata = pdata;
-
- pm_runtime_enable(&pdev->dev);
- err = pm_runtime_get_sync(&pdev->dev);
- if (err < 0)
- dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
-
- /* Reset DMA controller */
- hpb_dmae_reset(hpbdev);
-
- pm_runtime_put(&pdev->dev);
-
- dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
- dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
- dma_dev->src_addr_widths = widths;
- dma_dev->dst_addr_widths = widths;
- dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
- dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
-
- hpbdev->shdma_dev.ops = &hpb_dmae_ops;
- hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
- err = shdma_init(&pdev->dev, &hpbdev->shdma_dev, pdata->num_channels);
- if (err < 0)
- goto error;
-
- /* Create DMA channels */
- for (i = 0; i < pdata->num_channels; i++)
- hpb_dmae_chan_probe(hpbdev, i);
-
- platform_set_drvdata(pdev, hpbdev);
- err = dma_async_device_register(dma_dev);
- if (!err)
- return 0;
-
- shdma_cleanup(&hpbdev->shdma_dev);
-error:
- pm_runtime_disable(&pdev->dev);
- return err;
-}
-
-static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
-{
- struct shdma_chan *schan;
- int i;
-
- shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
- BUG_ON(!schan);
-
- shdma_chan_remove(schan);
- }
-}
-
-static int hpb_dmae_remove(struct platform_device *pdev)
-{
- struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
-
- dma_async_device_unregister(&hpbdev->shdma_dev.dma_dev);
-
- pm_runtime_disable(&pdev->dev);
-
- hpb_dmae_chan_remove(hpbdev);
-
- return 0;
-}
-
-static void hpb_dmae_shutdown(struct platform_device *pdev)
-{
- struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
- hpb_dmae_ctl_stop(hpbdev);
-}
-
-static struct platform_driver hpb_dmae_driver = {
- .probe = hpb_dmae_probe,
- .remove = hpb_dmae_remove,
- .shutdown = hpb_dmae_shutdown,
- .driver = {
- .name = "hpb-dma-engine",
- },
-};
-module_platform_driver(hpb_dmae_driver);
-
-MODULE_AUTHOR("Max Filippov <max.filippov@cogentembedded.com>");
-MODULE_DESCRIPTION("Renesas HPB DMA Engine driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index ebd8a5f398b0..16fb33006a17 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -448,7 +448,7 @@ usb_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
{
struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
- struct usb_dmac_desc *desc;
+ struct usb_dmac_desc *desc, *_desc;
unsigned long flags;
LIST_HEAD(head);
LIST_HEAD(list);
@@ -459,7 +459,7 @@ static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
if (uchan->desc)
uchan->desc = NULL;
list_splice_init(&uchan->desc_got, &list);
- list_for_each_entry(desc, &list, node)
+ list_for_each_entry_safe(desc, _desc, &list, node)
list_move_tail(&desc->node, &uchan->desc_freed);
spin_unlock_irqrestore(&uchan->vc.lock, flags);
vchan_dma_desc_free_list(&uchan->vc, &head);
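
The usb-dmac hunk above switches to the _safe list iterator because each pass moves the current descriptor onto another list, which would otherwise invalidate the iterator's next pointer. A minimal sketch of the pattern, with made-up type and function names (not taken from the driver):

#include <linux/list.h>

struct demo_desc {
	struct list_head node;
};

/* Move every descriptor from @got to @freed while walking @got. */
static void demo_requeue_all(struct list_head *got, struct list_head *freed)
{
	struct demo_desc *desc, *tmp;

	/*
	 * list_for_each_entry_safe() caches the next entry in @tmp before the
	 * body runs, so list_move_tail() on the current node is safe here.
	 */
	list_for_each_entry_safe(desc, tmp, got, node)
		list_move_tail(&desc->node, freed);
}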
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index dd3e7ba273ad..6fb8307468ab 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3543,8 +3543,8 @@ static int __init d40_probe(struct platform_device *pdev)
struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
struct device_node *np = pdev->dev.of_node;
int ret = -ENOENT;
- struct d40_base *base = NULL;
- struct resource *res = NULL;
+ struct d40_base *base;
+ struct resource *res;
int num_reserved_chans;
u32 val;
@@ -3552,17 +3552,17 @@ static int __init d40_probe(struct platform_device *pdev)
if (np) {
if (d40_of_probe(pdev, np)) {
ret = -ENOMEM;
- goto failure;
+ goto report_failure;
}
} else {
d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
- goto failure;
+ goto report_failure;
}
}
base = d40_hw_detect_init(pdev);
if (!base)
- goto failure;
+ goto report_failure;
num_reserved_chans = d40_phy_res_init(base);
@@ -3693,51 +3693,48 @@ static int __init d40_probe(struct platform_device *pdev)
return 0;
failure:
- if (base) {
- if (base->desc_slab)
- kmem_cache_destroy(base->desc_slab);
- if (base->virtbase)
- iounmap(base->virtbase);
-
- if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
- iounmap(base->lcla_pool.base);
- base->lcla_pool.base = NULL;
- }
+ kmem_cache_destroy(base->desc_slab);
+ if (base->virtbase)
+ iounmap(base->virtbase);
- if (base->lcla_pool.dma_addr)
- dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
- SZ_1K * base->num_phy_chans,
- DMA_TO_DEVICE);
-
- if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
- free_pages((unsigned long)base->lcla_pool.base,
- base->lcla_pool.pages);
-
- kfree(base->lcla_pool.base_unaligned);
-
- if (base->phy_lcpa)
- release_mem_region(base->phy_lcpa,
- base->lcpa_size);
- if (base->phy_start)
- release_mem_region(base->phy_start,
- base->phy_size);
- if (base->clk) {
- clk_disable_unprepare(base->clk);
- clk_put(base->clk);
- }
+ if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
+ iounmap(base->lcla_pool.base);
+ base->lcla_pool.base = NULL;
+ }
- if (base->lcpa_regulator) {
- regulator_disable(base->lcpa_regulator);
- regulator_put(base->lcpa_regulator);
- }
+ if (base->lcla_pool.dma_addr)
+ dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
+ SZ_1K * base->num_phy_chans,
+ DMA_TO_DEVICE);
- kfree(base->lcla_pool.alloc_map);
- kfree(base->lookup_log_chans);
- kfree(base->lookup_phy_chans);
- kfree(base->phy_res);
- kfree(base);
+ if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
+ free_pages((unsigned long)base->lcla_pool.base,
+ base->lcla_pool.pages);
+
+ kfree(base->lcla_pool.base_unaligned);
+
+ if (base->phy_lcpa)
+ release_mem_region(base->phy_lcpa,
+ base->lcpa_size);
+ if (base->phy_start)
+ release_mem_region(base->phy_start,
+ base->phy_size);
+ if (base->clk) {
+ clk_disable_unprepare(base->clk);
+ clk_put(base->clk);
+ }
+
+ if (base->lcpa_regulator) {
+ regulator_disable(base->lcpa_regulator);
+ regulator_put(base->lcpa_regulator);
}
+ kfree(base->lcla_pool.alloc_map);
+ kfree(base->lookup_log_chans);
+ kfree(base->lookup_phy_chans);
+ kfree(base->phy_res);
+ kfree(base);
+report_failure:
d40_err(&pdev->dev, "probe failed\n");
return ret;
}
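
The ste_dma40 rework above leans on two points: kmem_cache_destroy() is a no-op for a NULL pointer, so the explicit check can go, and a second label (report_failure) lets failures that happen before anything is allocated skip the unwind entirely instead of guarding every free with an if (base) test. A simplified sketch of that two-label pattern, using hypothetical names rather than the driver's actual code:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_base {
	struct kmem_cache *slab;
};

struct demo_base *demo_detect_init(struct platform_device *pdev);
int demo_setup(struct demo_base *base);

static int demo_probe(struct platform_device *pdev)
{
	struct demo_base *base;
	int ret = -ENOENT;

	base = demo_detect_init(pdev);
	if (!base)
		goto report_failure;	/* nothing allocated yet, just report */

	ret = demo_setup(base);
	if (ret)
		goto failure;		/* unwind everything @base owns */

	return 0;

failure:
	kmem_cache_destroy(base->slab);	/* NULL-safe, no if () needed */
	kfree(base);
report_failure:
	dev_err(&pdev->dev, "probe failed\n");
	return ret;
}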
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 6f80432a3f0a..a35c211857dd 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_irqsave(&vc->lock, flags);
cookie = dma_cookie_assign(tx);
- list_add_tail(&vd->node, &vc->desc_submitted);
+ list_move_tail(&vd->node, &vc->desc_submitted);
spin_unlock_irqrestore(&vc->lock, flags);
dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -39,6 +39,33 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
}
EXPORT_SYMBOL_GPL(vchan_tx_submit);
+/**
+ * vchan_tx_desc_free - free a reusable descriptor
+ * @tx: the transfer
+ *
+ * This function frees a previously allocated reusable descriptor. The only
+ * other way to release it is to clear the DMA_CTRL_REUSE flag and submit
+ * the transfer one last time.
+ *
+ * Returns 0 upon success
+ */
+int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
+{
+ struct virt_dma_chan *vc = to_virt_chan(tx->chan);
+ struct virt_dma_desc *vd = to_virt_desc(tx);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->lock, flags);
+ list_del(&vd->node);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
+ vc, vd, vd->tx.cookie);
+ vc->desc_free(vd);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
+
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
dma_cookie_t cookie)
{
@@ -83,8 +110,10 @@ static void vchan_complete(unsigned long arg)
cb_data = vd->tx.callback_param;
list_del(&vd->node);
-
- vc->desc_free(vd);
+ if (dmaengine_desc_test_reuse(&vd->tx))
+ list_add(&vd->node, &vc->desc_allocated);
+ else
+ vc->desc_free(vd);
if (cb)
cb(cb_data);
@@ -96,9 +125,13 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
while (!list_empty(head)) {
struct virt_dma_desc *vd = list_first_entry(head,
struct virt_dma_desc, node);
- list_del(&vd->node);
- dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
- vc->desc_free(vd);
+ if (dmaengine_desc_test_reuse(&vd->tx)) {
+ list_move_tail(&vd->node, &vc->desc_allocated);
+ } else {
+ dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
+ list_del(&vd->node);
+ vc->desc_free(vd);
+ }
}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -108,6 +141,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
dma_cookie_init(&vc->chan);
spin_lock_init(&vc->lock);
+ INIT_LIST_HEAD(&vc->desc_allocated);
INIT_LIST_HEAD(&vc->desc_submitted);
INIT_LIST_HEAD(&vc->desc_issued);
INIT_LIST_HEAD(&vc->desc_completed);
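
The virt-dma changes above add a desc_allocated list so that descriptors marked for reuse survive completion instead of being freed, and vchan_tx_desc_free() gives clients an explicit release path. Assuming the standard dmaengine reuse helpers (dmaengine_desc_set_reuse(), dmaengine_desc_free()), a client-side flow might look roughly like this sketch; it is illustrative, not taken from an in-tree driver:

#include <linux/dmaengine.h>

static int demo_submit_reusable(struct dma_chan *chan, dma_addr_t buf,
				size_t len)
{
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	/* Ask for reuse; the channel may refuse if it lacks the capability. */
	if (dmaengine_desc_set_reuse(txd))
		return -EPERM;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);

	/*
	 * On completion the descriptor is parked on desc_allocated rather
	 * than freed; resubmit it as needed and finally drop it with
	 * dmaengine_desc_free(txd).
	 */
	return 0;
}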
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 2fa47745a41f..d9731ca5e262 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -29,6 +29,7 @@ struct virt_dma_chan {
spinlock_t lock;
/* protected by vc.lock */
+ struct list_head desc_allocated;
struct list_head desc_submitted;
struct list_head desc_issued;
struct list_head desc_completed;
@@ -55,10 +56,17 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
struct virt_dma_desc *vd, unsigned long tx_flags)
{
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
+ extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
+ unsigned long flags;
dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
vd->tx.flags = tx_flags;
vd->tx.tx_submit = vchan_tx_submit;
+ vd->tx.desc_free = vchan_tx_desc_free;
+
+ spin_lock_irqsave(&vc->lock, flags);
+ list_add_tail(&vd->node, &vc->desc_allocated);
+ spin_unlock_irqrestore(&vc->lock, flags);
return &vd->tx;
}
@@ -134,6 +142,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
struct list_head *head)
{
+ list_splice_tail_init(&vc->desc_allocated, head);
list_splice_tail_init(&vc->desc_submitted, head);
list_splice_tail_init(&vc->desc_issued, head);
list_splice_tail_init(&vc->desc_completed, head);
@@ -141,14 +150,30 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
+ struct virt_dma_desc *vd;
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&vc->lock, flags);
vchan_get_all_descriptors(vc, &head);
+ list_for_each_entry(vd, &head, node)
+ dmaengine_desc_clear_reuse(&vd->tx);
spin_unlock_irqrestore(&vc->lock, flags);
vchan_dma_desc_free_list(vc, &head);
}
+/**
+ * vchan_synchronize() - synchronize callback execution to the current context
+ * @vc: virtual channel to synchronize
+ *
+ * Makes sure that all scheduled or active callbacks have finished running. For
+ * proper operation the caller has to ensure that no new callbacks are
+ * scheduled once this function has been invoked.
+ */
+static inline void vchan_synchronize(struct virt_dma_chan *vc)
+{
+ tasklet_kill(&vc->task);
+}
+
#endif
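
The new vchan_synchronize() helper simply kills the channel's completion tasklet, so a virt-dma based driver could expose it through the dmaengine synchronize callback along these lines. This is a sketch with an illustrative driver name, and it assumes the dma_device structure in this tree already provides the device_synchronize hook:

#include "virt-dma.h"

static void demo_dma_synchronize(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);

	/* Blocks until any scheduled or running callback tasklet is done. */
	vchan_synchronize(vc);
}

/* During probe:
 *	dma_dev->device_synchronize = demo_dma_synchronize;
 */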