author    Linus Torvalds <torvalds@linux-foundation.org>  2022-05-29 11:38:27 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-05-29 11:38:27 -0700
commit    b00ed48bb0a7c295facf9036135a573a5cdbe7de (patch)
tree      a14d3702971e30dfec8392af4f7b9e97d2246f95 /drivers/dma
parent    Merge tag 'trace-tools-v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace (diff)
parent    dmaengine: idxd: make idxd_wq_enable() return 0 if wq is already enabled (diff)
Merge tag 'dmaengine-5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine
Pull dmaengine updates from Vinod Koul:
 "Nothing special, this includes support for a couple of new devices,
  new driver support, and a bunch of driver updates.

  New support:
   - Tegra gpcdma driver support
   - Qualcomm SM8350, SM8450 and SC7280 device support
   - Renesas RZN1 dma and platform support

  Updates:
   - stm32 device pause/resume support and updates
   - DMA memset ops documentation and usage clarification
   - deprecate '#dma-channels' & '#dma-requests' bindings
   - driver updates for stm32, ptdma, idxd, etc"

* tag 'dmaengine-5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (87 commits)
  dmaengine: idxd: make idxd_wq_enable() return 0 if wq is already enabled
  dmaengine: sun6i: Add support for the D1 variant
  dmaengine: sun6i: Add support for 34-bit physical addresses
  dmaengine: sun6i: Do not use virt_to_phys
  dt-bindings: dma: sun50i-a64: Add compatible for D1
  dmaengine: tegra: Remove unused switch case
  dmaengine: tegra: Fix uninitialized variable usage
  dmaengine: stm32-dma: add device_pause/device_resume support
  dmaengine: stm32-dma: rename pm ops before dma pause/resume introduction
  dmaengine: stm32-dma: pass DMA_SxSCR value to stm32_dma_handle_chan_done()
  dmaengine: stm32-dma: introduce stm32_dma_sg_inc to manage chan->next_sg
  dmaengine: stm32-dmamux: avoid reset of dmamux if used by coprocessor
  dmaengine: qcom: gpi: Add support for sc7280
  dt-bindings: dma: pl330: Add power-domains
  dmaengine: stm32-mdma: use dev_dbg on non-busy channel spurious it
  dmaengine: stm32-mdma: fix chan initialization in stm32_mdma_irq_handler()
  dmaengine: stm32-mdma: remove GISR1 register
  dmaengine: ti: deprecate '#dma-channels'
  dmaengine: mmp: deprecate '#dma-channels'
  dmaengine: pxa: deprecate '#dma-channels' and '#dma-requests'
  ...
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig                   |   14
-rw-r--r--  drivers/dma/Makefile                  |    1
-rw-r--r--  drivers/dma/amba-pl08x.c              |   11
-rw-r--r--  drivers/dma/at_hdmac.c                |   10
-rw-r--r--  drivers/dma/at_xdmac.c                |    9
-rw-r--r--  drivers/dma/bestcomm/bestcomm.c       |    2
-rw-r--r--  drivers/dma/dma-jz4780.c              |    9
-rw-r--r--  drivers/dma/dmaengine.c               |    7
-rw-r--r--  drivers/dma/dmatest.c                 |   13
-rw-r--r--  drivers/dma/dw/Kconfig                |    9
-rw-r--r--  drivers/dma/dw/Makefile               |    2
-rw-r--r--  drivers/dma/dw/platform.c             |    1
-rw-r--r--  drivers/dma/dw/rzn1-dmamux.c          |  155
-rw-r--r--  drivers/dma/ep93xx_dma.c              |    2
-rw-r--r--  drivers/dma/idxd/cdev.c               |   18
-rw-r--r--  drivers/dma/idxd/device.c             |  151
-rw-r--r--  drivers/dma/idxd/dma.c                |   65
-rw-r--r--  drivers/dma/idxd/idxd.h               |   20
-rw-r--r--  drivers/dma/idxd/init.c               |   30
-rw-r--r--  drivers/dma/idxd/registers.h          |    1
-rw-r--r--  drivers/dma/idxd/sysfs.c              |   12
-rw-r--r--  drivers/dma/mediatek/mtk-cqdma.c      |   12
-rw-r--r--  drivers/dma/mediatek/mtk-hsdma.c      |   13
-rw-r--r--  drivers/dma/mmp_pdma.c                |   14
-rw-r--r--  drivers/dma/mv_xor_v2.c               |    4
-rw-r--r--  drivers/dma/nbpfaxi.c                 |   14
-rw-r--r--  drivers/dma/plx_dma.c                 |    4
-rw-r--r--  drivers/dma/ptdma/ptdma-dev.c         |   36
-rw-r--r--  drivers/dma/ptdma/ptdma-dmaengine.c   |   16
-rw-r--r--  drivers/dma/ptdma/ptdma.h             |   13
-rw-r--r--  drivers/dma/pxa_dma.c                 |   13
-rw-r--r--  drivers/dma/qcom/gpi.c                |   21
-rw-r--r--  drivers/dma/qcom/hidma.c              |   13
-rw-r--r--  drivers/dma/sf-pdma/sf-pdma.c         |   24
-rw-r--r--  drivers/dma/sf-pdma/sf-pdma.h         |    8
-rw-r--r--  drivers/dma/sh/Kconfig                |    2
-rw-r--r--  drivers/dma/sprd-dma.c                |    6
-rw-r--r--  drivers/dma/stm32-dma.c               |  311
-rw-r--r--  drivers/dma/stm32-dmamux.c            |    2
-rw-r--r--  drivers/dma/stm32-mdma.c              |   53
-rw-r--r--  drivers/dma/sun6i-dma.c               |   92
-rw-r--r--  drivers/dma/tegra186-gpc-dma.c        | 1498
-rw-r--r--  drivers/dma/ti/cppi41.c               |    6
-rw-r--r--  drivers/dma/ti/k3-psil-am62.c         |    8
-rw-r--r--  drivers/dma/xilinx/zynqmp_dma.c       |   17
45 files changed, 2420 insertions, 322 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d5de3f77d3aa..487ed4ddc3be 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -163,7 +163,7 @@ config DMA_SUN4I
config DMA_SUN6I
tristate "Allwinner A31 SoCs DMA support"
- depends on MACH_SUN6I || MACH_SUN8I || (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+ depends on ARCH_SUNXI || COMPILE_TEST
depends on RESET_CONTROLLER
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
@@ -629,6 +629,18 @@ config TXX9_DMAC
Support the TXx9 SoC internal DMA controller. This can be
integrated in chips such as the Toshiba TX4927/38/39.
+config TEGRA186_GPC_DMA
+ tristate "NVIDIA Tegra GPC DMA support"
+ depends on (ARCH_TEGRA || COMPILE_TEST) && ARCH_DMA_ADDR_T_64BIT
+ depends on IOMMU_API
+ select DMA_ENGINE
+ help
+ Support for the NVIDIA Tegra General Purpose Central DMA controller.
+ The DMA controller has multiple DMA channels which can be configured
+ for different peripherals like UART, SPI, etc which are on APB bus.
+ This DMA controller transfers data from memory to peripheral FIFO
+ or vice versa. It also supports memory to memory data transfer.
+
config TEGRA20_APB_DMA
tristate "NVIDIA Tegra20 APB DMA support"
depends on ARCH_TEGRA || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 616d926cf2a5..2f1b87ffd7ab 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -72,6 +72,7 @@ obj-$(CONFIG_STM32_MDMA) += stm32-mdma.o
obj-$(CONFIG_SPRD_DMA) += sprd-dma.o
obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
+obj-$(CONFIG_TEGRA186_GPC_DMA) += tegra186-gpc-dma.o
obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
obj-$(CONFIG_TEGRA210_ADMA) += tegra210-adma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index a24882ba3764..a4a794e62ac2 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1535,14 +1535,6 @@ static void pl08x_free_chan_resources(struct dma_chan *chan)
vchan_free_chan_resources(to_virt_chan(chan));
}
-static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
- struct dma_chan *chan, unsigned long flags)
-{
- struct dma_async_tx_descriptor *retval = NULL;
-
- return retval;
-}
-
/*
* Code accessing dma_async_is_complete() in a tight loop may give problems.
* If slaves are relying on interrupts to signal completion this function
@@ -2760,7 +2752,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->memcpy.dev = &adev->dev;
pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
- pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
pl08x->memcpy.device_config = pl08x_config;
@@ -2787,8 +2778,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->slave.dev = &adev->dev;
pl08x->slave.device_free_chan_resources =
pl08x_free_chan_resources;
- pl08x->slave.device_prep_dma_interrupt =
- pl08x_prep_dma_interrupt;
pl08x->slave.device_tx_status = pl08x_dma_tx_status;
pl08x->slave.device_issue_pending = pl08x_issue_pending;
pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 30ae36124b1d..5a50423b7378 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -942,6 +942,7 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
struct at_desc *desc;
void __iomem *vaddr;
dma_addr_t paddr;
+ char fill_pattern;
dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
&dest, value, len, flags);
@@ -963,7 +964,14 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
__func__);
return NULL;
}
- *(u32*)vaddr = value;
+
+ /* Only the first byte of value is to be used according to dmaengine */
+ fill_pattern = (char)value;
+
+ *(u32*)vaddr = (fill_pattern << 24) |
+ (fill_pattern << 16) |
+ (fill_pattern << 8) |
+ fill_pattern;
desc = atc_create_memset_desc(chan, paddr, dest, len);
if (!desc) {
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index def564d1e8fa..3e9d726504e2 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1202,6 +1202,7 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
unsigned long flags;
size_t ublen;
u32 dwidth;
+ char pattern;
/*
* WARNING: The channel configuration is set here since there is no
* dmaengine_slave_config call in this case. Moreover we don't know the
@@ -1244,10 +1245,16 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
+ /* Only the first byte of value is to be used according to dmaengine */
+ pattern = (char)value;
+
ublen = len >> dwidth;
desc->lld.mbr_da = dst_addr;
- desc->lld.mbr_ds = value;
+ desc->lld.mbr_ds = (pattern << 24) |
+ (pattern << 16) |
+ (pattern << 8) |
+ pattern;
desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
| AT_XDMAC_MBR_UBC_NDEN
| AT_XDMAC_MBR_UBC_NSEN
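
Both memset fixes above perform the same transformation: the dmaengine memset contract defines only the low byte of the int 'value' argument, so the driver must replicate that byte across the controller's fill word. A minimal standalone sketch of the 32-bit case (the helper name is hypothetical, not part of either patch):

#include <stdint.h>

/* Replicate the low byte of 'value' across a 32-bit fill word;
 * only bits 7:0 of the dmaengine memset 'value' are defined. */
static uint32_t memset_fill_pattern32(int value)
{
	uint32_t byte = (uint8_t)value;	/* take only the first byte */

	return (byte << 24) | (byte << 16) | (byte << 8) | byte;
}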
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
index 8c42e5ca00a9..1822a7034630 100644
--- a/drivers/dma/bestcomm/bestcomm.c
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -17,7 +17,9 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index fc513eb2b289..e2ec540e6519 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -911,6 +912,14 @@ static int jz4780_dma_probe(struct platform_device *pdev)
dd = &jzdma->dma_device;
+ /*
+ * The real segment size limit is dependent on the size unit selected
+ * for the transfer. Because the size unit is selected automatically
+ * and may be as small as 1 byte, use a safe limit of 2^24-1 bytes to
+ * ensure the 24-bit transfer count in the descriptor cannot overflow.
+ */
+ dma_set_max_seg_size(dev, 0xffffff);
+
dma_cap_set(DMA_MEMCPY, dd->cap_mask);
dma_cap_set(DMA_SLAVE, dd->cap_mask);
dma_cap_set(DMA_CYCLIC, dd->cap_mask);
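
The comment in the hunk above carries the reasoning; expressed in numbers (the constant names below are hypothetical, for illustration only):

/* The descriptor's transfer count is 24 bits wide, counted in size
 * units. The unit is picked automatically and may be as small as one
 * byte, so the only universally safe per-segment limit is: */
#define JZ_DMA_DTC_BITS		24
#define JZ_DMA_MAX_SEG_BYTES	((1U << JZ_DMA_DTC_BITS) - 1)	/* 0xffffff */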
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 2cfa8458b51b..e80feeea0e01 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1053,9 +1053,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
* When the chan_id is a negative value, we are dynamically adding
* the channel. Otherwise we are static enumerating.
*/
- mutex_lock(&device->chan_mutex);
chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
- mutex_unlock(&device->chan_mutex);
if (chan->chan_id < 0) {
pr_err("%s: unable to alloc ida for chan: %d\n",
__func__, chan->chan_id);
@@ -1078,9 +1076,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
return 0;
err_out_ida:
- mutex_lock(&device->chan_mutex);
ida_free(&device->chan_ida, chan->chan_id);
- mutex_unlock(&device->chan_mutex);
err_free_dev:
kfree(chan->dev);
err_free_local:
@@ -1113,9 +1109,7 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
device->chancnt--;
chan->dev->chan = NULL;
mutex_unlock(&dma_list_mutex);
- mutex_lock(&device->chan_mutex);
ida_free(&device->chan_ida, chan->chan_id);
- mutex_unlock(&device->chan_mutex);
device_unregister(&chan->dev->device);
free_percpu(chan->local);
}
@@ -1250,7 +1244,6 @@ int dma_async_device_register(struct dma_device *device)
if (rc != 0)
return rc;
- mutex_init(&device->chan_mutex);
ida_init(&device->chan_ida);
/* represent channels in sysfs. Probably want devs too */
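
The chan_mutex removed above was redundant: ida_alloc() and ida_free() already serialize internally via the xa_lock of the backing xarray, so the bare calls that remain are safe for concurrent channel registration:

/* No external locking needed; the IDA serializes internally. */
chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
/* ... and on the unwind/unregister paths: */
ida_free(&device->chan_ida, chan->chan_id);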
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index f696246f57fd..0a2168a4ccb0 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -675,10 +675,16 @@ static int dmatest_func(void *data)
/*
* src and dst buffers are freed by ourselves below
*/
- if (params->polled)
+ if (params->polled) {
flags = DMA_CTRL_ACK;
- else
- flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ } else {
+ if (dma_has_cap(DMA_INTERRUPT, dev->cap_mask)) {
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ } else {
+ pr_err("Channel does not support interrupt!\n");
+ goto err_pq_array;
+ }
+ }
ktime = ktime_get();
while (!(kthread_should_stop() ||
@@ -906,6 +912,7 @@ error_unmap_continue:
runtime = ktime_to_us(ktime);
ret = 0;
+err_pq_array:
kfree(dma_pq);
err_srcs_array:
kfree(srcs);
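
The new branch makes dmatest refuse interrupt-driven runs on channels that cannot raise completion interrupts. The equivalent consumer-side check, as a sketch (assumes 'chan' is an already-requested dmaengine channel):

unsigned long flags = DMA_CTRL_ACK;

/* Only ask for a completion interrupt if the channel can deliver one. */
if (dma_has_cap(DMA_INTERRUPT, chan->device->cap_mask))
	flags |= DMA_PREP_INTERRUPT;
else
	pr_warn("channel cannot signal completion by interrupt\n");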
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index db25f9b7778c..a9828ddd6d06 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -16,6 +16,15 @@ config DW_DMAC
Support the Synopsys DesignWare AHB DMA controller. This
can be integrated in chips such as the Intel Cherrytrail.
+config RZN1_DMAMUX
+ tristate "Renesas RZ/N1 DMAMUX driver"
+ depends on DW_DMAC
+ depends on ARCH_RZN1 || COMPILE_TEST
+ help
+ Support the Renesas RZ/N1 DMAMUX which is located in front of
+ the Synopsys DesignWare AHB DMA controller located on Renesas
+ SoCs.
+
config DW_DMAC_PCI
tristate "Synopsys DesignWare AHB DMA PCI driver"
depends on PCI
diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile
index a6f358ad8591..e1796015f213 100644
--- a/drivers/dma/dw/Makefile
+++ b/drivers/dma/dw/Makefile
@@ -9,3 +9,5 @@ dw_dmac-$(CONFIG_OF) += of.o
obj-$(CONFIG_DW_DMAC_PCI) += dw_dmac_pci.o
dw_dmac_pci-y := pci.o
+
+obj-$(CONFIG_RZN1_DMAMUX) += rzn1-dmamux.o
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 246118955877..47f2292dba98 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -137,6 +137,7 @@ static void dw_shutdown(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
{ .compatible = "snps,dma-spear1340", .data = &dw_dma_chip_pdata },
+ { .compatible = "renesas,rzn1-dma", .data = &dw_dma_chip_pdata },
{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
diff --git a/drivers/dma/dw/rzn1-dmamux.c b/drivers/dma/dw/rzn1-dmamux.c
new file mode 100644
index 000000000000..11d254e450b0
--- /dev/null
+++ b/drivers/dma/dw/rzn1-dmamux.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Schneider-Electric
+ * Author: Miquel Raynal <miquel.raynal@bootlin.com>
+ * Based on TI crossbar driver written by Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+#include <linux/bitops.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/slab.h>
+#include <linux/soc/renesas/r9a06g032-sysctrl.h>
+#include <linux/types.h>
+
+#define RNZ1_DMAMUX_NCELLS 6
+#define RZN1_DMAMUX_MAX_LINES 64
+#define RZN1_DMAMUX_LINES_PER_CTLR 16
+
+struct rzn1_dmamux_data {
+ struct dma_router dmarouter;
+ DECLARE_BITMAP(used_chans, 2 * RZN1_DMAMUX_LINES_PER_CTLR);
+};
+
+struct rzn1_dmamux_map {
+ unsigned int req_idx;
+};
+
+static void rzn1_dmamux_free(struct device *dev, void *route_data)
+{
+ struct rzn1_dmamux_data *dmamux = dev_get_drvdata(dev);
+ struct rzn1_dmamux_map *map = route_data;
+
+ dev_dbg(dev, "Unmapping DMAMUX request %u\n", map->req_idx);
+
+ clear_bit(map->req_idx, dmamux->used_chans);
+
+ kfree(map);
+}
+
+static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
+ struct rzn1_dmamux_data *dmamux = platform_get_drvdata(pdev);
+ struct rzn1_dmamux_map *map;
+ unsigned int dmac_idx, chan, val;
+ u32 mask;
+ int ret;
+
+ if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS)
+ return ERR_PTR(-EINVAL);
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return ERR_PTR(-ENOMEM);
+
+ chan = dma_spec->args[0];
+ map->req_idx = dma_spec->args[4];
+ val = dma_spec->args[5];
+ dma_spec->args_count -= 2;
+
+ if (chan >= RZN1_DMAMUX_LINES_PER_CTLR) {
+ dev_err(&pdev->dev, "Invalid DMA request line: %u\n", chan);
+ ret = -EINVAL;
+ goto free_map;
+ }
+
+ if (map->req_idx >= RZN1_DMAMUX_MAX_LINES ||
+ (map->req_idx % RZN1_DMAMUX_LINES_PER_CTLR) != chan) {
+ dev_err(&pdev->dev, "Invalid MUX request line: %u\n", map->req_idx);
+ ret = -EINVAL;
+ goto free_map;
+ }
+
+ dmac_idx = map->req_idx >= RZN1_DMAMUX_LINES_PER_CTLR ? 1 : 0;
+ dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", dmac_idx);
+ if (!dma_spec->np) {
+ dev_err(&pdev->dev, "Can't get DMA master\n");
+ ret = -EINVAL;
+ goto free_map;
+ }
+
+ dev_dbg(&pdev->dev, "Mapping DMAMUX request %u to DMAC%u request %u\n",
+ map->req_idx, dmac_idx, chan);
+
+ if (test_and_set_bit(map->req_idx, dmamux->used_chans)) {
+ ret = -EBUSY;
+ goto free_map;
+ }
+
+ mask = BIT(map->req_idx);
+ ret = r9a06g032_sysctrl_set_dmamux(mask, val ? mask : 0);
+ if (ret)
+ goto clear_bitmap;
+
+ return map;
+
+clear_bitmap:
+ clear_bit(map->req_idx, dmamux->used_chans);
+free_map:
+ kfree(map);
+
+ return ERR_PTR(ret);
+}
+
+static const struct of_device_id rzn1_dmac_match[] = {
+ { .compatible = "renesas,rzn1-dma" },
+ {}
+};
+
+static int rzn1_dmamux_probe(struct platform_device *pdev)
+{
+ struct device_node *mux_node = pdev->dev.of_node;
+ const struct of_device_id *match;
+ struct device_node *dmac_node;
+ struct rzn1_dmamux_data *dmamux;
+
+ dmamux = devm_kzalloc(&pdev->dev, sizeof(*dmamux), GFP_KERNEL);
+ if (!dmamux)
+ return -ENOMEM;
+
+ dmac_node = of_parse_phandle(mux_node, "dma-masters", 0);
+ if (!dmac_node)
+ return dev_err_probe(&pdev->dev, -ENODEV, "Can't get DMA master node\n");
+
+ match = of_match_node(rzn1_dmac_match, dmac_node);
+ of_node_put(dmac_node);
+ if (!match)
+ return dev_err_probe(&pdev->dev, -EINVAL, "DMA master is not supported\n");
+
+ dmamux->dmarouter.dev = &pdev->dev;
+ dmamux->dmarouter.route_free = rzn1_dmamux_free;
+
+ platform_set_drvdata(pdev, dmamux);
+
+ return of_dma_router_register(mux_node, rzn1_dmamux_route_allocate,
+ &dmamux->dmarouter);
+}
+
+static const struct of_device_id rzn1_dmamux_match[] = {
+ { .compatible = "renesas,rzn1-dmamux" },
+ {}
+};
+
+static struct platform_driver rzn1_dmamux_driver = {
+ .driver = {
+ .name = "renesas,rzn1-dmamux",
+ .of_match_table = rzn1_dmamux_match,
+ },
+ .probe = rzn1_dmamux_probe,
+};
+module_platform_driver(rzn1_dmamux_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
+MODULE_DESCRIPTION("Renesas RZ/N1 DMAMUX driver");
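
The route-allocate callback above enforces three constraints on the 6-cell consumer spec; restated as a single predicate (a sketch reusing the driver's constants, the function name is hypothetical):

/* req_idx / 16 selects the DW DMAC, req_idx % 16 is the request line
 * within it, and that line must match the one the consumer asked the
 * selected DMAC for. */
static bool rzn1_mux_spec_valid(unsigned int chan, unsigned int req_idx)
{
	return chan < RZN1_DMAMUX_LINES_PER_CTLR &&
	       req_idx < RZN1_DMAMUX_MAX_LINES &&
	       req_idx % RZN1_DMAMUX_LINES_PER_CTLR == chan;
}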
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 98f9ee70362e..971ff5f9ae84 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -132,7 +132,7 @@ struct ep93xx_dma_desc {
/**
* struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
* @chan: dmaengine API channel
- * @edma: pointer to to the engine device
+ * @edma: pointer to the engine device
* @regs: memory mapped registers
* @irq: interrupt number of the channel
* @clk: clock used by this channel
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index b9b2b4a4124e..c2808fd081d6 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -99,7 +99,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
ctx->wq = wq;
filp->private_data = ctx;
- if (device_pasid_enabled(idxd)) {
+ if (device_user_pasid_enabled(idxd)) {
sva = iommu_sva_bind_device(dev, current->mm, NULL);
if (IS_ERR(sva)) {
rc = PTR_ERR(sva);
@@ -152,7 +152,7 @@ static int idxd_cdev_release(struct inode *node, struct file *filep)
if (wq_shared(wq)) {
idxd_device_drain_pasid(idxd, ctx->pasid);
} else {
- if (device_pasid_enabled(idxd)) {
+ if (device_user_pasid_enabled(idxd)) {
/* The wq disable in the disable pasid function will drain the wq */
rc = idxd_wq_disable_pasid(wq);
if (rc < 0)
@@ -314,7 +314,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
mutex_lock(&wq->wq_lock);
wq->type = IDXD_WQT_USER;
- rc = __drv_enable_wq(wq);
+ rc = drv_enable_wq(wq);
if (rc < 0)
goto err;
@@ -329,7 +329,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
return 0;
err_cdev:
- __drv_disable_wq(wq);
+ drv_disable_wq(wq);
err:
wq->type = IDXD_WQT_NONE;
mutex_unlock(&wq->wq_lock);
@@ -342,7 +342,7 @@ static void idxd_user_drv_remove(struct idxd_dev *idxd_dev)
mutex_lock(&wq->wq_lock);
idxd_wq_del_cdev(wq);
- __drv_disable_wq(wq);
+ drv_disable_wq(wq);
wq->type = IDXD_WQT_NONE;
mutex_unlock(&wq->wq_lock);
}
@@ -369,10 +369,16 @@ int idxd_cdev_register(void)
rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
ictx[i].name);
if (rc)
- return rc;
+ goto err_free_chrdev_region;
}
return 0;
+
+err_free_chrdev_region:
+ for (i--; i >= 0; i--)
+ unregister_chrdev_region(ictx[i].devt, MINORMASK);
+
+ return rc;
}
void idxd_cdev_remove(void)
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index f652da6ab47d..ff0ea60051f0 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -184,7 +184,7 @@ int idxd_wq_enable(struct idxd_wq *wq)
if (wq->state == IDXD_WQ_ENABLED) {
dev_dbg(dev, "WQ %d already enabled\n", wq->id);
- return -ENXIO;
+ return 0;
}
idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);
@@ -299,24 +299,46 @@ void idxd_wqs_unmap_portal(struct idxd_device *idxd)
}
}
-int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
+static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv)
{
struct idxd_device *idxd = wq->idxd;
- int rc;
union wqcfg wqcfg;
unsigned int offset;
- rc = idxd_wq_disable(wq, false);
- if (rc < 0)
- return rc;
+ offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX);
+ spin_lock(&idxd->dev_lock);
+ wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset);
+ wqcfg.priv = priv;
+ wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX];
+ iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset);
+ spin_unlock(&idxd->dev_lock);
+}
+
+static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
+{
+ struct idxd_device *idxd = wq->idxd;
+ union wqcfg wqcfg;
+ unsigned int offset;
offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
spin_lock(&idxd->dev_lock);
wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
wqcfg.pasid_en = 1;
wqcfg.pasid = pasid;
+ wq->wqcfg->bits[WQCFG_PASID_IDX] = wqcfg.bits[WQCFG_PASID_IDX];
iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
spin_unlock(&idxd->dev_lock);
+}
+
+int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
+{
+ int rc;
+
+ rc = idxd_wq_disable(wq, false);
+ if (rc < 0)
+ return rc;
+
+ __idxd_wq_set_pasid_locked(wq, pasid);
rc = idxd_wq_enable(wq);
if (rc < 0)
@@ -555,19 +577,15 @@ int idxd_device_disable(struct idxd_device *idxd)
return -ENXIO;
}
- spin_lock(&idxd->dev_lock);
idxd_device_clear_state(idxd);
- idxd->state = IDXD_DEV_DISABLED;
- spin_unlock(&idxd->dev_lock);
return 0;
}
void idxd_device_reset(struct idxd_device *idxd)
{
idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
- spin_lock(&idxd->dev_lock);
idxd_device_clear_state(idxd);
- idxd->state = IDXD_DEV_DISABLED;
+ spin_lock(&idxd->dev_lock);
idxd_unmask_error_interrupts(idxd);
spin_unlock(&idxd->dev_lock);
}
@@ -694,15 +712,16 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
int i;
- lockdep_assert_held(&idxd->dev_lock);
for (i = 0; i < idxd->max_wqs; i++) {
struct idxd_wq *wq = idxd->wqs[i];
+ mutex_lock(&wq->wq_lock);
if (wq->state == IDXD_WQ_ENABLED) {
idxd_wq_disable_cleanup(wq);
wq->state = IDXD_WQ_DISABLED;
}
idxd_wq_device_reset_cleanup(wq);
+ mutex_unlock(&wq->wq_lock);
}
}
@@ -711,9 +730,12 @@ void idxd_device_clear_state(struct idxd_device *idxd)
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return;
+ idxd_device_wqs_clear_state(idxd);
+ spin_lock(&idxd->dev_lock);
idxd_groups_clear_state(idxd);
idxd_engines_clear_state(idxd);
- idxd_device_wqs_clear_state(idxd);
+ idxd->state = IDXD_DEV_DISABLED;
+ spin_unlock(&idxd->dev_lock);
}
static void idxd_group_config_write(struct idxd_group *group)
@@ -799,7 +821,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
*/
for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
- wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
+ wq->wqcfg->bits[i] |= ioread32(idxd->reg_base + wq_offset);
}
if (wq->size == 0 && wq->type != IDXD_WQT_NONE)
@@ -815,14 +837,8 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
if (wq_dedicated(wq))
wq->wqcfg->mode = 1;
- if (device_pasid_enabled(idxd)) {
- wq->wqcfg->pasid_en = 1;
- if (wq->type == IDXD_WQT_KERNEL && wq_dedicated(wq))
- wq->wqcfg->pasid = idxd->pasid;
- }
-
/*
- * Here the priv bit is set depending on the WQ type. priv = 1 if the
+ * The WQ priv bit is set depending on the WQ type. priv = 1 if the
* WQ type is kernel to indicate privileged access. This setting only
* matters for dedicated WQ. According to the DSA spec:
* If the WQ is in dedicated mode, WQ PASID Enable is 1, and the
@@ -832,7 +848,6 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
* In the case of a dedicated kernel WQ that is not able to support
* the PASID cap, then the configuration will be rejected.
*/
- wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
!idxd_device_pasid_priv_enabled(idxd) &&
wq->type == IDXD_WQT_KERNEL) {
@@ -953,7 +968,7 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
if (!wq->group)
continue;
- if (wq_shared(wq) && !device_swq_supported(idxd)) {
+ if (wq_shared(wq) && !wq_shared_supported(wq)) {
idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
dev_warn(dev, "No shared wq support but configured.\n");
return -EINVAL;
@@ -1018,6 +1033,9 @@ static int idxd_wq_load_config(struct idxd_wq *wq)
wq->priority = wq->wqcfg->priority;
+ wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
+ wq->max_batch_size = 1ULL << wq->wqcfg->max_batch_shift;
+
for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
@@ -1161,7 +1179,9 @@ void idxd_wq_free_irq(struct idxd_wq *wq)
struct idxd_device *idxd = wq->idxd;
struct idxd_irq_entry *ie = &wq->ie;
- synchronize_irq(ie->vector);
+ if (wq->type != IDXD_WQT_KERNEL)
+ return;
+
free_irq(ie->vector, ie);
idxd_flush_pending_descs(ie);
if (idxd->request_int_handles)
@@ -1180,6 +1200,9 @@ int idxd_wq_request_irq(struct idxd_wq *wq)
struct idxd_irq_entry *ie;
int rc;
+ if (wq->type != IDXD_WQT_KERNEL)
+ return 0;
+
ie = &wq->ie;
ie->vector = pci_irq_vector(pdev, ie->id);
ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : INVALID_IOASID;
@@ -1211,7 +1234,7 @@ err_irq:
return rc;
}
-int __drv_enable_wq(struct idxd_wq *wq)
+int drv_enable_wq(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
@@ -1245,7 +1268,7 @@ int __drv_enable_wq(struct idxd_wq *wq)
/* Shared WQ checks */
if (wq_shared(wq)) {
- if (!device_swq_supported(idxd)) {
+ if (!wq_shared_supported(wq)) {
idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
dev_dbg(dev, "PASID not enabled and shared wq.\n");
goto err;
@@ -1265,6 +1288,29 @@ int __drv_enable_wq(struct idxd_wq *wq)
}
}
+ /*
+ * In the event that the WQ is configurable for pasid and priv bits.
+ * For kernel wq, the driver should setup the pasid, pasid_en, and priv bit.
+ * However, for non-kernel wq, the driver should only set the pasid_en bit for
+ * shared wq. A dedicated wq that is not 'kernel' type will configure pasid and
+ * pasid_en later on so there is no need to setup.
+ */
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+ int priv = 0;
+
+ if (wq_pasid_enabled(wq)) {
+ if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
+ u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;
+
+ __idxd_wq_set_pasid_locked(wq, pasid);
+ }
+ }
+
+ if (is_idxd_wq_kernel(wq))
+ priv = 1;
+ __idxd_wq_set_priv_locked(wq, priv);
+ }
+
rc = 0;
spin_lock(&idxd->dev_lock);
if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
@@ -1289,8 +1335,36 @@ int __drv_enable_wq(struct idxd_wq *wq)
}
wq->client_count = 0;
+
+ rc = idxd_wq_request_irq(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
+ dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
+ goto err_irq;
+ }
+
+ rc = idxd_wq_alloc_resources(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
+ dev_dbg(dev, "WQ resource alloc failed\n");
+ goto err_res_alloc;
+ }
+
+ rc = idxd_wq_init_percpu_ref(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
+ dev_dbg(dev, "percpu_ref setup failed\n");
+ goto err_ref;
+ }
+
return 0;
+err_ref:
+ idxd_wq_free_resources(wq);
+err_res_alloc:
+ idxd_wq_free_irq(wq);
+err_irq:
+ idxd_wq_unmap_portal(wq);
err_map_portal:
rc = idxd_wq_disable(wq, false);
if (rc < 0)
@@ -1299,17 +1373,7 @@ err:
return rc;
}
-int drv_enable_wq(struct idxd_wq *wq)
-{
- int rc;
-
- mutex_lock(&wq->wq_lock);
- rc = __drv_enable_wq(wq);
- mutex_unlock(&wq->wq_lock);
- return rc;
-}
-
-void __drv_disable_wq(struct idxd_wq *wq)
+void drv_disable_wq(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
@@ -1320,21 +1384,16 @@ void __drv_disable_wq(struct idxd_wq *wq)
dev_warn(dev, "Clients has claim on wq %d: %d\n",
wq->id, idxd_wq_refcount(wq));
+ idxd_wq_free_resources(wq);
idxd_wq_unmap_portal(wq);
-
idxd_wq_drain(wq);
+ idxd_wq_free_irq(wq);
idxd_wq_reset(wq);
-
+ percpu_ref_exit(&wq->wq_active);
+ wq->type = IDXD_WQT_NONE;
wq->client_count = 0;
}
-void drv_disable_wq(struct idxd_wq *wq)
-{
- mutex_lock(&wq->wq_lock);
- __drv_disable_wq(wq);
- mutex_unlock(&wq->wq_lock);
-}
-
int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
{
struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
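
The pasid/priv comment in drv_enable_wq() above condenses to the following decision table (a sketch of the added logic; "later" refers to the user driver programming the PASID at open time via idxd_wq_set_pasid()):

/*
 * WQCFG bits programmed at enable time when the device is configurable:
 *
 *   wq type / mode       pasid_en   pasid          priv
 *   kernel, dedicated    1          idxd->pasid    1
 *   kernel, shared       1          0              1
 *   user,   shared       1          0              0
 *   user,   dedicated    (programmed later)        0
 */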
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index bfff59617d04..e0874cb4721c 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -88,6 +88,27 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
}
static struct dma_async_tx_descriptor *
+idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags)
+{
+ struct idxd_wq *wq = to_idxd_wq(c);
+ u32 desc_flags;
+ struct idxd_desc *desc;
+
+ if (wq->state != IDXD_WQ_ENABLED)
+ return NULL;
+
+ op_flag_setup(flags, &desc_flags);
+ desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+ if (IS_ERR(desc))
+ return NULL;
+
+ idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP,
+ 0, 0, 0, desc->compl_dma, desc_flags);
+ desc->txd.flags = flags;
+ return &desc->txd;
+}
+
+static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
dma_addr_t dma_src, size_t len, unsigned long flags)
{
@@ -193,10 +214,12 @@ int idxd_register_dma_device(struct idxd_device *idxd)
INIT_LIST_HEAD(&dma->channels);
dma->dev = dev;
+ dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
dma->device_release = idxd_dma_release;
+ dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt;
if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
dma_cap_set(DMA_MEMCPY, dma->cap_mask);
dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
@@ -227,7 +250,7 @@ void idxd_unregister_dma_device(struct idxd_device *idxd)
dma_async_device_unregister(&idxd->idxd_dma->dma);
}
-int idxd_register_dma_channel(struct idxd_wq *wq)
+static int idxd_register_dma_channel(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct dma_device *dma = &idxd->idxd_dma->dma;
@@ -264,7 +287,7 @@ int idxd_register_dma_channel(struct idxd_wq *wq)
return 0;
}
-void idxd_unregister_dma_channel(struct idxd_wq *wq)
+static void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
struct dma_chan *chan = &idxd_chan->chan;
@@ -290,34 +313,13 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
mutex_lock(&wq->wq_lock);
wq->type = IDXD_WQT_KERNEL;
- rc = idxd_wq_request_irq(wq);
- if (rc < 0) {
- idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
- dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
- goto err_irq;
- }
-
- rc = __drv_enable_wq(wq);
+ rc = drv_enable_wq(wq);
if (rc < 0) {
dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
rc = -ENXIO;
goto err;
}
- rc = idxd_wq_alloc_resources(wq);
- if (rc < 0) {
- idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
- dev_dbg(dev, "WQ resource alloc failed\n");
- goto err_res_alloc;
- }
-
- rc = idxd_wq_init_percpu_ref(wq);
- if (rc < 0) {
- idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
- dev_dbg(dev, "percpu_ref setup failed\n");
- goto err_ref;
- }
-
rc = idxd_register_dma_channel(wq);
if (rc < 0) {
idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR;
@@ -330,15 +332,8 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
return 0;
err_dma:
- __idxd_wq_quiesce(wq);
- percpu_ref_exit(&wq->wq_active);
-err_ref:
- idxd_wq_free_resources(wq);
-err_res_alloc:
- __drv_disable_wq(wq);
+ drv_disable_wq(wq);
err:
- idxd_wq_free_irq(wq);
-err_irq:
wq->type = IDXD_WQT_NONE;
mutex_unlock(&wq->wq_lock);
return rc;
@@ -351,11 +346,7 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
mutex_lock(&wq->wq_lock);
__idxd_wq_quiesce(wq);
idxd_unregister_dma_channel(wq);
- idxd_wq_free_resources(wq);
- __drv_disable_wq(wq);
- percpu_ref_exit(&wq->wq_active);
- idxd_wq_free_irq(wq);
- wq->type = IDXD_WQT_NONE;
+ drv_disable_wq(wq);
mutex_unlock(&wq->wq_lock);
}
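
With DMA_INTERRUPT now advertised, a client can ask idxd for a pure completion interrupt (a NOOP descriptor with the request-completion flag, no data movement). A consumer-side usage sketch ('chan' and 'my_done_cb' are assumed/hypothetical):

struct dma_async_tx_descriptor *tx;

tx = dmaengine_prep_dma_interrupt(chan, DMA_PREP_INTERRUPT);
if (tx) {
	tx->callback = my_done_cb;	/* hypothetical completion callback */
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
}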
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index da72eb15f610..fed0dfc1eaa8 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -239,6 +239,7 @@ enum idxd_device_flag {
IDXD_FLAG_CONFIGURABLE = 0,
IDXD_FLAG_CMD_RUNNING,
IDXD_FLAG_PASID_ENABLED,
+ IDXD_FLAG_USER_PASID_ENABLED,
};
struct idxd_dma_dev {
@@ -469,9 +470,20 @@ static inline bool device_pasid_enabled(struct idxd_device *idxd)
return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}
-static inline bool device_swq_supported(struct idxd_device *idxd)
+static inline bool device_user_pasid_enabled(struct idxd_device *idxd)
{
- return (support_enqcmd && device_pasid_enabled(idxd));
+ return test_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
+}
+
+static inline bool wq_pasid_enabled(struct idxd_wq *wq)
+{
+ return (is_idxd_wq_kernel(wq) && device_pasid_enabled(wq->idxd)) ||
+ (is_idxd_wq_user(wq) && device_user_pasid_enabled(wq->idxd));
+}
+
+static inline bool wq_shared_supported(struct idxd_wq *wq)
+{
+ return (support_enqcmd && wq_pasid_enabled(wq));
}
enum idxd_portal_prot {
@@ -559,9 +571,7 @@ void idxd_unregister_idxd_drv(void);
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int drv_enable_wq(struct idxd_wq *wq);
-int __drv_enable_wq(struct idxd_wq *wq);
void drv_disable_wq(struct idxd_wq *wq);
-void __drv_disable_wq(struct idxd_wq *wq);
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
@@ -602,8 +612,6 @@ int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc);
/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
-int idxd_register_dma_channel(struct idxd_wq *wq);
-void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
enum idxd_complete_type comp_type, bool free_desc);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 993a5dcca24f..355fb3ef4cbf 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -512,18 +512,15 @@ static int idxd_probe(struct idxd_device *idxd)
dev_dbg(dev, "IDXD reset complete\n");
if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
- rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
- if (rc == 0) {
- rc = idxd_enable_system_pasid(idxd);
- if (rc < 0) {
- iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
- dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
- } else {
- set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
- }
- } else {
- dev_warn(dev, "Unable to turn on SVA feature.\n");
- }
+ if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
+ dev_warn(dev, "Unable to turn on user SVA feature.\n");
+ else
+ set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
+
+ if (idxd_enable_system_pasid(idxd))
+ dev_warn(dev, "No in-kernel DMA with PASID.\n");
+ else
+ set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
} else if (!sva) {
dev_warn(dev, "User forced SVA off via module param.\n");
}
@@ -561,7 +558,8 @@ static int idxd_probe(struct idxd_device *idxd)
err:
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+ if (device_user_pasid_enabled(idxd))
+ iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
return rc;
}
@@ -574,7 +572,8 @@ static void idxd_cleanup(struct idxd_device *idxd)
idxd_cleanup_internals(idxd);
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+ if (device_user_pasid_enabled(idxd))
+ iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
}
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -691,7 +690,8 @@ static void idxd_remove(struct pci_dev *pdev)
free_irq(irq_entry->vector, irq_entry);
pci_free_irq_vectors(pdev);
pci_iounmap(pdev, idxd->reg_base);
- iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+ if (device_user_pasid_enabled(idxd))
+ iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
pci_disable_device(pdev);
destroy_workqueue(idxd->wq);
perfmon_pmu_remove(idxd);
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index aa642aecdc0b..02449aa9c454 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -353,6 +353,7 @@ union wqcfg {
} __packed;
#define WQCFG_PASID_IDX 2
+#define WQCFG_PRIVL_IDX 2
#define WQCFG_OCCUP_IDX 6
#define WQCFG_OCCUP_MASK 0xffff
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index dfd549685c46..3f262a57441b 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -588,7 +588,7 @@ static ssize_t wq_mode_store(struct device *dev,
if (sysfs_streq(buf, "dedicated")) {
set_bit(WQ_FLAG_DEDICATED, &wq->flags);
wq->threshold = 0;
- } else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
+ } else if (sysfs_streq(buf, "shared")) {
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
} else {
return -EINVAL;
@@ -832,6 +832,7 @@ static ssize_t wq_name_store(struct device *dev,
size_t count)
{
struct idxd_wq *wq = confdev_to_wq(dev);
+ char *input, *pos;
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
@@ -846,9 +847,14 @@ static ssize_t wq_name_store(struct device *dev,
if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
return -EOPNOTSUPP;
+ input = kstrndup(buf, count, GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+
+ pos = strim(input);
memset(wq->name, 0, WQ_NAME_SIZE + 1);
- strncpy(wq->name, buf, WQ_NAME_SIZE);
- strreplace(wq->name, '\n', '\0');
+ sprintf(wq->name, "%s", pos);
+ kfree(input);
return count;
}
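
The wq_name_store() change swaps strncpy()+strreplace() for a trim-on-store idiom. A sketch of the pattern in isolation (it relies on an earlier length check, present in the full function, ensuring count fits in the destination):

char *input, *pos;

input = kstrndup(buf, count, GFP_KERNEL);	/* bounded copy of sysfs buf */
if (!input)
	return -ENOMEM;
pos = strim(input);				/* trims both ends in place */
memset(wq->name, 0, WQ_NAME_SIZE + 1);
sprintf(wq->name, "%s", pos);			/* safe: count was checked */
kfree(input);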
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 41ef9f15d3d5..f8847c48ba03 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -751,7 +751,6 @@ static int mtk_cqdma_probe(struct platform_device *pdev)
struct mtk_cqdma_device *cqdma;
struct mtk_cqdma_vchan *vc;
struct dma_device *dd;
- struct resource *res;
int err;
u32 i;
@@ -824,13 +823,10 @@ static int mtk_cqdma_probe(struct platform_device *pdev)
return PTR_ERR(cqdma->pc[i]->base);
/* allocate IRQ resource */
- res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
- if (!res) {
- dev_err(&pdev->dev, "No irq resource for %s\n",
- dev_name(&pdev->dev));
- return -EINVAL;
- }
- cqdma->pc[i]->irq = res->start;
+ err = platform_get_irq(pdev, i);
+ if (err < 0)
+ return err;
+ cqdma->pc[i]->irq = err;
err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq,
mtk_cqdma_irq, 0, dev_name(&pdev->dev),
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index 6ad8afbb95f2..9ebd9231f62f 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -601,7 +601,7 @@ static void mtk_hsdma_free_rooms_in_ring(struct mtk_hsdma_device *hsdma)
cb->flag = 0;
}
- cb->vd = 0;
+ cb->vd = NULL;
/*
* Recycle the RXD with the helper WRITE_ONCE that can ensure
@@ -923,13 +923,10 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
return PTR_ERR(hsdma->clk);
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&pdev->dev, "No irq resource for %s\n",
- dev_name(&pdev->dev));
- return -EINVAL;
- }
- hsdma->irq = res->start;
+ err = platform_get_irq(pdev, 0);
+ if (err < 0)
+ return err;
+ hsdma->irq = err;
refcount_set(&hsdma->pc_refcnt, 0);
spin_lock_init(&hsdma->lock);
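
Both MediaTek conversions above replace the open-coded IORESOURCE_IRQ lookup with platform_get_irq(), whose negative return is a real error code rather than a missing-resource marker. The idiom in isolation:

int irq = platform_get_irq(pdev, 0);

if (irq < 0)
	return irq;	/* propagates -EPROBE_DEFER and friends */
hsdma->irq = irq;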
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 5a53d7fcef01..e8d71b35593e 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -1043,13 +1043,17 @@ static int mmp_pdma_probe(struct platform_device *op)
return PTR_ERR(pdev->base);
of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
- if (of_id)
- of_property_read_u32(pdev->dev->of_node, "#dma-channels",
- &dma_channels);
- else if (pdata && pdata->dma_channels)
+ if (of_id) {
+ /* Parse new and deprecated dma-channels properties */
+ if (of_property_read_u32(pdev->dev->of_node, "dma-channels",
+ &dma_channels))
+ of_property_read_u32(pdev->dev->of_node, "#dma-channels",
+ &dma_channels);
+ } else if (pdata && pdata->dma_channels) {
dma_channels = pdata->dma_channels;
- else
+ } else {
dma_channels = 32; /* default 32 channel */
+ }
pdev->dma_channels = dma_channels;
for (i = 0; i < dma_channels; i++) {
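
The same fallback pattern recurs in the pxa_dma and sprd-dma hunks further down: prefer the generic dma-channels property and fall back to the deprecated #dma-channels for old device trees. In isolation:

u32 dma_channels = 32;	/* default if neither property is present */

/* of_property_read_u32() returns 0 on success, so a non-zero return
 * triggers the fallback to the deprecated property name. */
if (of_property_read_u32(np, "dma-channels", &dma_channels))
	of_property_read_u32(np, "#dma-channels", &dma_channels);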
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 9c8b4084ba2f..f10b29034da1 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -591,14 +591,14 @@ static void mv_xor_v2_tasklet(struct tasklet_struct *t)
dma_run_dependencies(&next_pending_sw_desc->async_tx);
/* Lock the channel */
- spin_lock_bh(&xor_dev->lock);
+ spin_lock(&xor_dev->lock);
/* add the SW descriptor to the free descriptors list */
list_add(&next_pending_sw_desc->free_list,
&xor_dev->free_sw_desc);
/* Release the channel */
- spin_unlock_bh(&xor_dev->lock);
+ spin_unlock(&xor_dev->lock);
/* increment the next descriptor */
pending_ptr++;
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 9c52c57919c6..a7063e9cd551 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1294,7 +1294,7 @@ static int nbpf_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct nbpf_device *nbpf;
struct dma_device *dma_dev;
- struct resource *iomem, *irq_res;
+ struct resource *iomem;
const struct nbpf_config *cfg;
int num_channels;
int ret, irq, eirq, i;
@@ -1335,13 +1335,11 @@ static int nbpf_probe(struct platform_device *pdev)
nbpf->config = cfg;
for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
- if (!irq_res)
- break;
-
- for (irq = irq_res->start; irq <= irq_res->end;
- irq++, irqs++)
- irqbuf[irqs] = irq;
+ irq = platform_get_irq_optional(pdev, i);
+ if (irq < 0 && irq != -ENXIO)
+ return irq;
+ if (irq > 0)
+ irqbuf[irqs++] = irq;
}
/*
diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c
index 1ffcb5ca9788..12725fa1655f 100644
--- a/drivers/dma/plx_dma.c
+++ b/drivers/dma/plx_dma.c
@@ -137,7 +137,7 @@ static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
struct plx_dma_desc *desc;
u32 flags;
- spin_lock_bh(&plxdev->ring_lock);
+ spin_lock(&plxdev->ring_lock);
while (plxdev->tail != plxdev->head) {
desc = plx_dma_get_desc(plxdev, plxdev->tail);
@@ -165,7 +165,7 @@ static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
plxdev->tail++;
}
- spin_unlock_bh(&plxdev->ring_lock);
+ spin_unlock(&plxdev->ring_lock);
}
static void plx_dma_abort_desc(struct plx_dma_dev *plxdev)
diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c
index daafea5bc35d..377da23012ac 100644
--- a/drivers/dma/ptdma/ptdma-dev.c
+++ b/drivers/dma/ptdma/ptdma-dev.c
@@ -100,6 +100,7 @@ int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
struct pt_passthru_engine *pt_engine)
{
struct ptdma_desc desc;
+ struct pt_device *pt = container_of(cmd_q, struct pt_device, cmd_q);
cmd_q->cmd_error = 0;
cmd_q->total_pt_ops++;
@@ -111,17 +112,12 @@ int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
desc.dst_lo = lower_32_bits(pt_engine->dst_dma);
desc.dw5.dst_hi = upper_32_bits(pt_engine->dst_dma);
- return pt_core_execute_cmd(&desc, cmd_q);
-}
-
-static inline void pt_core_disable_queue_interrupts(struct pt_device *pt)
-{
- iowrite32(0, pt->cmd_q.reg_control + 0x000C);
-}
+ if (cmd_q->int_en)
+ pt_core_enable_queue_interrupts(pt);
+ else
+ pt_core_disable_queue_interrupts(pt);
-static inline void pt_core_enable_queue_interrupts(struct pt_device *pt)
-{
- iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C);
+ return pt_core_execute_cmd(&desc, cmd_q);
}
static void pt_do_cmd_complete(unsigned long data)
@@ -144,14 +140,10 @@ static void pt_do_cmd_complete(unsigned long data)
cmd->pt_cmd_callback(cmd->data, cmd->ret);
}
-static irqreturn_t pt_core_irq_handler(int irq, void *data)
+void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q)
{
- struct pt_device *pt = data;
- struct pt_cmd_queue *cmd_q = &pt->cmd_q;
u32 status;
- pt_core_disable_queue_interrupts(pt);
- pt->total_interrupts++;
status = ioread32(cmd_q->reg_control + 0x0010);
if (status) {
cmd_q->int_status = status;
@@ -162,11 +154,21 @@ static irqreturn_t pt_core_irq_handler(int irq, void *data)
if ((status & INT_ERROR) && !cmd_q->cmd_error)
cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
- /* Acknowledge the interrupt */
+ /* Acknowledge the completion */
iowrite32(status, cmd_q->reg_control + 0x0010);
- pt_core_enable_queue_interrupts(pt);
pt_do_cmd_complete((ulong)&pt->tdata);
}
+}
+
+static irqreturn_t pt_core_irq_handler(int irq, void *data)
+{
+ struct pt_device *pt = data;
+ struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+
+ pt_core_disable_queue_interrupts(pt);
+ pt->total_interrupts++;
+ pt_check_status_trans(pt, cmd_q);
+ pt_core_enable_queue_interrupts(pt);
return IRQ_HANDLED;
}
diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c
index 91b93e8d9779..cc22d162ce25 100644
--- a/drivers/dma/ptdma/ptdma-dmaengine.c
+++ b/drivers/dma/ptdma/ptdma-dmaengine.c
@@ -171,6 +171,7 @@ static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
vchan_tx_prep(&chan->vc, &desc->vd, flags);
desc->pt = chan->pt;
+ desc->pt->cmd_q.int_en = !!(flags & DMA_PREP_INTERRUPT);
desc->issued_to_hw = 0;
desc->status = DMA_IN_PROGRESS;
@@ -257,6 +258,17 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
pt_cmd_callback(desc, 0);
}
+static enum dma_status
+pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct pt_device *pt = to_pt_chan(c)->pt;
+ struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+
+ pt_check_status_trans(pt, cmd_q);
+ return dma_cookie_status(c, cookie, txstate);
+}
+
static int pt_pause(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
@@ -291,8 +303,10 @@ static int pt_terminate_all(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
unsigned long flags;
+ struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q;
LIST_HEAD(head);
+ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
spin_lock_irqsave(&chan->vc.lock, flags);
vchan_get_all_descriptors(&chan->vc, &head);
spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -362,7 +376,7 @@ int pt_dmaengine_register(struct pt_device *pt)
dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
dma_dev->device_issue_pending = pt_issue_pending;
- dma_dev->device_tx_status = dma_cookie_status;
+ dma_dev->device_tx_status = pt_tx_status;
dma_dev->device_pause = pt_pause;
dma_dev->device_resume = pt_resume;
dma_dev->device_terminate_all = pt_terminate_all;
diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/ptdma/ptdma.h
index afbf192c9230..d093c43b7d13 100644
--- a/drivers/dma/ptdma/ptdma.h
+++ b/drivers/dma/ptdma/ptdma.h
@@ -206,6 +206,9 @@ struct pt_cmd_queue {
unsigned int active;
unsigned int suspended;
+ /* Interrupt flag */
+ bool int_en;
+
/* Register addresses for queue */
void __iomem *reg_control;
u32 qcontrol; /* Cached control register */
@@ -318,7 +321,17 @@ void pt_core_destroy(struct pt_device *pt);
int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
struct pt_passthru_engine *pt_engine);
+void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q);
void pt_start_queue(struct pt_cmd_queue *cmd_q);
void pt_stop_queue(struct pt_cmd_queue *cmd_q);
+static inline void pt_core_disable_queue_interrupts(struct pt_device *pt)
+{
+ iowrite32(0, pt->cmd_q.reg_control + 0x000C);
+}
+
+static inline void pt_core_enable_queue_interrupts(struct pt_device *pt)
+{
+ iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C);
+}
#endif
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 6078cc81892e..e7034f6f3994 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -1365,10 +1365,17 @@ static int pxad_probe(struct platform_device *op)
of_id = of_match_device(pxad_dt_ids, &op->dev);
if (of_id) {
- of_property_read_u32(op->dev.of_node, "#dma-channels",
- &dma_channels);
- ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
+ /* Parse new and deprecated dma-channels properties */
+ if (of_property_read_u32(op->dev.of_node, "dma-channels",
+ &dma_channels))
+ of_property_read_u32(op->dev.of_node, "#dma-channels",
+ &dma_channels);
+ /* Parse new and deprecated dma-requests properties */
+ ret = of_property_read_u32(op->dev.of_node, "dma-requests",
&nb_requestors);
+ if (ret)
+ ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
+ &nb_requestors);
if (ret) {
dev_warn(pdev->slave.dev,
"#dma-requests set to default 32 as missing in OF: %d",
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 94f3648f7483..8f0c9c4e2efd 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -1754,10 +1754,14 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
tre->dword[2] = u32_encode_bits(spi->rx_len, TRE_RX_LEN);
tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
- if (spi->cmd == SPI_RX)
+ if (spi->cmd == SPI_RX) {
tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
- else
+ } else if (spi->cmd == SPI_TX) {
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+ } else { /* SPI_DUPLEX */
tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
+ }
}
/* create the dma tre */
@@ -2148,6 +2152,7 @@ static int gpi_probe(struct platform_device *pdev)
{
struct gpi_dev *gpi_dev;
unsigned int i;
+ u32 ee_offset;
int ret;
gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);
@@ -2175,6 +2180,9 @@ static int gpi_probe(struct platform_device *pdev)
return ret;
}
+ ee_offset = (uintptr_t)device_get_match_data(gpi_dev->dev);
+ gpi_dev->ee_base = gpi_dev->ee_base - ee_offset;
+
gpi_dev->ev_factor = EV_FACTOR;
ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64));
@@ -2278,9 +2286,12 @@ static int gpi_probe(struct platform_device *pdev)
}
static const struct of_device_id gpi_of_match[] = {
- { .compatible = "qcom,sdm845-gpi-dma" },
- { .compatible = "qcom,sm8150-gpi-dma" },
- { .compatible = "qcom,sm8250-gpi-dma" },
+ { .compatible = "qcom,sc7280-gpi-dma", .data = (void *)0x10000 },
+ { .compatible = "qcom,sdm845-gpi-dma", .data = (void *)0x0 },
+ { .compatible = "qcom,sm8150-gpi-dma", .data = (void *)0x0 },
+ { .compatible = "qcom,sm8250-gpi-dma", .data = (void *)0x0 },
+ { .compatible = "qcom,sm8350-gpi-dma", .data = (void *)0x10000 },
+ { .compatible = "qcom,sm8450-gpi-dma", .data = (void *)0x10000 },
{ },
};
MODULE_DEVICE_TABLE(of, gpi_of_match);
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 51587cf8196b..210f1a9eb441 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -431,6 +431,7 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
struct hidma_desc *mdesc = NULL;
struct hidma_dev *mdma = mchan->dmadev;
unsigned long irqflags;
+ u64 byte_pattern, fill_pattern;
/* Get free descriptor */
spin_lock_irqsave(&mchan->lock, irqflags);
@@ -443,9 +444,19 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
if (!mdesc)
return NULL;
+ byte_pattern = (char)value;
+ fill_pattern = (byte_pattern << 56) |
+ (byte_pattern << 48) |
+ (byte_pattern << 40) |
+ (byte_pattern << 32) |
+ (byte_pattern << 24) |
+ (byte_pattern << 16) |
+ (byte_pattern << 8) |
+ byte_pattern;
+
mdesc->desc.flags = flags;
hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
- value, dest, len, flags,
+ fill_pattern, dest, len, flags,
HIDMA_TRE_MEMSET);
/* Place descriptor in prepared list */
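
hidma applies the same byte-replication as the Atmel fixes earlier, widened to 64 bits. For reference, the shift cascade is equivalent to one multiply by the repeating-byte constant (sketch, hypothetical helper name):

/* 0x0101010101010101 has one set byte per lane, so multiplying by a
 * single byte value copies that byte into all eight lanes. */
static uint64_t memset_fill_pattern64(int value)
{
	return (uint64_t)(uint8_t)value * 0x0101010101010101ULL;
}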
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index f12606aeff87..db5a4ef76077 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -482,23 +482,30 @@ static void sf_pdma_setup_chans(struct sf_pdma *pdma)
static int sf_pdma_probe(struct platform_device *pdev)
{
struct sf_pdma *pdma;
- struct sf_pdma_chan *chan;
struct resource *res;
- int len, chans;
- int ret;
+ int ret, n_chans;
const enum dma_slave_buswidth widths =
DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES |
DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES |
DMA_SLAVE_BUSWIDTH_64_BYTES;
- chans = PDMA_NR_CH;
- len = sizeof(*pdma) + sizeof(*chan) * chans;
- pdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ ret = of_property_read_u32(pdev->dev.of_node, "dma-channels", &n_chans);
+ if (ret) {
+ /* backwards-compatibility for no dma-channels property */
+ dev_dbg(&pdev->dev, "set number of channels to default value: 4\n");
+ n_chans = PDMA_MAX_NR_CH;
+ } else if (n_chans > PDMA_MAX_NR_CH) {
+ dev_err(&pdev->dev, "the number of channels exceeds the maximum\n");
+ return -EINVAL;
+ }
+
+ pdma = devm_kzalloc(&pdev->dev, struct_size(pdma, chans, n_chans),
+ GFP_KERNEL);
if (!pdma)
return -ENOMEM;
- pdma->n_chans = chans;
+ pdma->n_chans = n_chans;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pdma->membase = devm_ioremap_resource(&pdev->dev, res);
@@ -556,7 +563,7 @@ static int sf_pdma_remove(struct platform_device *pdev)
struct sf_pdma_chan *ch;
int i;
- for (i = 0; i < PDMA_NR_CH; i++) {
+ for (i = 0; i < pdma->n_chans; i++) {
ch = &pdma->chans[i];
devm_free_irq(&pdev->dev, ch->txirq, ch);
@@ -574,6 +581,7 @@ static int sf_pdma_remove(struct platform_device *pdev)
static const struct of_device_id sf_pdma_dt_ids[] = {
{ .compatible = "sifive,fu540-c000-pdma" },
+ { .compatible = "sifive,pdma0" },
{},
};
MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h
index 0c20167b097d..dcb3687bd5da 100644
--- a/drivers/dma/sf-pdma/sf-pdma.h
+++ b/drivers/dma/sf-pdma/sf-pdma.h
@@ -22,11 +22,7 @@
#include "../dmaengine.h"
#include "../virt-dma.h"
-#define PDMA_NR_CH 4
-
-#if (PDMA_NR_CH != 4)
-#error "Please define PDMA_NR_CH to 4"
-#endif
+#define PDMA_MAX_NR_CH 4
#define PDMA_BASE_ADDR 0x3000000
#define PDMA_CHAN_OFFSET 0x1000
@@ -118,7 +114,7 @@ struct sf_pdma {
void __iomem *membase;
void __iomem *mappedbase;
u32 n_chans;
- struct sf_pdma_chan chans[PDMA_NR_CH];
+ struct sf_pdma_chan chans[];
};
#endif /* _SF_PDMA_H */
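
Together, the two sf-pdma hunks move the channel array from a fixed PDMA_NR_CH to a probe-time length read from dma-channels. The allocation shape, reduced to its essentials (struct name is hypothetical):

struct sf_pdma_sketch {
	u32 n_chans;
	struct sf_pdma_chan chans[];	/* flexible array member */
};

/* struct_size() computes sizeof(*p) + n * sizeof(p->chans[0]) with
 * overflow checking: */
p = devm_kzalloc(dev, struct_size(p, chans, n_chans), GFP_KERNEL);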
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index b35d705f79e7..c0b2997ab7fd 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -50,7 +50,7 @@ config RENESAS_USB_DMAC
config RZ_DMAC
tristate "Renesas RZ/{G2L,V2L} DMA Controller"
- depends on ARCH_R9A07G044 || ARCH_R9A07G054 || COMPILE_TEST
+ depends on ARCH_RZG2L || COMPILE_TEST
select RENESAS_DMA
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 7f158ef5672d..2138b80435ab 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -1117,7 +1117,11 @@ static int sprd_dma_probe(struct platform_device *pdev)
u32 chn_count;
int ret, i;
- ret = device_property_read_u32(&pdev->dev, "#dma-channels", &chn_count);
+ /* Parse new and deprecated dma-channels properties */
+ ret = device_property_read_u32(&pdev->dev, "dma-channels", &chn_count);
+ if (ret)
+ ret = device_property_read_u32(&pdev->dev, "#dma-channels",
+ &chn_count);
if (ret) {
dev_err(&pdev->dev, "get dma channels count failed\n");
return ret;
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index d2365fab1b7a..adb25a11c70f 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -208,6 +208,7 @@ struct stm32_dma_chan {
u32 threshold;
u32 mem_burst;
u32 mem_width;
+ enum dma_status status;
};
struct stm32_dma_device {
@@ -485,6 +486,7 @@ static void stm32_dma_stop(struct stm32_dma_chan *chan)
}
chan->busy = false;
+ chan->status = DMA_COMPLETE;
}
static int stm32_dma_terminate_all(struct dma_chan *c)
@@ -535,6 +537,13 @@ static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr);
}
+static void stm32_dma_sg_inc(struct stm32_dma_chan *chan)
+{
+ chan->next_sg++;
+ if (chan->desc->cyclic && (chan->next_sg == chan->desc->num_sgs))
+ chan->next_sg = 0;
+}
+
static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);
static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
@@ -575,7 +584,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar);
stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr);
- chan->next_sg++;
+ stm32_dma_sg_inc(chan);
/* Clear interrupt status if it is there */
status = stm32_dma_irq_status(chan);
@@ -588,11 +597,11 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
stm32_dma_dump_reg(chan);
/* Start DMA */
+ chan->busy = true;
+ chan->status = DMA_IN_PROGRESS;
reg->dma_scr |= STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
- chan->busy = true;
-
dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
}
@@ -605,41 +614,131 @@ static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
id = chan->id;
dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
- if (dma_scr & STM32_DMA_SCR_DBM) {
- if (chan->next_sg == chan->desc->num_sgs)
- chan->next_sg = 0;
+ sg_req = &chan->desc->sg_req[chan->next_sg];
- sg_req = &chan->desc->sg_req[chan->next_sg];
+ if (dma_scr & STM32_DMA_SCR_CT) {
+ dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
+ stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
+ dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
+ stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
+ } else {
+ dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
+ stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
+ dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
+ stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
+ }
+}
- if (dma_scr & STM32_DMA_SCR_CT) {
- dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
- stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
- dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
- stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
- } else {
- dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
- stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
- dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
- stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
- }
+static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan)
+{
+ struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+ u32 dma_scr;
+
+ /*
+ * Read and store current remaining data items and peripheral/memory addresses to be
+ * updated on resume
+ */
+ dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+ /*
+ * Transfer can be paused between a previous resume and the reconfiguration on transfer
+ * complete. If the transfer is cyclic and CIRC and DBM have been deactivated for resume,
+ * they need to be set again here in the SCR backup to ensure a correct reconfiguration on
+ * transfer complete.
+ */
+ if (chan->desc && chan->desc->cyclic) {
+ if (chan->desc->num_sgs == 1)
+ dma_scr |= STM32_DMA_SCR_CIRC;
+ else
+ dma_scr |= STM32_DMA_SCR_DBM;
+ }
+ chan->chan_reg.dma_scr = dma_scr;
+
+ /*
+ * CIRC/DBM need to be temporarily deactivated until the next Transfer Complete interrupt,
+ * otherwise on resume the NDTR autoreload value will be wrong (lower than the initial
+ * period length)
+ */
+ if (chan->desc && chan->desc->cyclic) {
+ dma_scr &= ~(STM32_DMA_SCR_DBM | STM32_DMA_SCR_CIRC);
+ stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
+ }
+
+ chan->chan_reg.dma_sndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
+
+ dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
+}
+
+static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
+{
+ struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+ struct stm32_dma_sg_req *sg_req;
+ u32 dma_scr, status, id;
+
+ id = chan->id;
+ dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+
+ /* Clear interrupt status if it is there */
+ status = stm32_dma_irq_status(chan);
+ if (status)
+ stm32_dma_irq_clear(chan, status);
+
+ if (!chan->next_sg)
+ sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1];
+ else
+ sg_req = &chan->desc->sg_req[chan->next_sg - 1];
+
+ /* Reconfigure NDTR with the initial value */
+ stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), sg_req->chan_reg.dma_sndtr);
+
+ /* Restore SPAR */
+ stm32_dma_write(dmadev, STM32_DMA_SPAR(id), sg_req->chan_reg.dma_spar);
+
+ /* Restore SM0AR/SM1AR regardless of DBM/CT, as they may have been modified */
+ stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sg_req->chan_reg.dma_sm0ar);
+ stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sg_req->chan_reg.dma_sm1ar);
+
+ /* Reactivate CIRC/DBM if needed */
+ if (chan->chan_reg.dma_scr & STM32_DMA_SCR_DBM) {
+ dma_scr |= STM32_DMA_SCR_DBM;
+ /* Restore CT */
+ if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CT)
+ dma_scr &= ~STM32_DMA_SCR_CT;
+ else
+ dma_scr |= STM32_DMA_SCR_CT;
+ } else if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CIRC) {
+ dma_scr |= STM32_DMA_SCR_CIRC;
}
+ stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
+
+ stm32_dma_configure_next_sg(chan);
+
+ stm32_dma_dump_reg(chan);
+
+ dma_scr |= STM32_DMA_SCR_EN;
+ stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
+
+ dev_dbg(chan2dev(chan), "vchan %pK: reconfigured after pause/resume\n", &chan->vchan);
}
-static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
+static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
{
- if (chan->desc) {
- if (chan->desc->cyclic) {
- vchan_cyclic_callback(&chan->desc->vdesc);
- chan->next_sg++;
+ if (!chan->desc)
+ return;
+
+ if (chan->desc->cyclic) {
+ vchan_cyclic_callback(&chan->desc->vdesc);
+ stm32_dma_sg_inc(chan);
+ /* cyclic while CIRC/DBM disabled => post-resume reconfiguration needed */
+ if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)))
+ stm32_dma_post_resume_reconfigure(chan);
+ else if (scr & STM32_DMA_SCR_DBM)
stm32_dma_configure_next_sg(chan);
- } else {
- chan->busy = false;
- if (chan->next_sg == chan->desc->num_sgs) {
- vchan_cookie_complete(&chan->desc->vdesc);
- chan->desc = NULL;
- }
- stm32_dma_start_transfer(chan);
+ } else {
+ chan->busy = false;
+ chan->status = DMA_COMPLETE;
+ if (chan->next_sg == chan->desc->num_sgs) {
+ vchan_cookie_complete(&chan->desc->vdesc);
+ chan->desc = NULL;
}
+ stm32_dma_start_transfer(chan);
}
}
@@ -675,8 +774,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
if (status & STM32_DMA_TCI) {
stm32_dma_irq_clear(chan, STM32_DMA_TCI);
- if (scr & STM32_DMA_SCR_TCIE)
- stm32_dma_handle_chan_done(chan);
+ if (scr & STM32_DMA_SCR_TCIE) {
+ if (chan->status == DMA_PAUSED && !(scr & STM32_DMA_SCR_EN))
+ stm32_dma_handle_chan_paused(chan);
+ else
+ stm32_dma_handle_chan_done(chan, scr);
+ }
status &= ~STM32_DMA_TCI;
}
@@ -711,6 +814,107 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
+static int stm32_dma_pause(struct dma_chan *c)
+{
+ struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+ unsigned long flags;
+ int ret;
+
+ if (chan->status != DMA_IN_PROGRESS)
+ return -EPERM;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ ret = stm32_dma_disable_chan(chan);
+ /*
+ * A transfer complete flag is set to indicate the end of transfer due to the stream
+ * interruption, so wait for the interrupt
+ */
+ if (!ret)
+ chan->status = DMA_PAUSED;
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ return ret;
+}
+
+static int stm32_dma_resume(struct dma_chan *c)
+{
+ struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+ struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+ struct stm32_dma_chan_reg chan_reg = chan->chan_reg;
+ u32 id = chan->id, scr, ndtr, offset, spar, sm0ar, sm1ar;
+ struct stm32_dma_sg_req *sg_req;
+ unsigned long flags;
+
+ if (chan->status != DMA_PAUSED)
+ return -EPERM;
+
+ scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+ if (WARN_ON(scr & STM32_DMA_SCR_EN))
+ return -EPERM;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+
+ /* sg_req[prev_sg] contains the original ndtr, sm0ar and sm1ar before pausing the transfer */
+ if (!chan->next_sg)
+ sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1];
+ else
+ sg_req = &chan->desc->sg_req[chan->next_sg - 1];
+
+ ndtr = sg_req->chan_reg.dma_sndtr;
+ offset = (ndtr - chan_reg.dma_sndtr) << STM32_DMA_SCR_PSIZE_GET(chan_reg.dma_scr);
+ spar = sg_req->chan_reg.dma_spar;
+ sm0ar = sg_req->chan_reg.dma_sm0ar;
+ sm1ar = sg_req->chan_reg.dma_sm1ar;
+
+ /*
+ * The peripheral and/or memory addresses have to be updated in order to adjust the
+ * address pointers, depending on whether increment mode is enabled.
+ */
+ if (chan_reg.dma_scr & STM32_DMA_SCR_PINC)
+ stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar + offset);
+ else
+ stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar);
+
+ if (!(chan_reg.dma_scr & STM32_DMA_SCR_MINC))
+ offset = 0;
+
+ /*
+ * In case of DBM, the current target could be SM1AR.
+ * Need to temporarily deactivate CIRC/DBM to finish the current transfer, so
+ * SM0AR becomes the current target and must be updated with SM1AR + offset if CT=1.
+ */
+ if ((chan_reg.dma_scr & STM32_DMA_SCR_DBM) && (chan_reg.dma_scr & STM32_DMA_SCR_CT))
+ stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sm1ar + offset);
+ else
+ stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sm0ar + offset);
+
+ /* NDTR must be restored otherwise internal HW counter won't be correctly reset */
+ stm32_dma_write(dmadev, STM32_DMA_SNDTR(id), chan_reg.dma_sndtr);
+
+ /*
+ * Need to temporarily deactivate CIRC/DBM until next Transfer Complete interrupt,
+ * otherwise NDTR autoreload value will be wrong (lower than the initial period length)
+ */
+ if (chan_reg.dma_scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM))
+ chan_reg.dma_scr &= ~(STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM);
+
+ if (chan_reg.dma_scr & STM32_DMA_SCR_DBM)
+ stm32_dma_configure_next_sg(chan);
+
+ stm32_dma_dump_reg(chan);
+
+ /* The stream may then be re-enabled to restart the transfer from the point where it was stopped */
+ chan->status = DMA_IN_PROGRESS;
+ chan_reg.dma_scr |= STM32_DMA_SCR_EN;
+ stm32_dma_write(dmadev, STM32_DMA_SCR(id), chan_reg.dma_scr);
+
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);
+
+ return 0;
+}
+
static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
enum dma_transfer_direction direction,
enum dma_slave_buswidth *buswidth,
@@ -978,10 +1182,12 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
}
/* Enable Circular mode or double buffer mode */
- if (buf_len == period_len)
+ if (buf_len == period_len) {
chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC;
- else
+ } else {
chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
+ chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT;
+ }
/* Clear periph ctrl if client set it */
chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;
@@ -1091,24 +1297,36 @@ static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan)
{
struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
struct stm32_dma_sg_req *sg_req;
- u32 dma_scr, dma_smar, id;
+ u32 dma_scr, dma_smar, id, period_len;
id = chan->id;
dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+ /* In cyclic CIRC but not DBM, CT is not used */
if (!(dma_scr & STM32_DMA_SCR_DBM))
return true;
sg_req = &chan->desc->sg_req[chan->next_sg];
+ period_len = sg_req->len;
+ /* DBM: take care of a previous pause/resume that has not yet been post-reconfigured */
if (dma_scr & STM32_DMA_SCR_CT) {
dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
- return (dma_smar == sg_req->chan_reg.dma_sm0ar);
+ /*
+ * If the transfer has been paused/resumed,
+ * SM0AR is within the range [SM0AR, SM0AR + period_len)
+ */
+ return (dma_smar >= sg_req->chan_reg.dma_sm0ar &&
+ dma_smar < sg_req->chan_reg.dma_sm0ar + period_len);
}
dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));
-
- return (dma_smar == sg_req->chan_reg.dma_sm1ar);
+ /*
+ * If the transfer has been paused/resumed,
+ * SM1AR is within the range [SM1AR, SM1AR + period_len)
+ */
+ return (dma_smar >= sg_req->chan_reg.dma_sm1ar &&
+ dma_smar < sg_req->chan_reg.dma_sm1ar + period_len);
}
static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
@@ -1148,7 +1366,7 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
residue = stm32_dma_get_remaining_bytes(chan);
- if (!stm32_dma_is_current_sg(chan)) {
+ if (chan->desc->cyclic && !stm32_dma_is_current_sg(chan)) {
n_sg++;
if (n_sg == chan->desc->num_sgs)
n_sg = 0;
@@ -1188,7 +1406,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
u32 residue = 0;
status = dma_cookie_status(c, cookie, state);
- if (status == DMA_COMPLETE || !state)
+ if (status == DMA_COMPLETE)
+ return status;
+
+ status = chan->status;
+
+ if (!state)
return status;
spin_lock_irqsave(&chan->vchan.lock, flags);
@@ -1377,6 +1600,8 @@ static int stm32_dma_probe(struct platform_device *pdev)
dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
dd->device_config = stm32_dma_slave_config;
+ dd->device_pause = stm32_dma_pause;
+ dd->device_resume = stm32_dma_resume;
dd->device_terminate_all = stm32_dma_terminate_all;
dd->device_synchronize = stm32_dma_synchronize;
dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
@@ -1482,7 +1707,7 @@ static int stm32_dma_runtime_resume(struct device *dev)
#endif
#ifdef CONFIG_PM_SLEEP
-static int stm32_dma_suspend(struct device *dev)
+static int stm32_dma_pm_suspend(struct device *dev)
{
struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
int id, ret, scr;
@@ -1506,14 +1731,14 @@ static int stm32_dma_suspend(struct device *dev)
return 0;
}
-static int stm32_dma_resume(struct device *dev)
+static int stm32_dma_pm_resume(struct device *dev)
{
return pm_runtime_force_resume(dev);
}
#endif
static const struct dev_pm_ops stm32_dma_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_suspend, stm32_dma_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_pm_suspend, stm32_dma_pm_resume)
SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
stm32_dma_runtime_resume, NULL)
};
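
With device_pause/device_resume wired up above, a dmaengine client can suspend and restart a stream through the generic API. A hedged sketch of the client side (hypothetical demo_* function; chan and cookie come from the usual dma_request_chan()/dmaengine_submit() flow):

static int demo_pause_resume(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	int ret;

	ret = dmaengine_pause(chan);	/* reaches stm32_dma_pause() */
	if (ret)
		return ret;

	/* While paused, tx_status reports DMA_PAUSED and a valid residue */
	dmaengine_tx_status(chan, cookie, &state);

	return dmaengine_resume(chan);	/* stm32_dma_resume() restarts the stream */
}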
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index d5d55732adba..eee0c5aa5fb5 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -267,7 +267,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
ret = PTR_ERR(rst);
if (ret == -EPROBE_DEFER)
goto err_clk;
- } else {
+ } else if (count > 1) { /* Don't reset if there is only one dma-master */
reset_control_assert(rst);
udelay(2);
reset_control_deassert(rst);
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 6f57ff0e7b37..caf0cce8f528 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -34,7 +34,6 @@
#include "virt-dma.h"
#define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */
-#define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg 2 */
/* MDMA Channel x interrupt/status register */
#define STM32_MDMA_CISR(x) (0x40 + 0x40 * (x)) /* x = 0..62 */
@@ -73,6 +72,7 @@
#define STM32_MDMA_CCR_WEX BIT(14)
#define STM32_MDMA_CCR_HEX BIT(13)
#define STM32_MDMA_CCR_BEX BIT(12)
+#define STM32_MDMA_CCR_SM BIT(8)
#define STM32_MDMA_CCR_PL_MASK GENMASK(7, 6)
#define STM32_MDMA_CCR_PL(n) FIELD_PREP(STM32_MDMA_CCR_PL_MASK, (n))
#define STM32_MDMA_CCR_TCIE BIT(5)
@@ -168,7 +168,7 @@
#define STM32_MDMA_MAX_BUF_LEN 128
#define STM32_MDMA_MAX_BLOCK_LEN 65536
-#define STM32_MDMA_MAX_CHANNELS 63
+#define STM32_MDMA_MAX_CHANNELS 32
#define STM32_MDMA_MAX_REQUESTS 256
#define STM32_MDMA_MAX_BURST 128
#define STM32_MDMA_VERY_HIGH_PRIORITY 0x3
@@ -248,6 +248,7 @@ struct stm32_mdma_device {
u32 nr_channels;
u32 nr_requests;
u32 nr_ahb_addr_masks;
+ u32 chan_reserved;
struct stm32_mdma_chan chan[STM32_MDMA_MAX_CHANNELS];
u32 ahb_addr_masks[];
};
@@ -1317,26 +1318,16 @@ static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
{
struct stm32_mdma_device *dmadev = devid;
- struct stm32_mdma_chan *chan = devid;
+ struct stm32_mdma_chan *chan;
u32 reg, id, ccr, ien, status;
/* Find out which channel generates the interrupt */
status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0);
- if (status) {
- id = __ffs(status);
- } else {
- status = readl_relaxed(dmadev->base + STM32_MDMA_GISR1);
- if (!status) {
- dev_dbg(mdma2dev(dmadev), "spurious it\n");
- return IRQ_NONE;
- }
- id = __ffs(status);
- /*
- * As GISR0 provides status for channel id from 0 to 31,
- * so GISR1 provides status for channel id from 32 to 62
- */
- id += 32;
+ if (!status) {
+ dev_dbg(mdma2dev(dmadev), "spurious it\n");
+ return IRQ_NONE;
}
+ id = __ffs(status);
chan = &dmadev->chan[id];
if (!chan) {
@@ -1354,9 +1345,12 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
if (!(status & ien)) {
spin_unlock(&chan->vchan.lock);
- dev_warn(chan2dev(chan),
- "spurious it (status=0x%04x, ien=0x%04x)\n",
- status, ien);
+ if (chan->busy)
+ dev_warn(chan2dev(chan),
+ "spurious it (status=0x%04x, ien=0x%04x)\n", status, ien);
+ else
+ dev_dbg(chan2dev(chan),
+ "spurious it (status=0x%04x, ien=0x%04x)\n", status, ien);
return IRQ_NONE;
}
@@ -1456,10 +1450,23 @@ static void stm32_mdma_free_chan_resources(struct dma_chan *c)
chan->desc_pool = NULL;
}
+static bool stm32_mdma_filter_fn(struct dma_chan *c, void *fn_param)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+
+ /* Check if chan is marked Secure */
+ if (dmadev->chan_reserved & BIT(chan->id))
+ return false;
+
+ return true;
+}
+
static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct stm32_mdma_device *dmadev = ofdma->of_dma_data;
+ dma_cap_mask_t mask = dmadev->ddev.cap_mask;
struct stm32_mdma_chan *chan;
struct dma_chan *c;
struct stm32_mdma_chan_config config;
@@ -1485,7 +1492,7 @@ static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
return NULL;
}
- c = dma_get_any_slave_channel(&dmadev->ddev);
+ c = __dma_request_channel(&mask, stm32_mdma_filter_fn, &config, ofdma->of_node);
if (!c) {
dev_err(mdma2dev(dmadev), "No more channels available\n");
return NULL;
@@ -1615,6 +1622,10 @@ static int stm32_mdma_probe(struct platform_device *pdev)
for (i = 0; i < dmadev->nr_channels; i++) {
chan = &dmadev->chan[i];
chan->id = i;
+
+ if (stm32_mdma_read(dmadev, STM32_MDMA_CCR(i)) & STM32_MDMA_CCR_SM)
+ dmadev->chan_reserved |= BIT(i);
+
chan->vchan.desc_free = stm32_mdma_desc_free;
vchan_init(&chan->vchan, dd);
}
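
The stm32-mdma changes above record secure channels in a chan_reserved bitmap at probe and keep them out of channel requests via the filter function. The core of that logic, reduced to a standalone sketch (hypothetical demo_* helpers standing in for the register accessors):

/* Mark channels whose CCR has the SM (secure) bit set as reserved */
static void demo_mark_secure(u32 *chan_reserved, unsigned int nr_channels,
			     u32 (*read_ccr)(unsigned int id))
{
	unsigned int i;

	for (i = 0; i < nr_channels; i++)
		if (read_ccr(i) & BIT(8))	/* STM32_MDMA_CCR_SM */
			*chan_reserved |= BIT(i);
}

/* Filter callback: reserved channels never match */
static bool demo_filter(u32 chan_reserved, unsigned int id)
{
	return !(chan_reserved & BIT(id));
}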
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 5cadd4d2b824..b7557f437936 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -90,6 +90,14 @@
#define DMA_CHAN_CUR_PARA 0x1c
+/*
+ * LLI address mangling
+ *
+ * The LLI link physical address is also mangled, but we avoid dealing
+ * with that by allocating LLIs from the DMA32 zone.
+ */
+#define SRC_HIGH_ADDR(x) (((x) & 0x3U) << 16)
+#define DST_HIGH_ADDR(x) (((x) & 0x3U) << 18)
/*
* Various hardware related defines
@@ -132,6 +140,7 @@ struct sun6i_dma_config {
u32 dst_burst_lengths;
u32 src_addr_widths;
u32 dst_addr_widths;
+ bool has_high_addr;
bool has_mbus_clk;
};
@@ -241,9 +250,7 @@ static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev)
static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev,
struct sun6i_pchan *pchan)
{
- phys_addr_t reg = virt_to_phys(pchan->base);
-
- dev_dbg(sdev->slave.dev, "Chan %d reg: %pa\n"
+ dev_dbg(sdev->slave.dev, "Chan %d reg:\n"
"\t___en(%04x): \t0x%08x\n"
"\tpause(%04x): \t0x%08x\n"
"\tstart(%04x): \t0x%08x\n"
@@ -252,7 +259,7 @@ static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev,
"\t__dst(%04x): \t0x%08x\n"
"\tcount(%04x): \t0x%08x\n"
"\t_para(%04x): \t0x%08x\n\n",
- pchan->idx, &reg,
+ pchan->idx,
DMA_CHAN_ENABLE,
readl(pchan->base + DMA_CHAN_ENABLE),
DMA_CHAN_PAUSE,
@@ -385,17 +392,16 @@ static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev,
}
static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan,
- struct sun6i_dma_lli *lli)
+ struct sun6i_dma_lli *v_lli,
+ dma_addr_t p_lli)
{
- phys_addr_t p_lli = virt_to_phys(lli);
-
dev_dbg(chan2dev(&vchan->vc.chan),
- "\n\tdesc: p - %pa v - 0x%p\n"
+ "\n\tdesc:\tp - %pad v - 0x%p\n"
"\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n"
"\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n",
- &p_lli, lli,
- lli->cfg, lli->src, lli->dst,
- lli->len, lli->para, lli->p_lli_next);
+ &p_lli, v_lli,
+ v_lli->cfg, v_lli->src, v_lli->dst,
+ v_lli->len, v_lli->para, v_lli->p_lli_next);
}
static void sun6i_dma_free_desc(struct virt_dma_desc *vd)
@@ -445,7 +451,7 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
pchan->desc = to_sun6i_desc(&desc->tx);
pchan->done = NULL;
- sun6i_dma_dump_lli(vchan, pchan->desc->v_lli);
+ sun6i_dma_dump_lli(vchan, pchan->desc->v_lli, pchan->desc->p_lli);
irq_reg = pchan->idx / DMA_IRQ_CHAN_NR;
irq_offset = pchan->idx % DMA_IRQ_CHAN_NR;
@@ -626,6 +632,18 @@ static int set_config(struct sun6i_dma_dev *sdev,
return 0;
}
+static inline void sun6i_dma_set_addr(struct sun6i_dma_dev *sdev,
+ struct sun6i_dma_lli *v_lli,
+ dma_addr_t src, dma_addr_t dst)
+{
+ v_lli->src = lower_32_bits(src);
+ v_lli->dst = lower_32_bits(dst);
+
+ if (sdev->cfg->has_high_addr)
+ v_lli->para |= SRC_HIGH_ADDR(upper_32_bits(src)) |
+ DST_HIGH_ADDR(upper_32_bits(dst));
+}
+
static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
@@ -648,16 +666,15 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
if (!txd)
return NULL;
- v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
+ v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli);
if (!v_lli) {
dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
goto err_txd_free;
}
- v_lli->src = src;
- v_lli->dst = dest;
v_lli->len = len;
v_lli->para = NORMAL_WAIT;
+ sun6i_dma_set_addr(sdev, v_lli, src, dest);
burst = convert_burst(8);
width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES);
@@ -670,7 +687,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
sun6i_dma_lli_add(NULL, v_lli, p_lli, txd);
- sun6i_dma_dump_lli(vchan, v_lli);
+ sun6i_dma_dump_lli(vchan, v_lli, p_lli);
return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
@@ -708,7 +725,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
return NULL;
for_each_sg(sgl, sg, sg_len, i) {
- v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
+ v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli);
if (!v_lli)
goto err_lli_free;
@@ -716,8 +733,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
v_lli->para = NORMAL_WAIT;
if (dir == DMA_MEM_TO_DEV) {
- v_lli->src = sg_dma_address(sg);
- v_lli->dst = sconfig->dst_addr;
+ sun6i_dma_set_addr(sdev, v_lli,
+ sg_dma_address(sg),
+ sconfig->dst_addr);
v_lli->cfg = lli_cfg;
sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port);
sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE);
@@ -729,8 +747,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
sg_dma_len(sg), flags);
} else {
- v_lli->src = sconfig->src_addr;
- v_lli->dst = sg_dma_address(sg);
+ sun6i_dma_set_addr(sdev, v_lli,
+ sconfig->src_addr,
+ sg_dma_address(sg));
v_lli->cfg = lli_cfg;
sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM);
sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE);
@@ -746,14 +765,16 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
}
dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli);
- for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
- sun6i_dma_dump_lli(vchan, prev);
+ for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli;
+ p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next)
+ sun6i_dma_dump_lli(vchan, v_lli, p_lli);
return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
err_lli_free:
- for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
- dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
+ for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli;
+ p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next)
+ dma_pool_free(sdev->pool, v_lli, p_lli);
kfree(txd);
return NULL;
}
@@ -787,7 +808,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic(
return NULL;
for (i = 0; i < periods; i++) {
- v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
+ v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli);
if (!v_lli) {
dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
goto err_lli_free;
@@ -797,14 +818,16 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic(
v_lli->para = NORMAL_WAIT;
if (dir == DMA_MEM_TO_DEV) {
- v_lli->src = buf_addr + period_len * i;
- v_lli->dst = sconfig->dst_addr;
+ sun6i_dma_set_addr(sdev, v_lli,
+ buf_addr + period_len * i,
+ sconfig->dst_addr);
v_lli->cfg = lli_cfg;
sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port);
sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE);
} else {
- v_lli->src = sconfig->src_addr;
- v_lli->dst = buf_addr + period_len * i;
+ sun6i_dma_set_addr(sdev, v_lli,
+ sconfig->src_addr,
+ buf_addr + period_len * i);
v_lli->cfg = lli_cfg;
sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM);
sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE);
@@ -820,8 +843,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic(
return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
err_lli_free:
- for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
- dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
+ for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli;
+ p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next)
+ dma_pool_free(sdev->pool, v_lli, p_lli);
kfree(txd);
return NULL;
}
@@ -1174,8 +1198,6 @@ static struct sun6i_dma_config sun50i_a64_dma_cfg = {
};
/*
- * TODO: Add support for more than 4g physical addressing.
- *
* The A100 binding uses the number of dma channels from the
* device tree node.
*/
@@ -1194,6 +1216,7 @@ static struct sun6i_dma_config sun50i_a100_dma_cfg = {
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
+ .has_high_addr = true,
.has_mbus_clk = true,
};
@@ -1248,6 +1271,7 @@ static const struct of_device_id sun6i_dma_match[] = {
{ .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg },
{ .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg },
{ .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg },
+ { .compatible = "allwinner,sun20i-d1-dma", .data = &sun50i_a100_dma_cfg },
{ .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg },
{ .compatible = "allwinner,sun50i-a100-dma", .data = &sun50i_a100_dma_cfg },
{ .compatible = "allwinner,sun50i-h6-dma", .data = &sun50i_h6_dma_cfg },
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
new file mode 100644
index 000000000000..05cd451f541d
--- /dev/null
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -0,0 +1,1498 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DMA driver for NVIDIA Tegra GPC DMA controller.
+ *
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <dt-bindings/memory/tegra186-mc.h>
+#include "virt-dma.h"
+
+/* CSR register */
+#define TEGRA_GPCDMA_CHAN_CSR 0x00
+#define TEGRA_GPCDMA_CSR_ENB BIT(31)
+#define TEGRA_GPCDMA_CSR_IE_EOC BIT(30)
+#define TEGRA_GPCDMA_CSR_ONCE BIT(27)
+
+#define TEGRA_GPCDMA_CSR_FC_MODE GENMASK(25, 24)
+#define TEGRA_GPCDMA_CSR_FC_MODE_NO_MMIO \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 0)
+#define TEGRA_GPCDMA_CSR_FC_MODE_ONE_MMIO \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 1)
+#define TEGRA_GPCDMA_CSR_FC_MODE_TWO_MMIO \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 2)
+#define TEGRA_GPCDMA_CSR_FC_MODE_FOUR_MMIO \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 3)
+
+#define TEGRA_GPCDMA_CSR_DMA GENMASK(23, 21)
+#define TEGRA_GPCDMA_CSR_DMA_IO2MEM_NO_FC \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 0)
+#define TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 1)
+#define TEGRA_GPCDMA_CSR_DMA_MEM2IO_NO_FC \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 2)
+#define TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 3)
+#define TEGRA_GPCDMA_CSR_DMA_MEM2MEM \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 4)
+#define TEGRA_GPCDMA_CSR_DMA_FIXED_PAT \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 6)
+
+#define TEGRA_GPCDMA_CSR_REQ_SEL_MASK GENMASK(20, 16)
+#define TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED \
+ FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, 4)
+#define TEGRA_GPCDMA_CSR_IRQ_MASK BIT(15)
+#define TEGRA_GPCDMA_CSR_WEIGHT GENMASK(13, 10)
+
+/* STATUS register */
+#define TEGRA_GPCDMA_CHAN_STATUS 0x004
+#define TEGRA_GPCDMA_STATUS_BUSY BIT(31)
+#define TEGRA_GPCDMA_STATUS_ISE_EOC BIT(30)
+#define TEGRA_GPCDMA_STATUS_PING_PONG BIT(28)
+#define TEGRA_GPCDMA_STATUS_DMA_ACTIVITY BIT(27)
+#define TEGRA_GPCDMA_STATUS_CHANNEL_PAUSE BIT(26)
+#define TEGRA_GPCDMA_STATUS_CHANNEL_RX BIT(25)
+#define TEGRA_GPCDMA_STATUS_CHANNEL_TX BIT(24)
+#define TEGRA_GPCDMA_STATUS_IRQ_INTR_STA BIT(23)
+#define TEGRA_GPCDMA_STATUS_IRQ_STA BIT(21)
+#define TEGRA_GPCDMA_STATUS_IRQ_TRIG_STA BIT(20)
+
+#define TEGRA_GPCDMA_CHAN_CSRE 0x008
+#define TEGRA_GPCDMA_CHAN_CSRE_PAUSE BIT(31)
+
+/* Source address */
+#define TEGRA_GPCDMA_CHAN_SRC_PTR 0x00C
+
+/* Destination address */
+#define TEGRA_GPCDMA_CHAN_DST_PTR 0x010
+
+/* High address pointer */
+#define TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR 0x014
+#define TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR GENMASK(7, 0)
+#define TEGRA_GPCDMA_HIGH_ADDR_DST_PTR GENMASK(23, 16)
+
+/* MC sequence register */
+#define TEGRA_GPCDMA_CHAN_MCSEQ 0x18
+#define TEGRA_GPCDMA_MCSEQ_DATA_SWAP BIT(31)
+#define TEGRA_GPCDMA_MCSEQ_REQ_COUNT GENMASK(30, 25)
+#define TEGRA_GPCDMA_MCSEQ_BURST GENMASK(24, 23)
+#define TEGRA_GPCDMA_MCSEQ_BURST_2 \
+ FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 0)
+#define TEGRA_GPCDMA_MCSEQ_BURST_16 \
+ FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 3)
+#define TEGRA_GPCDMA_MCSEQ_WRAP1 GENMASK(22, 20)
+#define TEGRA_GPCDMA_MCSEQ_WRAP0 GENMASK(19, 17)
+#define TEGRA_GPCDMA_MCSEQ_WRAP_NONE 0
+
+#define TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK GENMASK(13, 7)
+#define TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK GENMASK(6, 0)
+
+/* MMIO sequence register */
+#define TEGRA_GPCDMA_CHAN_MMIOSEQ 0x01c
+#define TEGRA_GPCDMA_MMIOSEQ_DBL_BUF BIT(31)
+#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH GENMASK(30, 28)
+#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8 \
+ FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 0)
+#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16 \
+ FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 1)
+#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32 \
+ FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 2)
+#define TEGRA_GPCDMA_MMIOSEQ_DATA_SWAP BIT(27)
+#define TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT 23
+#define TEGRA_GPCDMA_MMIOSEQ_BURST_MIN 2U
+#define TEGRA_GPCDMA_MMIOSEQ_BURST_MAX 32U
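+/*
+ * For a power-of-two burst of N words, the encoding below is
+ * GENMASK(fls(N) - 2, 0) = N - 1, e.g. a 16-word burst encodes as 0xF.
+ */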
+#define TEGRA_GPCDMA_MMIOSEQ_BURST(bs) \
+ (GENMASK((fls(bs) - 2), 0) << TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT)
+#define TEGRA_GPCDMA_MMIOSEQ_MASTER_ID GENMASK(22, 19)
+#define TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD GENMASK(18, 16)
+#define TEGRA_GPCDMA_MMIOSEQ_MMIO_PROT GENMASK(8, 7)
+
+/* Channel WCOUNT */
+#define TEGRA_GPCDMA_CHAN_WCOUNT 0x20
+
+/* Transfer count */
+#define TEGRA_GPCDMA_CHAN_XFER_COUNT 0x24
+
+/* DMA byte count status */
+#define TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS 0x28
+
+/* Error Status Register */
+#define TEGRA_GPCDMA_CHAN_ERR_STATUS 0x30
+#define TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT 8
+#define TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK 0xF
+#define TEGRA_GPCDMA_CHAN_ERR_TYPE(err) ( \
+ ((err) >> TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT) & \
+ TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK)
+#define TEGRA_DMA_BM_FIFO_FULL_ERR 0xF
+#define TEGRA_DMA_PERIPH_FIFO_FULL_ERR 0xE
+#define TEGRA_DMA_PERIPH_ID_ERR 0xD
+#define TEGRA_DMA_STREAM_ID_ERR 0xC
+#define TEGRA_DMA_MC_SLAVE_ERR 0xB
+#define TEGRA_DMA_MMIO_SLAVE_ERR 0xA
+
+/* Fixed Pattern */
+#define TEGRA_GPCDMA_CHAN_FIXED_PATTERN 0x34
+
+#define TEGRA_GPCDMA_CHAN_TZ 0x38
+#define TEGRA_GPCDMA_CHAN_TZ_MMIO_PROT_1 BIT(0)
+#define TEGRA_GPCDMA_CHAN_TZ_MC_PROT_1 BIT(1)
+
+#define TEGRA_GPCDMA_CHAN_SPARE 0x3c
+#define TEGRA_GPCDMA_CHAN_SPARE_EN_LEGACY_FC BIT(16)
+
+/*
+ * If any burst is in flight while the DMA is paused, this is the time needed to
+ * complete the in-flight burst and update the DMA status register.
+ */
+#define TEGRA_GPCDMA_BURST_COMPLETE_TIME 20
+#define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT 100
+
+/* Channel base address offset from GPCDMA base address */
+#define TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET 0x20000
+
+struct tegra_dma;
+struct tegra_dma_channel;
+
+/*
+ * tegra_dma_chip_data: Tegra chip specific DMA data
+ * @nr_channels: Number of channels available in the controller.
+ * @channel_reg_size: Channel register size.
+ * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
+ * @hw_support_pause: DMA HW engine supports pause of the channel.
+ * @terminate: Callback to terminate a DMA transfer on the channel.
+ */
+struct tegra_dma_chip_data {
+ bool hw_support_pause;
+ unsigned int nr_channels;
+ unsigned int channel_reg_size;
+ unsigned int max_dma_count;
+ int (*terminate)(struct tegra_dma_channel *tdc);
+};
+
+/* DMA channel registers */
+struct tegra_dma_channel_regs {
+ u32 csr;
+ u32 src_ptr;
+ u32 dst_ptr;
+ u32 high_addr_ptr;
+ u32 mc_seq;
+ u32 mmio_seq;
+ u32 wcount;
+ u32 fixed_pattern;
+};
+
+/*
+ * tegra_dma_sg_req: DMA request details used to configure the hardware. This
+ * contains the details for one transfer to configure the DMA hw.
+ * The client's request for data transfer can be broken into multiple
+ * sub-transfers as per requester details and hw support. These sub-transfers
+ * are added as an array in the Tegra DMA desc, which manages the transfer details.
+ */
+struct tegra_dma_sg_req {
+ unsigned int len;
+ struct tegra_dma_channel_regs ch_regs;
+};
+
+/*
+ * tegra_dma_desc: Tegra DMA descriptor which uses virt_dma_desc to
+ * manage the client request and keep track of transfer status, callbacks,
+ * request counts, etc.
+ */
+struct tegra_dma_desc {
+ bool cyclic;
+ unsigned int bytes_req;
+ unsigned int bytes_xfer;
+ unsigned int sg_idx;
+ unsigned int sg_count;
+ struct virt_dma_desc vd;
+ struct tegra_dma_channel *tdc;
+ struct tegra_dma_sg_req sg_req[];
+};
+
+/*
+ * tegra_dma_channel: Channel specific information
+ */
+struct tegra_dma_channel {
+ bool config_init;
+ char name[30];
+ enum dma_transfer_direction sid_dir;
+ int id;
+ int irq;
+ int slave_id;
+ struct tegra_dma *tdma;
+ struct virt_dma_chan vc;
+ struct tegra_dma_desc *dma_desc;
+ struct dma_slave_config dma_sconfig;
+ unsigned int stream_id;
+ unsigned long chan_base_offset;
+};
+
+/*
+ * tegra_dma: Tegra DMA specific information
+ */
+struct tegra_dma {
+ const struct tegra_dma_chip_data *chip_data;
+ unsigned long sid_m2d_reserved;
+ unsigned long sid_d2m_reserved;
+ void __iomem *base_addr;
+ struct device *dev;
+ struct dma_device dma_dev;
+ struct reset_control *rst;
+ struct tegra_dma_channel channels[];
+};
+
+static inline void tdc_write(struct tegra_dma_channel *tdc,
+ u32 reg, u32 val)
+{
+ writel_relaxed(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
+}
+
+static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
+{
+ return readl_relaxed(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
+}
+
+static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
+{
+ return container_of(dc, struct tegra_dma_channel, vc.chan);
+}
+
+static inline struct tegra_dma_desc *vd_to_tegra_dma_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct tegra_dma_desc, vd);
+}
+
+static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
+{
+ return tdc->vc.chan.device->dev;
+}
+
+static void tegra_dma_dump_chan_regs(struct tegra_dma_channel *tdc)
+{
+ dev_dbg(tdc2dev(tdc), "DMA Channel %d name %s register dump:\n",
+ tdc->id, tdc->name);
+ dev_dbg(tdc2dev(tdc), "CSR %x STA %x CSRE %x SRC %x DST %x\n",
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR),
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS),
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE),
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR),
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_DST_PTR)
+ );
+ dev_dbg(tdc2dev(tdc), "MCSEQ %x IOSEQ %x WCNT %x XFER %x BSTA %x\n",
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ),
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ),
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_WCOUNT),
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT),
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS)
+ );
+ dev_dbg(tdc2dev(tdc), "DMA ERR_STA %x\n",
+ tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS));
+}
+
+static int tegra_dma_sid_reserve(struct tegra_dma_channel *tdc,
+ enum dma_transfer_direction direction)
+{
+ struct tegra_dma *tdma = tdc->tdma;
+ int sid = tdc->slave_id;
+
+ if (!is_slave_direction(direction))
+ return 0;
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ if (test_and_set_bit(sid, &tdma->sid_m2d_reserved)) {
+ dev_err(tdma->dev, "slave id already in use\n");
+ return -EINVAL;
+ }
+ break;
+ case DMA_DEV_TO_MEM:
+ if (test_and_set_bit(sid, &tdma->sid_d2m_reserved)) {
+ dev_err(tdma->dev, "slave id already in use\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ break;
+ }
+
+ tdc->sid_dir = direction;
+
+ return 0;
+}
+
+static void tegra_dma_sid_free(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma *tdma = tdc->tdma;
+ int sid = tdc->slave_id;
+
+ switch (tdc->sid_dir) {
+ case DMA_MEM_TO_DEV:
+ clear_bit(sid, &tdma->sid_m2d_reserved);
+ break;
+ case DMA_DEV_TO_MEM:
+ clear_bit(sid, &tdma->sid_d2m_reserved);
+ break;
+ default:
+ break;
+ }
+
+ tdc->sid_dir = DMA_TRANS_NONE;
+}
+
+static void tegra_dma_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(container_of(vd, struct tegra_dma_desc, vd));
+}
+
+static int tegra_dma_slave_config(struct dma_chan *dc,
+ struct dma_slave_config *sconfig)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+
+ memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
+ tdc->config_init = true;
+
+ return 0;
+}
+
+static int tegra_dma_pause(struct tegra_dma_channel *tdc)
+{
+ int ret;
+ u32 val;
+
+ val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
+ val |= TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
+
+ /* Wait until busy bit is de-asserted */
+ ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
+ tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS,
+ val,
+ !(val & TEGRA_GPCDMA_STATUS_BUSY),
+ TEGRA_GPCDMA_BURST_COMPLETE_TIME,
+ TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
+
+ if (ret) {
+ dev_err(tdc2dev(tdc), "DMA pause timed out\n");
+ tegra_dma_dump_chan_regs(tdc);
+ }
+
+ return ret;
+}
+
+static int tegra_dma_device_pause(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ unsigned long flags;
+ int ret;
+
+ if (!tdc->tdma->chip_data->hw_support_pause)
+ return -ENOSYS;
+
+ spin_lock_irqsave(&tdc->vc.lock, flags);
+ ret = tegra_dma_pause(tdc);
+ spin_unlock_irqrestore(&tdc->vc.lock, flags);
+
+ return ret;
+}
+
+static void tegra_dma_resume(struct tegra_dma_channel *tdc)
+{
+ u32 val;
+
+ val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
+ val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
+}
+
+static int tegra_dma_device_resume(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ unsigned long flags;
+
+ if (!tdc->tdma->chip_data->hw_support_pause)
+ return -ENOSYS;
+
+ spin_lock_irqsave(&tdc->vc.lock, flags);
+ tegra_dma_resume(tdc);
+ spin_unlock_irqrestore(&tdc->vc.lock, flags);
+
+ return 0;
+}
+
+static void tegra_dma_disable(struct tegra_dma_channel *tdc)
+{
+ u32 csr, status;
+
+ csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR);
+
+ /* Disable interrupts */
+ csr &= ~TEGRA_GPCDMA_CSR_IE_EOC;
+
+ /* Disable DMA */
+ csr &= ~TEGRA_GPCDMA_CSR_ENB;
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr);
+
+ /* Clear interrupt status if it is there */
+ status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
+ if (status & TEGRA_GPCDMA_STATUS_ISE_EOC) {
+ dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS, status);
+ }
+}
+
+static void tegra_dma_configure_next_sg(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma_desc *dma_desc = tdc->dma_desc;
+ struct tegra_dma_channel_regs *ch_regs;
+ int ret;
+ u32 val;
+
+ dma_desc->sg_idx++;
+
+ /* Reset the sg index for cyclic transfers */
+ if (dma_desc->sg_idx == dma_desc->sg_count)
+ dma_desc->sg_idx = 0;
+
+ /* Configure the next transfer as soon as the DMA goes busy */
+ ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
+ tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS,
+ val,
+ (val & TEGRA_GPCDMA_STATUS_BUSY), 0,
+ TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
+ if (ret)
+ return;
+
+ ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs;
+
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr);
+
+ /* Start DMA */
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR,
+ ch_regs->csr | TEGRA_GPCDMA_CSR_ENB);
+}
+
+static void tegra_dma_start(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma_desc *dma_desc = tdc->dma_desc;
+ struct tegra_dma_channel_regs *ch_regs;
+ struct virt_dma_desc *vdesc;
+
+ if (!dma_desc) {
+ vdesc = vchan_next_desc(&tdc->vc);
+ if (!vdesc)
+ return;
+
+ dma_desc = vd_to_tegra_dma_desc(vdesc);
+ list_del(&vdesc->node);
+ dma_desc->tdc = tdc;
+ tdc->dma_desc = dma_desc;
+
+ tegra_dma_resume(tdc);
+ }
+
+ ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs;
+
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, 0);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_FIXED_PATTERN, ch_regs->fixed_pattern);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ, ch_regs->mmio_seq);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, ch_regs->mc_seq);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, ch_regs->csr);
+
+ /* Start DMA */
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR,
+ ch_regs->csr | TEGRA_GPCDMA_CSR_ENB);
+}
+
+static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc)
+{
+ vchan_cookie_complete(&tdc->dma_desc->vd);
+
+ tegra_dma_sid_free(tdc);
+ tdc->dma_desc = NULL;
+}
+
+static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc,
+ unsigned int err_status)
+{
+ switch (TEGRA_GPCDMA_CHAN_ERR_TYPE(err_status)) {
+ case TEGRA_DMA_BM_FIFO_FULL_ERR:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d bm fifo full\n", tdc->id);
+ break;
+
+ case TEGRA_DMA_PERIPH_FIFO_FULL_ERR:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d peripheral fifo full\n", tdc->id);
+ break;
+
+ case TEGRA_DMA_PERIPH_ID_ERR:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d illegal peripheral id\n", tdc->id);
+ break;
+
+ case TEGRA_DMA_STREAM_ID_ERR:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d illegal stream id\n", tdc->id);
+ break;
+
+ case TEGRA_DMA_MC_SLAVE_ERR:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d mc slave error\n", tdc->id);
+ break;
+
+ case TEGRA_DMA_MMIO_SLAVE_ERR:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d mmio slave error\n", tdc->id);
+ break;
+
+ default:
+ dev_err(tdc->tdma->dev,
+ "GPCDMA CH%d security violation %x\n", tdc->id,
+ err_status);
+ }
+}
+
+static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
+{
+ struct tegra_dma_channel *tdc = dev_id;
+ struct tegra_dma_desc *dma_desc = tdc->dma_desc;
+ struct tegra_dma_sg_req *sg_req;
+ u32 status;
+
+ /* Check channel error status register */
+ status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS);
+ if (status) {
+ tegra_dma_chan_decode_error(tdc, status);
+ tegra_dma_dump_chan_regs(tdc);
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS, 0xFFFFFFFF);
+ }
+
+ spin_lock(&tdc->vc.lock);
+ status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
+ if (!(status & TEGRA_GPCDMA_STATUS_ISE_EOC))
+ goto irq_done;
+
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS,
+ TEGRA_GPCDMA_STATUS_ISE_EOC);
+
+ if (!dma_desc)
+ goto irq_done;
+
+ sg_req = dma_desc->sg_req;
+ dma_desc->bytes_xfer += sg_req[dma_desc->sg_idx].len;
+
+ if (dma_desc->cyclic) {
+ vchan_cyclic_callback(&dma_desc->vd);
+ tegra_dma_configure_next_sg(tdc);
+ } else {
+ dma_desc->sg_idx++;
+ if (dma_desc->sg_idx == dma_desc->sg_count)
+ tegra_dma_xfer_complete(tdc);
+ else
+ tegra_dma_start(tdc);
+ }
+
+irq_done:
+ spin_unlock(&tdc->vc.lock);
+ return IRQ_HANDLED;
+}
+
+static void tegra_dma_issue_pending(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ unsigned long flags;
+
+ if (tdc->dma_desc)
+ return;
+
+ spin_lock_irqsave(&tdc->vc.lock, flags);
+ if (vchan_issue_pending(&tdc->vc))
+ tegra_dma_start(tdc);
+
+ /*
+ * For cyclic DMA transfers, program the second
+ * transfer parameters as soon as the first DMA
+ * transfer is started in order for the DMA
+ * controller to trigger the second transfer
+ * with the correct parameters.
+ */
+ if (tdc->dma_desc && tdc->dma_desc->cyclic)
+ tegra_dma_configure_next_sg(tdc);
+
+ spin_unlock_irqrestore(&tdc->vc.lock, flags);
+}
+
+static int tegra_dma_stop_client(struct tegra_dma_channel *tdc)
+{
+ int ret;
+ u32 status, csr;
+
+ /*
+ * Change the client associated with the DMA channel
+ * to stop the DMA engine from starting any more bursts for
+ * the given client, and wait for in-flight bursts to complete
+ */
+ csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR);
+ csr &= ~(TEGRA_GPCDMA_CSR_REQ_SEL_MASK);
+ csr |= TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED;
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr);
+
+ /* Wait for in flight data transfer to finish */
+ udelay(TEGRA_GPCDMA_BURST_COMPLETE_TIME);
+
+ /*
+ * If the TX/RX path is still active, wait till it becomes
+ * inactive
+ */
+ ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
+ tdc->chan_base_offset +
+ TEGRA_GPCDMA_CHAN_STATUS,
+ status,
+ !(status & (TEGRA_GPCDMA_STATUS_CHANNEL_TX |
+ TEGRA_GPCDMA_STATUS_CHANNEL_RX)),
+ 5,
+ TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
+ if (ret) {
+ dev_err(tdc2dev(tdc), "Timeout waiting for DMA burst completion!\n");
+ tegra_dma_dump_chan_regs(tdc);
+ }
+
+ return ret;
+}
+
+static int tegra_dma_terminate_all(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ unsigned long flags;
+ LIST_HEAD(head);
+ int err;
+
+ spin_lock_irqsave(&tdc->vc.lock, flags);
+
+ if (tdc->dma_desc) {
+ err = tdc->tdma->chip_data->terminate(tdc);
+ if (err) {
+ spin_unlock_irqrestore(&tdc->vc.lock, flags);
+ return err;
+ }
+
+ tegra_dma_disable(tdc);
+ tdc->dma_desc = NULL;
+ }
+
+ tegra_dma_sid_free(tdc);
+ vchan_get_all_descriptors(&tdc->vc, &head);
+ spin_unlock_irqrestore(&tdc->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&tdc->vc, &head);
+
+ return 0;
+}
+
+static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma_desc *dma_desc = tdc->dma_desc;
+ struct tegra_dma_sg_req *sg_req = dma_desc->sg_req;
+ unsigned int bytes_xfer, residual;
+ u32 wcount = 0, status;
+
+ wcount = tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT);
+
+ /*
+ * Set wcount = 0 if EOC bit is set. The transfer would have
+ * already completed and the CHAN_XFER_COUNT could have been updated
+ * for the next transfer, specifically in case of cyclic transfers.
+ */
+ status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
+ if (status & TEGRA_GPCDMA_STATUS_ISE_EOC)
+ wcount = 0;
+
+ bytes_xfer = dma_desc->bytes_xfer +
+ sg_req[dma_desc->sg_idx].len - (wcount * 4);
+
+ residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);
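+ /*
+ * e.g. a 256-byte one-shot request with 32 words (128 bytes) still
+ * outstanding: bytes_xfer = 256 - 128 = 128 and the reported residue
+ * is 256 - (128 % 256) = 128 bytes
+ */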
+
+ return residual;
+}
+
+static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma_desc *dma_desc;
+ struct virt_dma_desc *vd;
+ unsigned int residual;
+ unsigned long flags;
+ enum dma_status ret;
+
+ ret = dma_cookie_status(dc, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ spin_lock_irqsave(&tdc->vc.lock, flags);
+ vd = vchan_find_desc(&tdc->vc, cookie);
+ if (vd) {
+ dma_desc = vd_to_tegra_dma_desc(vd);
+ residual = dma_desc->bytes_req;
+ dma_set_residue(txstate, residual);
+ } else if (tdc->dma_desc && tdc->dma_desc->vd.tx.cookie == cookie) {
+ residual = tegra_dma_get_residual(tdc);
+ dma_set_residue(txstate, residual);
+ } else {
+ dev_err(tdc2dev(tdc), "cookie %d is not found\n", cookie);
+ }
+ spin_unlock_irqrestore(&tdc->vc.lock, flags);
+
+ return ret;
+}
+
+static inline int get_bus_width(struct tegra_dma_channel *tdc,
+ enum dma_slave_buswidth slave_bw)
+{
+ switch (slave_bw) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32;
+ default:
+ dev_err(tdc2dev(tdc), "given slave bus width is not supported\n");
+ return -EINVAL;
+ }
+}
+
+static unsigned int get_burst_size(struct tegra_dma_channel *tdc,
+ u32 burst_size, enum dma_slave_buswidth slave_bw,
+ int len)
+{
+ unsigned int burst_mmio_width, burst_byte;
+
+ /*
+ * burst_size from the client is in terms of the bus_width;
+ * convert that into words.
+ * If burst_size is not specified by the client, then use
+ * len to calculate the optimum burst size
+ */
+ burst_byte = burst_size ? burst_size * slave_bw : len;
+ burst_mmio_width = burst_byte / 4;
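+ /* e.g. maxburst = 8 on a 4-byte wide bus: 32 bytes, i.e. 8 words */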
+
+ if (burst_mmio_width < TEGRA_GPCDMA_MMIOSEQ_BURST_MIN)
+ return 0;
+
+ burst_mmio_width = min(burst_mmio_width, TEGRA_GPCDMA_MMIOSEQ_BURST_MAX);
+
+ return TEGRA_GPCDMA_MMIOSEQ_BURST(burst_mmio_width);
+}
+
+static int get_transfer_param(struct tegra_dma_channel *tdc,
+ enum dma_transfer_direction direction,
+ u32 *apb_addr,
+ u32 *mmio_seq,
+ u32 *csr,
+ unsigned int *burst_size,
+ enum dma_slave_buswidth *slave_bw)
+{
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ *apb_addr = tdc->dma_sconfig.dst_addr;
+ *mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
+ *burst_size = tdc->dma_sconfig.dst_maxburst;
+ *slave_bw = tdc->dma_sconfig.dst_addr_width;
+ *csr = TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC;
+ return 0;
+ case DMA_DEV_TO_MEM:
+ *apb_addr = tdc->dma_sconfig.src_addr;
+ *mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
+ *burst_size = tdc->dma_sconfig.src_maxburst;
+ *slave_bw = tdc->dma_sconfig.src_addr_width;
+ *csr = TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC;
+ return 0;
+ default:
+ dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
+ }
+
+ return -EINVAL;
+}
+
+static struct dma_async_tx_descriptor *
+tegra_dma_prep_dma_memset(struct dma_chan *dc, dma_addr_t dest, int value,
+ size_t len, unsigned long flags)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count;
+ struct tegra_dma_sg_req *sg_req;
+ struct tegra_dma_desc *dma_desc;
+ u32 csr, mc_seq;
+
+ if ((len & 3) || (dest & 3) || len > max_dma_count) {
+ dev_err(tdc2dev(tdc),
+ "DMA length/memory address is not supported\n");
+ return NULL;
+ }
+
+ /* Set DMA mode to fixed pattern */
+ csr = TEGRA_GPCDMA_CSR_DMA_FIXED_PAT;
+ /* Enable once or continuous mode */
+ csr |= TEGRA_GPCDMA_CSR_ONCE;
+ /* Enable IRQ mask */
+ csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
+ /* Enable the DMA interrupt */
+ if (flags & DMA_PREP_INTERRUPT)
+ csr |= TEGRA_GPCDMA_CSR_IE_EOC;
+ /* Configure default priority weight for the channel */
+ csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
+
+ mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
+ /* Retain the stream-id and clear the rest */
+ mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;
+
+ /* Set the address wrapping */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+
+ /* Program outstanding MC requests */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
+ /* Set burst size */
+ mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
+
+ dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT);
+ if (!dma_desc)
+ return NULL;
+
+ dma_desc->bytes_req = len;
+ dma_desc->sg_count = 1;
+ sg_req = dma_desc->sg_req;
+
+ sg_req[0].ch_regs.src_ptr = 0;
+ sg_req[0].ch_regs.dst_ptr = dest;
+ sg_req[0].ch_regs.high_addr_ptr =
+ FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32));
+ sg_req[0].ch_regs.fixed_pattern = value;
+ /* Word count reg takes the value as (N + 1) words */
+ sg_req[0].ch_regs.wcount = ((len - 4) >> 2);
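+ /* e.g. len = 64 bytes: wcount = 15, i.e. 16 words of 4 bytes */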
+ sg_req[0].ch_regs.csr = csr;
+ sg_req[0].ch_regs.mmio_seq = 0;
+ sg_req[0].ch_regs.mc_seq = mc_seq;
+ sg_req[0].len = len;
+
+ dma_desc->cyclic = false;
+ return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+tegra_dma_prep_dma_memcpy(struct dma_chan *dc, dma_addr_t dest,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma_sg_req *sg_req;
+ struct tegra_dma_desc *dma_desc;
+ unsigned int max_dma_count;
+ u32 csr, mc_seq;
+
+ max_dma_count = tdc->tdma->chip_data->max_dma_count;
+ if ((len & 3) || (src & 3) || (dest & 3) || len > max_dma_count) {
+ dev_err(tdc2dev(tdc),
+ "DMA length/memory address is not supported\n");
+ return NULL;
+ }
+
+ /* Set DMA mode to memory to memory transfer */
+ csr = TEGRA_GPCDMA_CSR_DMA_MEM2MEM;
+ /* Enable once or continuous mode */
+ csr |= TEGRA_GPCDMA_CSR_ONCE;
+ /* Enable IRQ mask */
+ csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
+ /* Enable the DMA interrupt */
+ if (flags & DMA_PREP_INTERRUPT)
+ csr |= TEGRA_GPCDMA_CSR_IE_EOC;
+ /* Configure default priority weight for the channel */
+ csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
+
+ mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
+ /* Retain the stream-id and clear the rest */
+ mc_seq &= (TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK) |
+ (TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK);
+
+ /* Set the address wrapping */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+
+ /* Program outstanding MC requests */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
+ /* Set burst size */
+ mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
+
+ dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT);
+ if (!dma_desc)
+ return NULL;
+
+ dma_desc->bytes_req = len;
+ dma_desc->sg_count = 1;
+ sg_req = dma_desc->sg_req;
+
+ sg_req[0].ch_regs.src_ptr = src;
+ sg_req[0].ch_regs.dst_ptr = dest;
+ sg_req[0].ch_regs.high_addr_ptr =
+ FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (src >> 32));
+ sg_req[0].ch_regs.high_addr_ptr |=
+ FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32));
+ /* Word count reg takes the value as (N + 1) words */
+ sg_req[0].ch_regs.wcount = ((len - 4) >> 2);
+ sg_req[0].ch_regs.csr = csr;
+ sg_req[0].ch_regs.mmio_seq = 0;
+ sg_req[0].ch_regs.mc_seq = mc_seq;
+ sg_req[0].len = len;
+
+ dma_desc->cyclic = false;
+ return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+tegra_dma_prep_slave_sg(struct dma_chan *dc, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count;
+ enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0;
+ struct tegra_dma_sg_req *sg_req;
+ struct tegra_dma_desc *dma_desc;
+ struct scatterlist *sg;
+ u32 burst_size;
+ unsigned int i;
+ int ret;
+
+ if (!tdc->config_init) {
+ dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
+ return NULL;
+ }
+ if (sg_len < 1) {
+ dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
+ return NULL;
+ }
+
+ ret = tegra_dma_sid_reserve(tdc, direction);
+ if (ret)
+ return NULL;
+
+ ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr,
+ &burst_size, &slave_bw);
+ if (ret < 0)
+ return NULL;
+
+ /* Enable once mode for this one-shot transfer */
+ csr |= TEGRA_GPCDMA_CSR_ONCE;
+ /* Program the slave id in requestor select */
+ csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id);
+ /* Enable IRQ mask */
+ csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
+ /* Configure default priority weight for the channel */
+ csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
+
+ /* Enable the DMA interrupt */
+ if (flags & DMA_PREP_INTERRUPT)
+ csr |= TEGRA_GPCDMA_CSR_IE_EOC;
+
+ mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
+ /* Retain the stream-id and clear the rest */
+ mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;
+
+ /* Set the address wrapping on both MC and MMIO side */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+ mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1);
+
+ /* Program 2 MC outstanding requests by default. */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
+
+ /* Set the MC burst size based on the MMIO burst size */
+ if (burst_size == 64)
+ mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
+ else
+ mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2;
+
+ dma_desc = kzalloc(struct_size(dma_desc, sg_req, sg_len), GFP_NOWAIT);
+ if (!dma_desc)
+ return NULL;
+
+ dma_desc->sg_count = sg_len;
+ sg_req = dma_desc->sg_req;
+
+ /* Make transfer requests */
+ for_each_sg(sgl, sg, sg_len, i) {
+ u32 len;
+ dma_addr_t mem;
+
+ mem = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+ if ((len & 3) || (mem & 3) || len > max_dma_count) {
+ dev_err(tdc2dev(tdc),
+ "DMA length/memory address is not supported\n");
+ kfree(dma_desc);
+ return NULL;
+ }
+
+ mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
+ dma_desc->bytes_req += len;
+
+ if (direction == DMA_MEM_TO_DEV) {
+ sg_req[i].ch_regs.src_ptr = mem;
+ sg_req[i].ch_regs.dst_ptr = apb_ptr;
+ sg_req[i].ch_regs.high_addr_ptr =
+ FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32));
+ } else if (direction == DMA_DEV_TO_MEM) {
+ sg_req[i].ch_regs.src_ptr = apb_ptr;
+ sg_req[i].ch_regs.dst_ptr = mem;
+ sg_req[i].ch_regs.high_addr_ptr =
+ FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32));
+ }
+
+ /*
+ * The word count register takes its input in words: writing a
+ * value of N requests a transfer of (N + 1) words.
+ */
+ sg_req[i].ch_regs.wcount = ((len - 4) >> 2);
+ sg_req[i].ch_regs.csr = csr;
+ sg_req[i].ch_regs.mmio_seq = mmio_seq;
+ sg_req[i].ch_regs.mc_seq = mc_seq;
+ sg_req[i].len = len;
+ }
+
+ dma_desc->cyclic = false;
+ return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
+}
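+
+/*
+ * Illustrative slave usage of the op above (a sketch; the channel name,
+ * FIFO address and widths are assumptions for the example):
+ *
+ *	struct dma_slave_config cfg = {
+ *		.direction = DMA_MEM_TO_DEV,
+ *		.dst_addr = fifo_phys_addr,
+ *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ *		.dst_maxburst = 8,
+ *	};
+ *	struct dma_chan *chan = dma_request_chan(dev, "tx");
+ *
+ *	dmaengine_slave_config(chan, &cfg);
+ *	dmaengine_submit(dmaengine_prep_slave_sg(chan, sgl, nents,
+ *						 DMA_MEM_TO_DEV,
+ *						 DMA_PREP_INTERRUPT));
+ *	dma_async_issue_pending(chan);
+ */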
+
+static struct dma_async_tx_descriptor *
+tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0, burst_size;
+ unsigned int max_dma_count, len, period_count, i;
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma_desc *dma_desc;
+ struct tegra_dma_sg_req *sg_req;
+ dma_addr_t mem = buf_addr;
+ int ret;
+
+ if (!buf_len || !period_len) {
+ dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
+ return NULL;
+ }
+
+ if (!tdc->config_init) {
+ dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
+ return NULL;
+ }
+
+ ret = tegra_dma_sid_reserve(tdc, direction);
+ if (ret)
+ return NULL;
+
+ /*
+ * We only support cyclic transfers when buf_len is a multiple of
+ * period_len.
+ */
+ if (buf_len % period_len) {
+ dev_err(tdc2dev(tdc), "buf_len is not a multiple of period_len\n");
+ return NULL;
+ }
+
+ len = period_len;
+ max_dma_count = tdc->tdma->chip_data->max_dma_count;
+ if ((len & 3) || (buf_addr & 3) || len > max_dma_count) {
+ dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
+ return NULL;
+ }
+
+ ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr,
+ &burst_size, &slave_bw);
+ if (ret < 0)
+ return NULL;
+
+ /* Select continuous (cyclic) mode by clearing the once bit */
+ csr &= ~TEGRA_GPCDMA_CSR_ONCE;
+ /* Program the slave id in requestor select */
+ csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id);
+ /* Enable IRQ mask */
+ csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
+ /* Configure default priority weight for the channel */
+ csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
+
+ /* Enable the DMA interrupt */
+ if (flags & DMA_PREP_INTERRUPT)
+ csr |= TEGRA_GPCDMA_CSR_IE_EOC;
+
+ mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1);
+
+ mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
+ /* Retain the stream-id and clear the rest */
+ mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;
+
+ /* Set the address wrapping on both MC and MMIO side */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
+ TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
+
+ /* Program 2 MC outstanding requests by default. */
+ mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
+ /* Set the MC burst size based on the MMIO burst size */
+ if (burst_size == 64)
+ mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
+ else
+ mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2;
+
+ period_count = buf_len / period_len;
+ dma_desc = kzalloc(struct_size(dma_desc, sg_req, period_count),
+ GFP_NOWAIT);
+ if (!dma_desc)
+ return NULL;
+
+ dma_desc->bytes_req = buf_len;
+ dma_desc->sg_count = period_count;
+ sg_req = dma_desc->sg_req;
+
+ /* Split the transfer into period-sized segments */
+ for (i = 0; i < period_count; i++) {
+ mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
+ if (direction == DMA_MEM_TO_DEV) {
+ sg_req[i].ch_regs.src_ptr = mem;
+ sg_req[i].ch_regs.dst_ptr = apb_ptr;
+ sg_req[i].ch_regs.high_addr_ptr =
+ FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32));
+ } else if (direction == DMA_DEV_TO_MEM) {
+ sg_req[i].ch_regs.src_ptr = apb_ptr;
+ sg_req[i].ch_regs.dst_ptr = mem;
+ sg_req[i].ch_regs.high_addr_ptr =
+ FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32));
+ }
+ /*
+ * The word count register takes its input in words: writing a
+ * value of N requests a transfer of (N + 1) words.
+ */
+ sg_req[i].ch_regs.wcount = ((len - 4) >> 2);
+ sg_req[i].ch_regs.csr = csr;
+ sg_req[i].ch_regs.mmio_seq = mmio_seq;
+ sg_req[i].ch_regs.mc_seq = mc_seq;
+ sg_req[i].len = len;
+
+ mem += len;
+ }
+
+ dma_desc->cyclic = true;
+
+ return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
+}
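+
+/*
+ * Illustrative cyclic (e.g. audio) usage of the op above; the buffer
+ * and period sizes are example assumptions, and buf_len must be a
+ * multiple of period_len as checked above:
+ *
+ *	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
+ *					 DMA_DEV_TO_MEM,
+ *					 DMA_PREP_INTERRUPT);
+ *	desc->callback = period_done_cb;
+ *	dmaengine_submit(desc);
+ *	dma_async_issue_pending(chan);
+ */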
+
+static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ int ret;
+
+ ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc);
+ if (ret) {
+ dev_err(tdc2dev(tdc), "request_irq failed for %s\n", tdc->name);
+ return ret;
+ }
+
+ dma_cookie_init(&tdc->vc.chan);
+ tdc->config_init = false;
+ return 0;
+}
+
+static void tegra_dma_chan_synchronize(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+
+ synchronize_irq(tdc->irq);
+ vchan_synchronize(&tdc->vc);
+}
+
+static void tegra_dma_free_chan_resources(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+
+ dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
+
+ tegra_dma_terminate_all(dc);
+ synchronize_irq(tdc->irq);
+
+ tasklet_kill(&tdc->vc.task);
+ tdc->config_init = false;
+ tdc->slave_id = -1;
+ tdc->sid_dir = DMA_TRANS_NONE;
+ free_irq(tdc->irq, tdc);
+
+ vchan_free_chan_resources(&tdc->vc);
+}
+
+static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct tegra_dma *tdma = ofdma->of_dma_data;
+ struct tegra_dma_channel *tdc;
+ struct dma_chan *chan;
+
+ chan = dma_get_any_slave_channel(&tdma->dma_dev);
+ if (!chan)
+ return NULL;
+
+ tdc = to_tegra_dma_chan(chan);
+ tdc->slave_id = dma_spec->args[0];
+
+ return chan;
+}
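+
+/*
+ * Illustrative device-tree consumer for the xlate above, assuming
+ * "#dma-cells = <1>" with the cell carrying the requestor/slave id:
+ *
+ *	uarta: serial {
+ *		dmas = <&gpcdma 8>, <&gpcdma 8>;
+ *		dma-names = "rx", "tx";
+ *	};
+ */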
+
+static const struct tegra_dma_chip_data tegra186_dma_chip_data = {
+ .nr_channels = 31,
+ .channel_reg_size = SZ_64K,
+ .max_dma_count = SZ_1G,
+ .hw_support_pause = false,
+ .terminate = tegra_dma_stop_client,
+};
+
+static const struct tegra_dma_chip_data tegra194_dma_chip_data = {
+ .nr_channels = 31,
+ .channel_reg_size = SZ_64K,
+ .max_dma_count = SZ_1G,
+ .hw_support_pause = true,
+ .terminate = tegra_dma_pause,
+};
+
+static const struct of_device_id tegra_dma_of_match[] = {
+ {
+ .compatible = "nvidia,tegra186-gpcdma",
+ .data = &tegra186_dma_chip_data,
+ }, {
+ .compatible = "nvidia,tegra194-gpcdma",
+ .data = &tegra194_dma_chip_data,
+ }, {
+ },
+};
+MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
+
+static int tegra_dma_program_sid(struct tegra_dma_channel *tdc, int stream_id)
+{
+ unsigned int reg_val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
+
+ reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK);
+ reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK);
+
+ reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK, stream_id);
+ reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK, stream_id);
+
+ tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, reg_val);
+ return 0;
+}
+
+static int tegra_dma_probe(struct platform_device *pdev)
+{
+ const struct tegra_dma_chip_data *cdata = NULL;
+ struct iommu_fwspec *iommu_spec;
+ unsigned int stream_id, i;
+ struct tegra_dma *tdma;
+ int ret;
+
+ cdata = of_device_get_match_data(&pdev->dev);
+
+ tdma = devm_kzalloc(&pdev->dev,
+ struct_size(tdma, channels, cdata->nr_channels),
+ GFP_KERNEL);
+ if (!tdma)
+ return -ENOMEM;
+
+ tdma->dev = &pdev->dev;
+ tdma->chip_data = cdata;
+ platform_set_drvdata(pdev, tdma);
+
+ tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(tdma->base_addr))
+ return PTR_ERR(tdma->base_addr);
+
+ tdma->rst = devm_reset_control_get_exclusive(&pdev->dev, "gpcdma");
+ if (IS_ERR(tdma->rst)) {
+ return dev_err_probe(&pdev->dev, PTR_ERR(tdma->rst),
+ "Missing controller reset\n");
+ }
+ reset_control_reset(tdma->rst);
+
+ tdma->dma_dev.dev = &pdev->dev;
+
+ iommu_spec = dev_iommu_fwspec_get(&pdev->dev);
+ if (!iommu_spec) {
+ dev_err(&pdev->dev, "Missing iommu stream-id\n");
+ return -EINVAL;
+ }
+ stream_id = iommu_spec->ids[0] & 0xffff;
+
+ INIT_LIST_HEAD(&tdma->dma_dev.channels);
+ for (i = 0; i < cdata->nr_channels; i++) {
+ struct tegra_dma_channel *tdc = &tdma->channels[i];
+
+ tdc->irq = platform_get_irq(pdev, i);
+ if (tdc->irq < 0)
+ return tdc->irq;
+
+ tdc->chan_base_offset = TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET +
+ i * cdata->channel_reg_size;
+ snprintf(tdc->name, sizeof(tdc->name), "gpcdma.%d", i);
+ tdc->tdma = tdma;
+ tdc->id = i;
+ tdc->slave_id = -1;
+
+ vchan_init(&tdc->vc, &tdma->dma_dev);
+ tdc->vc.desc_free = tegra_dma_desc_free;
+
+ /* program stream-id for this channel */
+ tegra_dma_program_sid(tdc, stream_id);
+ tdc->stream_id = stream_id;
+ }
+
+ dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_MEMCPY, tdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_MEMSET, tdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
+
+ /*
+ * Only word-aligned transfers are supported. Set the copy and
+ * fill alignment accordingly.
+ */
+ tdma->dma_dev.copy_align = 2;
+ tdma->dma_dev.fill_align = 2;
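+ /* An alignment value of 2 is DMAENGINE_ALIGN_4_BYTES, i.e. one word */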
+ tdma->dma_dev.device_alloc_chan_resources =
+ tegra_dma_alloc_chan_resources;
+ tdma->dma_dev.device_free_chan_resources =
+ tegra_dma_free_chan_resources;
+ tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
+ tdma->dma_dev.device_prep_dma_memcpy = tegra_dma_prep_dma_memcpy;
+ tdma->dma_dev.device_prep_dma_memset = tegra_dma_prep_dma_memset;
+ tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
+ tdma->dma_dev.device_config = tegra_dma_slave_config;
+ tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
+ tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
+ tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
+ tdma->dma_dev.device_pause = tegra_dma_device_pause;
+ tdma->dma_dev.device_resume = tegra_dma_device_resume;
+ tdma->dma_dev.device_synchronize = tegra_dma_chan_synchronize;
+ tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ ret = dma_async_device_register(&tdma->dma_dev);
+ if (ret < 0) {
+ dev_err_probe(&pdev->dev, ret,
+ "GPC DMA driver registration failed\n");
+ return ret;
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ tegra_dma_of_xlate, tdma);
+ if (ret < 0) {
+ dev_err_probe(&pdev->dev, ret,
+ "GPC DMA OF registration failed\n");
+
+ dma_async_device_unregister(&tdma->dma_dev);
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "GPC DMA driver register %d channels\n",
+ cdata->nr_channels);
+
+ return 0;
+}
+
+static int tegra_dma_remove(struct platform_device *pdev)
+{
+ struct tegra_dma *tdma = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&tdma->dma_dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_dma_pm_suspend(struct device *dev)
+{
+ struct tegra_dma *tdma = dev_get_drvdata(dev);
+ unsigned int i;
+
+ for (i = 0; i < tdma->chip_data->nr_channels; i++) {
+ struct tegra_dma_channel *tdc = &tdma->channels[i];
+
+ if (tdc->dma_desc) {
+ dev_err(tdma->dev, "channel %u busy\n", i);
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_dma_pm_resume(struct device *dev)
+{
+ struct tegra_dma *tdma = dev_get_drvdata(dev);
+ unsigned int i;
+
+ reset_control_reset(tdma->rst);
+
+ for (i = 0; i < tdma->chip_data->nr_channels; i++) {
+ struct tegra_dma_channel *tdc = &tdma->channels[i];
+
+ tegra_dma_program_sid(tdc, tdc->stream_id);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
+};
+
+static struct platform_driver tegra_dma_driver = {
+ .driver = {
+ .name = "tegra-gpcdma",
+ .pm = &tegra_dma_dev_pm_ops,
+ .of_match_table = tegra_dma_of_match,
+ },
+ .probe = tegra_dma_probe,
+ .remove = tegra_dma_remove,
+};
+
+module_platform_driver(tegra_dma_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra GPC DMA Controller driver");
+MODULE_AUTHOR("Pavan Kunapuli <pkunapuli@nvidia.com>");
+MODULE_AUTHOR("Rajesh Gumasta <rgumasta@nvidia.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c
index 062bd9bd4de0..695915dba707 100644
--- a/drivers/dma/ti/cppi41.c
+++ b/drivers/dma/ti/cppi41.c
@@ -1105,8 +1105,12 @@ static int cppi41_dma_probe(struct platform_device *pdev)
cdd->qmgr_num_pend = glue_info->qmgr_num_pend;
cdd->first_completion_queue = glue_info->first_completion_queue;
+ /* Parse new and deprecated dma-channels properties */
ret = of_property_read_u32(dev->of_node,
- "#dma-channels", &cdd->n_chans);
+ "dma-channels", &cdd->n_chans);
+ if (ret)
+ ret = of_property_read_u32(dev->of_node,
+ "#dma-channels", &cdd->n_chans);
if (ret)
goto err_get_n_chans;
diff --git a/drivers/dma/ti/k3-psil-am62.c b/drivers/dma/ti/k3-psil-am62.c
index d431e2033237..2b6fd6e37c61 100644
--- a/drivers/dma/ti/k3-psil-am62.c
+++ b/drivers/dma/ti/k3-psil-am62.c
@@ -70,10 +70,10 @@
/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
static struct psil_ep am62_src_ep_map[] = {
/* SAUL */
- PSIL_SAUL(0x7500, 20, 35, 8, 35, 0),
- PSIL_SAUL(0x7501, 21, 35, 8, 36, 0),
- PSIL_SAUL(0x7502, 22, 43, 8, 43, 0),
- PSIL_SAUL(0x7503, 23, 43, 8, 44, 0),
+ PSIL_SAUL(0x7504, 20, 35, 8, 35, 0),
+ PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
+ PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
+ PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
/* PDMA_MAIN0 - SPI0-3 */
PSIL_PDMA_XY_PKT(0x4302),
PSIL_PDMA_XY_PKT(0x4303),
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 7aa63b652027..dc299ab36818 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -229,7 +229,7 @@ struct zynqmp_dma_chan {
bool is_dmacoherent;
struct tasklet_struct tasklet;
bool idle;
- u32 desc_size;
+ size_t desc_size;
bool err;
u32 bus_width;
u32 src_burst_len;
@@ -486,7 +486,8 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
}
chan->desc_pool_v = dma_alloc_coherent(chan->dev,
- (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
+ (2 * ZYNQMP_DMA_DESC_SIZE(chan) *
+ ZYNQMP_DMA_NUM_DESCS),
&chan->desc_pool_p, GFP_KERNEL);
if (!chan->desc_pool_v)
return -ENOMEM;
@@ -1077,7 +1078,11 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT);
pm_runtime_use_autosuspend(zdev->dev);
pm_runtime_enable(zdev->dev);
- pm_runtime_get_sync(zdev->dev);
+ ret = pm_runtime_resume_and_get(zdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "device wakeup failed.\n");
+ pm_runtime_disable(zdev->dev);
+ }
if (!pm_runtime_enabled(zdev->dev)) {
ret = zynqmp_dma_runtime_resume(zdev->dev);
if (ret)
@@ -1093,7 +1098,11 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
p->src_addr_widths = BIT(zdev->chan->bus_width / 8);
- dma_async_device_register(&zdev->common);
+ ret = dma_async_device_register(&zdev->common);
+ if (ret) {
+ dev_err(zdev->dev, "failed to register the dma device\n");
+ goto free_chan_resources;
+ }
ret = of_dma_controller_register(pdev->dev.of_node,
of_zynqmp_dma_xlate, zdev);