author     Linus Torvalds <torvalds@linux-foundation.org>  2022-03-30 10:54:49 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-03-30 10:54:49 -0700
commit     2a44cdaa01837355b14b9221e87d75963846296c
tree       7ab1171992b1b142e03587289199da88d65bea2f  /drivers/dma
parent     Merge tag 'rproc-v5.18' of git://git.kernel.org/pub/scm/linux/kernel/git/remoteproc/linux
parent     dmaengine: hisi_dma: fix MSI allocate fail when reload hisi_dma
Merge tag 'dmaengine-5.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine
Pull dmaengine updates from Vinod Koul:
 "This time we have a bunch of driver updates and some new device
  support.

  New support:
   - Document RZ/V2L and RZ/G2UL dma binding
   - TI AM62x k3-udma and k3-psil support

  Updates:
   - Yaml conversion for Mediatek uart apdma schema
   - Removal of DMA-32 fallback configuration for various drivers
   - imx-sdma updates for channel restart"

* tag 'dmaengine-5.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (23 commits)
  dmaengine: hisi_dma: fix MSI allocate fail when reload hisi_dma
  dmaengine: dw-axi-dmac: cleanup comments
  dmaengine: fsl-dpaa2-qdma: Drop comma after SoC match table sentinel
  dt-bindings: dma: Convert mtk-uart-apdma to DT schema
  dmaengine: ppc4xx: Make use of the helper macro LIST_HEAD()
  dmaengine: idxd: Remove useless DMA-32 fallback configuration
  dmaengine: qcom_hidma: Remove useless DMA-32 fallback configuration
  dmaengine: sh: Kconfig: Add ARCH_R9A07G054 dependency for RZ_DMAC config option
  dmaengine: ti: k3-psil: Add AM62x PSIL and PDMA data
  dmaengine: ti: k3-udma: Add AM62x DMSS support
  dmaengine: ti: cleanup comments
  dmaengine: imx-sdma: clean up some inconsistent indenting
  dmaengine: Revert "dmaengine: shdma: Fix runtime PM imbalance on error"
  dmaengine: idxd: restore traffic class defaults after wq reset
  dmaengine: altera-msgdma: Remove useless DMA-32 fallback configuration
  dmaengine: stm32-dma: set dma_device max_sg_burst
  dmaengine: imx-sdma: fix cyclic buffer race condition
  dmaengine: imx-sdma: restart cyclic channel if needed
  dmaengine: ioat: Remove useless DMA-32 fallback configuration
  dmaengine: ptdma: handle the cases based on DMA is complete
  ...
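Several of the commits below share one rationale: if dma_set_mask_and_coherent()
rejects a 64-bit mask, a follow-up 32-bit request will fail for the same reason,
so the fallback is dead code. A minimal sketch of the pattern being dropped,
assuming a generic probe() context with pdev/ret named as in the hunks below:

	/* before: 32-bit fallback that can never rescue a failed 64-bit request */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			return ret;
	}

	/* after: treat a failed 64-bit request as "no usable DMA" and bail out */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;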
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/altera-msgdma.c                        4
-rw-r--r--  drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c     8
-rw-r--r--  drivers/dma/dw-axi-dmac/dw-axi-dmac.h              2
-rw-r--r--  drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h            2
-rw-r--r--  drivers/dma/hisi_dma.c                             2
-rw-r--r--  drivers/dma/idxd/device.c                          9
-rw-r--r--  drivers/dma/idxd/init.c                            2
-rw-r--r--  drivers/dma/imx-sdma.c                            22
-rw-r--r--  drivers/dma/ioat/init.c                            2
-rw-r--r--  drivers/dma/ppc4xx/adma.c                          2
-rw-r--r--  drivers/dma/ptdma/ptdma-dmaengine.c               24
-rw-r--r--  drivers/dma/qcom/hidma.c                           4
-rw-r--r--  drivers/dma/sh/Kconfig                             6
-rw-r--r--  drivers/dma/sh/shdma-base.c                        4
-rw-r--r--  drivers/dma/stm32-dma.c                            1
-rw-r--r--  drivers/dma/ti/Makefile                            3
-rw-r--r--  drivers/dma/ti/cppi41.c                            6
-rw-r--r--  drivers/dma/ti/edma.c                             10
-rw-r--r--  drivers/dma/ti/k3-psil-am62.c                    186
-rw-r--r--  drivers/dma/ti/k3-psil-priv.h                      1
-rw-r--r--  drivers/dma/ti/k3-psil.c                           1
-rw-r--r--  drivers/dma/ti/k3-udma.c                           1
-rw-r--r--  drivers/dma/ti/omap-dma.c                          2
23 files changed, 258 insertions(+), 46 deletions(-)
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index f5b885d69cd3..6f56dfd375e3 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -891,9 +891,7 @@ static int msgdma_probe(struct platform_device *pdev)
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
dev_warn(&pdev->dev, "unable to set coherent mask to 64");
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (ret)
- goto fail;
+ goto fail;
}
msgdma_reset(mdev);
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index 33baf1591a49..e9c9bcb1f5c2 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0
// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)
/*
@@ -35,7 +35,7 @@
/*
* The set of bus widths supported by the DMA controller. DW AXI DMAC supports
* master data bus width up to 512 bits (for both AXI master interfaces), but
- * it depends on IP block configurarion.
+ * it depends on IP block configuration.
*/
#define AXI_DMA_BUSWIDTHS \
(DMA_SLAVE_BUSWIDTH_1_BYTE | \
@@ -1089,10 +1089,10 @@ static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
u32 status, i;
- /* Disable DMAC inerrupts. We'll enable them after processing chanels */
+ /* Disable DMAC interrupts. We'll enable them after processing channels */
axi_dma_irq_disable(chip);
- /* Poll, clear and process every chanel interrupt status */
+ /* Poll, clear and process every channel interrupt status */
for (i = 0; i < dw->hdata->nr_channels; i++) {
chan = &dw->chan[i];
status = axi_chan_irq_read(chan);
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
index be69a0b76860..e9d5eb0fd594 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)
/*
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
index 7d571849c569..03e2f4e0baca 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
@@ -139,7 +139,7 @@ struct dpaa2_qdma_priv_per_prio {
static struct soc_device_attribute soc_fixup_tuning[] = {
{ .family = "QorIQ LX2160A"},
- { },
+ { /* sentinel */ }
};
/* FD pool size: one FD + 3 Frame list + 2 source/destination descriptor */
diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c
index 97c87a7cba87..43817ced3a3e 100644
--- a/drivers/dma/hisi_dma.c
+++ b/drivers/dma/hisi_dma.c
@@ -30,7 +30,7 @@
#define HISI_DMA_MODE 0x217c
#define HISI_DMA_OFFSET 0x100
-#define HISI_DMA_MSI_NUM 30
+#define HISI_DMA_MSI_NUM 32
#define HISI_DMA_CHAN_NUM 30
#define HISI_DMA_Q_DEPTH_VAL 1024
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 573ad8b86804..3061fe857d69 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -681,8 +681,13 @@ static void idxd_groups_clear_state(struct idxd_device *idxd)
group->use_rdbuf_limit = false;
group->rdbufs_allowed = 0;
group->rdbufs_reserved = 0;
- group->tc_a = -1;
- group->tc_b = -1;
+ if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
+ group->tc_a = 1;
+ group->tc_b = 1;
+ } else {
+ group->tc_a = -1;
+ group->tc_b = -1;
+ }
}
}
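Context for the hunk above: on pre-2.0 DSA devices the driver now restores the
hardware traffic-class defaults of 1/1 when groups are cleared, unless the user
asked to keep the unrestricted -1/-1 values through the idxd tc_override module
parameter. A rough sketch of how such a knob is declared; the exact permission
bits and description string in drivers/dma/idxd/init.c may differ:

	/* assumed approximate form of the idxd "tc_override" module parameter */
	bool tc_override;
	module_param(tc_override, bool, 0644);
	MODULE_PARM_DESC(tc_override, "Override traffic class defaults");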
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 08a5f4310188..993a5dcca24f 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -605,8 +605,6 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_dbg(dev, "Set DMA masks\n");
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (rc)
goto err;
dev_dbg(dev, "Set PCI master\n");
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 75ec0754d4ad..70c0aa931ddf 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -701,6 +701,11 @@ static int sdma_config_ownership(struct sdma_channel *sdmac,
return 0;
}
+static int is_sdma_channel_enabled(struct sdma_engine *sdma, int channel)
+{
+ return !!(readl(sdma->regs + SDMA_H_STATSTOP) & BIT(channel));
+}
+
static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
{
writel(BIT(channel), sdma->regs + SDMA_H_START);
@@ -842,7 +847,6 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
*/
desc->chn_real_count = bd->mode.count;
- bd->mode.status |= BD_DONE;
bd->mode.count = desc->period_len;
desc->buf_ptail = desc->buf_tail;
desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;
@@ -857,9 +861,21 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
spin_lock(&sdmac->vc.lock);
+ /* Assign buffer ownership to SDMA */
+ bd->mode.status |= BD_DONE;
+
if (error)
sdmac->status = old_status;
}
+
+ /*
+ * SDMA stops cyclic channel when DMA request triggers a channel and no SDMA
+ * owned buffer is available (i.e. BD_DONE was set too late).
+ */
+ if (!is_sdma_channel_enabled(sdmac->sdma, sdmac->channel)) {
+ dev_warn(sdmac->sdma->dev, "restart cyclic channel %d\n", sdmac->channel);
+ sdma_enable_channel(sdmac->sdma, sdmac->channel);
+ }
}
static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
@@ -876,9 +892,9 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
for (i = 0; i < sdmac->desc->num_bd; i++) {
bd = &sdmac->desc->bd[i];
- if (bd->mode.status & (BD_DONE | BD_RROR))
+ if (bd->mode.status & (BD_DONE | BD_RROR))
error = -EIO;
- sdmac->desc->chn_real_count += bd->mode.count;
+ sdmac->desc->chn_real_count += bd->mode.count;
}
if (error)
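The imx-sdma hunks above cooperate: BD_DONE, which hands a cyclic buffer
descriptor back to the SDMA engine, is now set only after the period callback
has consumed the buffer, and since the engine stops a cyclic channel when a DMA
request fires while it owns no descriptor, the interrupt path re-arms the
channel if it finds it stopped. A condensed view of the per-interrupt flow, as
an illustration rather than the literal driver loop:

	/*
	 * For each CPU-owned descriptor (BD_DONE clear):
	 *   - latch bd->mode.count as the real transfer size
	 *   - run the client's period callback
	 *   - set BD_DONE so the descriptor belongs to the SDMA engine again
	 * Then, if every descriptor was still CPU-owned when a request fired,
	 * the hardware has stopped the channel and it must be kicked again:
	 */
	if (!is_sdma_channel_enabled(sdmac->sdma, sdmac->channel))
		sdma_enable_channel(sdmac->sdma, sdmac->channel);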
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 373b8dac6c9b..5d707ff63554 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -1365,8 +1365,6 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err)
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err)
return err;
device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 5e46e347e28b..6b5e91f26afc 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -1686,8 +1686,8 @@ static struct ppc440spe_adma_desc_slot *ppc440spe_adma_alloc_slots(
{
struct ppc440spe_adma_desc_slot *iter = NULL, *_iter;
struct ppc440spe_adma_desc_slot *alloc_start = NULL;
- struct list_head chain = LIST_HEAD_INIT(chain);
int slots_found, retry = 0;
+ LIST_HEAD(chain);
BUG_ON(!num_slots || !slots_per_op);
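For reference, the conversion above relies on the stock initializers from
include/linux/list.h, which make LIST_HEAD(chain) equivalent to the open-coded
declaration it replaces:

	#define LIST_HEAD_INIT(name) { &(name), &(name) }

	#define LIST_HEAD(name) \
		struct list_head name = LIST_HEAD_INIT(name)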
diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c
index c9e52f6f2f50..91b93e8d9779 100644
--- a/drivers/dma/ptdma/ptdma-dmaengine.c
+++ b/drivers/dma/ptdma/ptdma-dmaengine.c
@@ -100,12 +100,17 @@ static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
spin_lock_irqsave(&chan->vc.lock, flags);
if (desc) {
- if (desc->status != DMA_ERROR)
- desc->status = DMA_COMPLETE;
-
- dma_cookie_complete(tx_desc);
- dma_descriptor_unmap(tx_desc);
- list_del(&desc->vd.node);
+ if (desc->status != DMA_COMPLETE) {
+ if (desc->status != DMA_ERROR)
+ desc->status = DMA_COMPLETE;
+
+ dma_cookie_complete(tx_desc);
+ dma_descriptor_unmap(tx_desc);
+ list_del(&desc->vd.node);
+ } else {
+ /* Don't handle it twice */
+ tx_desc = NULL;
+ }
}
desc = pt_next_dma_desc(chan);
@@ -233,9 +238,14 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
struct pt_dma_desc *desc;
unsigned long flags;
+ bool engine_is_idle = true;
spin_lock_irqsave(&chan->vc.lock, flags);
+ desc = pt_next_dma_desc(chan);
+ if (desc)
+ engine_is_idle = false;
+
vchan_issue_pending(&chan->vc);
desc = pt_next_dma_desc(chan);
@@ -243,7 +253,7 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
spin_unlock_irqrestore(&chan->vc.lock, flags);
/* If there was nothing active, start processing */
- if (desc)
+ if (engine_is_idle)
pt_cmd_callback(desc, 0);
}
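Both ptdma hunks guard against handling the same descriptor twice: the
completion path skips descriptors already marked DMA_COMPLETE, and
issue_pending only kicks the engine when the queue was empty beforehand, since
an already-active descriptor means the in-flight completion handler will pick
up the new work itself. The ordering that matters, restated as a simplified
sketch rather than the literal driver code:

	bool engine_is_idle = !pt_next_dma_desc(chan);	/* sample before queueing */

	vchan_issue_pending(&chan->vc);			/* move submitted work to the issued list */
	desc = pt_next_dma_desc(chan);

	if (engine_is_idle)				/* only start the engine if it was idle */
		pt_cmd_callback(desc, 0);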
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 65d054bb11aa..51587cf8196b 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -838,9 +838,7 @@ static int hidma_probe(struct platform_device *pdev)
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc) {
dev_warn(&pdev->dev, "unable to set coherent mask to 64");
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (rc)
- goto dmafree;
+ goto dmafree;
}
dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index a46296285307..b35d705f79e7 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -49,10 +49,10 @@ config RENESAS_USB_DMAC
SoCs.
config RZ_DMAC
- tristate "Renesas RZ/G2L DMA Controller"
- depends on ARCH_R9A07G044 || COMPILE_TEST
+ tristate "Renesas RZ/{G2L,V2L} DMA Controller"
+ depends on ARCH_R9A07G044 || ARCH_R9A07G054 || COMPILE_TEST
select RENESAS_DMA
select DMA_VIRTUAL_CHANNELS
help
This driver supports the general purpose DMA controller found in the
- Renesas RZ/G2L SoC variants.
+ Renesas RZ/{G2L,V2L} SoC variants.
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index b26ed690f03c..158e5e7defae 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -115,10 +115,8 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
ret = pm_runtime_get(schan->dev);
spin_unlock_irq(&schan->chan_lock);
- if (ret < 0) {
+ if (ret < 0)
dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
- pm_runtime_put(schan->dev);
- }
pm_runtime_barrier(schan->dev);
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 83a37a6955a3..d2365fab1b7a 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -1389,6 +1389,7 @@ static int stm32_dma_probe(struct platform_device *pdev)
dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
dd->copy_align = DMAENGINE_ALIGN_32_BYTES;
dd->max_burst = STM32_DMA_MAX_BURST;
+ dd->max_sg_burst = STM32_DMA_ALIGNED_MAX_DATA_ITEMS;
dd->descriptor_reuse = true;
dd->dev = &pdev->dev;
INIT_LIST_HEAD(&dd->channels);
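max_sg_burst is advertised to dmaengine consumers through the slave-caps
interface; a client that wants to split its scatterlist accordingly can read it
back roughly like this (chan being an already-requested DMA channel):

	struct dma_slave_caps caps;

	if (!dma_get_slave_caps(chan, &caps))
		/* caps.max_sg_burst: largest SG burst the controller reports */
		pr_debug("max_sg_burst = %u\n", caps.max_sg_burst);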
diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile
index 1d4081a049b7..d3a303f0d7c6 100644
--- a/drivers/dma/ti/Makefile
+++ b/drivers/dma/ti/Makefile
@@ -9,5 +9,6 @@ obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o \
k3-psil-j721e.o \
k3-psil-j7200.o \
k3-psil-am64.o \
- k3-psil-j721s2.o
+ k3-psil-j721s2.o \
+ k3-psil-am62.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c
index 8c2f7ebe998c..062bd9bd4de0 100644
--- a/drivers/dma/ti/cppi41.c
+++ b/drivers/dma/ti/cppi41.c
@@ -315,7 +315,7 @@ static irqreturn_t cppi41_irq(int irq, void *data)
val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i));
if (i == QMGR_PENDING_SLOT_Q(first_completion_queue) && val) {
u32 mask;
- /* set corresponding bit for completetion Q 93 */
+ /* set corresponding bit for completion Q 93 */
mask = 1 << QMGR_PENDING_BIT_Q(first_completion_queue);
/* not set all bits for queues less than Q 93 */
mask--;
@@ -703,7 +703,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
* transfer descriptor followed by TD descriptor. Waiting seems not to
* cause any difference.
* RX seems to be thrown out right away. However once the TearDown
- * descriptor gets through we are done. If we have seens the transfer
+ * descriptor gets through we are done. If we have seen the transfer
* descriptor before the TD we fetch it from enqueue, it has to be
* there waiting for us.
*/
@@ -747,7 +747,7 @@ static int cppi41_stop_chan(struct dma_chan *chan)
struct cppi41_channel *cc, *_ct;
/*
- * channels might still be in the pendling list if
+ * channels might still be in the pending list if
* cppi41_dma_issue_pending() is called after
* cppi41_runtime_suspend() is called
*/
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 08e47f44d325..3ea8ef7f57df 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -118,10 +118,10 @@
/*
* Max of 20 segments per channel to conserve PaRAM slots
- * Also note that MAX_NR_SG should be atleast the no.of periods
+ * Also note that MAX_NR_SG should be at least the no.of periods
* that are required for ASoC, otherwise DMA prep calls will
* fail. Today davinci-pcm is the only user of this driver and
- * requires atleast 17 slots, so we setup the default to 20.
+ * requires at least 17 slots, so we setup the default to 20.
*/
#define MAX_NR_SG 20
#define EDMA_MAX_SLOTS MAX_NR_SG
@@ -976,7 +976,7 @@ static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
* and quotient respectively of the division of:
* (dma_length / acnt) by (SZ_64K -1). This is so
* that in case bcnt over flows, we have ccnt to use.
- * Note: In A-sync tranfer only, bcntrld is used, but it
+ * Note: In A-sync transfer only, bcntrld is used, but it
* only applies for sg_dma_len(sg) >= SZ_64K.
* In this case, the best way adopted is- bccnt for the
* first frame will be the remainder below. Then for
@@ -1203,7 +1203,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
* slot2: the remaining amount of data after slot1.
* ACNT = full_length - length1, length2 = ACNT
*
- * When the full_length is multibple of 32767 one slot can be
+ * When the full_length is a multiple of 32767 one slot can be
* used to complete the transfer.
*/
width = array_size;
@@ -1814,7 +1814,7 @@ static void edma_issue_pending(struct dma_chan *chan)
* This limit exists to avoid a possible infinite loop when waiting for proof
* that a particular transfer is completed. This limit can be hit if there
* are large bursts to/from slow devices or the CPU is never able to catch
- * the DMA hardware idle. On an AM335x transfering 48 bytes from the UART
+ * the DMA hardware idle. On an AM335x transferring 48 bytes from the UART
* RX-FIFO, as many as 55 loops have been seen.
*/
#define EDMA_MAX_TR_WAIT_LOOPS 1000
diff --git a/drivers/dma/ti/k3-psil-am62.c b/drivers/dma/ti/k3-psil-am62.c
new file mode 100644
index 000000000000..d431e2033237
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-am62.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = flow_base, \
+ }, \
+ }
+
+#define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = default_flow, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+#define PSIL_PDMA_MCASP(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pdma_acc32 = 1, \
+ .pdma_burst = 1, \
+ }, \
+ }
+
+#define PSIL_CSI2RX(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep am62_src_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0x7500, 20, 35, 8, 35, 0),
+ PSIL_SAUL(0x7501, 21, 35, 8, 36, 0),
+ PSIL_SAUL(0x7502, 22, 43, 8, 43, 0),
+ PSIL_SAUL(0x7503, 23, 43, 8, 44, 0),
+ /* PDMA_MAIN0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0x4302),
+ PSIL_PDMA_XY_PKT(0x4303),
+ PSIL_PDMA_XY_PKT(0x4304),
+ PSIL_PDMA_XY_PKT(0x4305),
+ PSIL_PDMA_XY_PKT(0x4306),
+ PSIL_PDMA_XY_PKT(0x4307),
+ PSIL_PDMA_XY_PKT(0x4308),
+ PSIL_PDMA_XY_PKT(0x4309),
+ PSIL_PDMA_XY_PKT(0x430a),
+ PSIL_PDMA_XY_PKT(0x430b),
+ PSIL_PDMA_XY_PKT(0x430c),
+ PSIL_PDMA_XY_PKT(0x430d),
+ /* PDMA_MAIN1 - UART0-6 */
+ PSIL_PDMA_XY_PKT(0x4400),
+ PSIL_PDMA_XY_PKT(0x4401),
+ PSIL_PDMA_XY_PKT(0x4402),
+ PSIL_PDMA_XY_PKT(0x4403),
+ PSIL_PDMA_XY_PKT(0x4404),
+ PSIL_PDMA_XY_PKT(0x4405),
+ PSIL_PDMA_XY_PKT(0x4406),
+ /* PDMA_MAIN2 - MCASP0-2 */
+ PSIL_PDMA_MCASP(0x4500),
+ PSIL_PDMA_MCASP(0x4501),
+ PSIL_PDMA_MCASP(0x4502),
+ /* CPSW3G */
+ PSIL_ETHERNET(0x4600, 19, 19, 16),
+ /* CSI2RX */
+ PSIL_CSI2RX(0x4700),
+ PSIL_CSI2RX(0x4701),
+ PSIL_CSI2RX(0x4702),
+ PSIL_CSI2RX(0x4703),
+ PSIL_CSI2RX(0x4704),
+ PSIL_CSI2RX(0x4705),
+ PSIL_CSI2RX(0x4706),
+ PSIL_CSI2RX(0x4707),
+ PSIL_CSI2RX(0x4708),
+ PSIL_CSI2RX(0x4709),
+ PSIL_CSI2RX(0x470a),
+ PSIL_CSI2RX(0x470b),
+ PSIL_CSI2RX(0x470c),
+ PSIL_CSI2RX(0x470d),
+ PSIL_CSI2RX(0x470e),
+ PSIL_CSI2RX(0x470f),
+ PSIL_CSI2RX(0x4710),
+ PSIL_CSI2RX(0x4711),
+ PSIL_CSI2RX(0x4712),
+ PSIL_CSI2RX(0x4713),
+ PSIL_CSI2RX(0x4714),
+ PSIL_CSI2RX(0x4715),
+ PSIL_CSI2RX(0x4716),
+ PSIL_CSI2RX(0x4717),
+ PSIL_CSI2RX(0x4718),
+ PSIL_CSI2RX(0x4719),
+ PSIL_CSI2RX(0x471a),
+ PSIL_CSI2RX(0x471b),
+ PSIL_CSI2RX(0x471c),
+ PSIL_CSI2RX(0x471d),
+ PSIL_CSI2RX(0x471e),
+ PSIL_CSI2RX(0x471f),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep am62_dst_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
+ PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
+ /* PDMA_MAIN0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0xc302),
+ PSIL_PDMA_XY_PKT(0xc303),
+ PSIL_PDMA_XY_PKT(0xc304),
+ PSIL_PDMA_XY_PKT(0xc305),
+ PSIL_PDMA_XY_PKT(0xc306),
+ PSIL_PDMA_XY_PKT(0xc307),
+ PSIL_PDMA_XY_PKT(0xc308),
+ PSIL_PDMA_XY_PKT(0xc309),
+ PSIL_PDMA_XY_PKT(0xc30a),
+ PSIL_PDMA_XY_PKT(0xc30b),
+ PSIL_PDMA_XY_PKT(0xc30c),
+ PSIL_PDMA_XY_PKT(0xc30d),
+ /* PDMA_MAIN1 - UART0-6 */
+ PSIL_PDMA_XY_PKT(0xc400),
+ PSIL_PDMA_XY_PKT(0xc401),
+ PSIL_PDMA_XY_PKT(0xc402),
+ PSIL_PDMA_XY_PKT(0xc403),
+ PSIL_PDMA_XY_PKT(0xc404),
+ PSIL_PDMA_XY_PKT(0xc405),
+ PSIL_PDMA_XY_PKT(0xc406),
+ /* PDMA_MAIN2 - MCASP0-2 */
+ PSIL_PDMA_MCASP(0xc500),
+ PSIL_PDMA_MCASP(0xc501),
+ PSIL_PDMA_MCASP(0xc502),
+ /* CPSW3G */
+ PSIL_ETHERNET(0xc600, 19, 19, 8),
+ PSIL_ETHERNET(0xc601, 20, 27, 8),
+ PSIL_ETHERNET(0xc602, 21, 35, 8),
+ PSIL_ETHERNET(0xc603, 22, 43, 8),
+ PSIL_ETHERNET(0xc604, 23, 51, 8),
+ PSIL_ETHERNET(0xc605, 24, 59, 8),
+ PSIL_ETHERNET(0xc606, 25, 67, 8),
+ PSIL_ETHERNET(0xc607, 26, 75, 8),
+};
+
+struct psil_ep_map am62_ep_map = {
+ .name = "am62",
+ .src = am62_src_ep_map,
+ .src_count = ARRAY_SIZE(am62_src_ep_map),
+ .dst = am62_dst_ep_map,
+ .dst_count = ARRAY_SIZE(am62_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h
index e51e179cdb56..74fa9ec02968 100644
--- a/drivers/dma/ti/k3-psil-priv.h
+++ b/drivers/dma/ti/k3-psil-priv.h
@@ -42,5 +42,6 @@ extern struct psil_ep_map j721e_ep_map;
extern struct psil_ep_map j7200_ep_map;
extern struct psil_ep_map am64_ep_map;
extern struct psil_ep_map j721s2_ep_map;
+extern struct psil_ep_map am62_ep_map;
#endif /* K3_PSIL_PRIV_H_ */
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
index 8867b4bd0c51..761a384093d2 100644
--- a/drivers/dma/ti/k3-psil.c
+++ b/drivers/dma/ti/k3-psil.c
@@ -22,6 +22,7 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "J7200", .data = &j7200_ep_map },
{ .family = "AM64X", .data = &am64_ep_map },
{ .family = "J721S2", .data = &j721s2_ep_map },
+ { .family = "AM62X", .data = &am62_ep_map },
{ /* sentinel */ }
};
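With the AM62X entry in place, the PSI-L core selects the endpoint map for the
running SoC via soc_device_match() and resolves individual threads through
psil_get_ep_config(). A rough consumer-side sketch (0x4600 is the CPSW3G RX
thread from the new am62 map; error handling shortened):

	struct psil_endpoint_config *ep;

	ep = psil_get_ep_config(0x4600);	/* look up by PSI-L thread ID */
	if (IS_ERR(ep))
		return PTR_ERR(ep);
	/* ep->mapped_channel_id, ep->flow_start, ... describe the endpoint */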
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index d2d4cbe63e44..2f0d2c68c93c 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -4375,6 +4375,7 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "J7200", .data = &j7200_soc_data },
{ .family = "AM64X", .data = &am64_soc_data },
{ .family = "J721S2", .data = &j721e_soc_data},
+ { .family = "AM62X", .data = &am64_soc_data },
{ /* sentinel */ }
};
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 7cb577e6587b..8e52a0dc1f78 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1442,7 +1442,7 @@ static int omap_dma_pause(struct dma_chan *chan)
* A source-synchronised channel is one where the fetching of data is
* under control of the device. In other words, a device-to-memory
* transfer. So, a destination-synchronised channel (which would be a
- * memory-to-device transfer) undergoes an abort if the the CCR_ENABLE
+ * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
* bit is cleared.
* From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
* aborts immediately after completion of current read/write