Diffstat (limited to 'drivers/dma/xilinx/xilinx_dma.c')
-rw-r--r-- | drivers/dma/xilinx/xilinx_dma.c | 649 |
1 file changed, 510 insertions, 139 deletions
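The patch below teaches the existing Xilinx DMA driver a fourth soft IP, the AXI MCDMA, alongside AXI DMA, CDMA and VDMA. Nothing Xilinx-specific leaks out to consumers: MCDMA channels are driven through the generic dmaengine slave API, and the reworked completion path reports per-descriptor errors and residue through the callback_result hook. A minimal, hypothetical client sketch (the device, channel name and helper names are illustrative, not part of this patch):

#include <linux/completion.h>
#include <linux/dmaengine.h>

/* Called from the driver's completion tasklet; the patch fills in
 * result->result (DMA_TRANS_NOERROR or a read/write failure) and
 * result->residue for every finished descriptor. */
static void my_xfer_done(void *param, const struct dmaengine_result *result)
{
	if (result->result == DMA_TRANS_NOERROR)
		complete(param);
}

static int my_submit(struct device *dev, struct scatterlist *sgl,
		     unsigned int nents, struct completion *done)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx_chan0");	/* channel name is an assumption */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_release_channel(chan);
		return -EINVAL;
	}

	desc->callback_result = my_xfer_done;
	desc->callback_param = done;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);	/* ends up in xilinx_mcdma_start_transfer() */
	return 0;
}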
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 5d56f1e4d332..a9c5d5cc9f2b 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -25,6 +25,12 @@ * The AXI CDMA, is a soft IP, which provides high-bandwidth Direct Memory * Access (DMA) between a memory-mapped source address and a memory-mapped * destination address. + * + * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft + * Xilinx IP that provides high-bandwidth direct memory access between + * memory and AXI4-Stream target peripherals. It provides a scatter-gather + * (SG) interface with independent per-channel configuration support. + * */ #include <linux/bitops.h> @@ -173,18 +179,6 @@ #define XILINX_DMA_NUM_DESCS 255 #define XILINX_DMA_NUM_APP_WORDS 5 -/* Multi-Channel DMA Descriptor offsets*/ -#define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20) -#define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20) - -/* Multi-Channel DMA Masks/Shifts */ -#define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0) -#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0) -#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19) -#define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0) -#define XILINX_DMA_BD_STRIDE_SHIFT 0 -#define XILINX_DMA_BD_VSIZE_SHIFT 19 - /* AXI CDMA Specific Registers/Offsets */ #define XILINX_CDMA_REG_SRCADDR 0x18 #define XILINX_CDMA_REG_DSTADDR 0x20 @@ -194,6 +188,31 @@ #define xilinx_prep_dma_addr_t(addr) \ ((dma_addr_t)((u64)addr##_##msb << 32 | (addr))) + +/* AXI MCDMA Specific Registers/Offsets */ +#define XILINX_MCDMA_MM2S_CTRL_OFFSET 0x0000 +#define XILINX_MCDMA_S2MM_CTRL_OFFSET 0x0500 +#define XILINX_MCDMA_CHEN_OFFSET 0x0008 +#define XILINX_MCDMA_CH_ERR_OFFSET 0x0010 +#define XILINX_MCDMA_RXINT_SER_OFFSET 0x0020 +#define XILINX_MCDMA_TXINT_SER_OFFSET 0x0028 +#define XILINX_MCDMA_CHAN_CR_OFFSET(x) (0x40 + (x) * 0x40) +#define XILINX_MCDMA_CHAN_SR_OFFSET(x) (0x44 + (x) * 0x40) +#define XILINX_MCDMA_CHAN_CDESC_OFFSET(x) (0x48 + (x) * 0x40) +#define XILINX_MCDMA_CHAN_TDESC_OFFSET(x) (0x50 + (x) * 0x40) + +/* AXI MCDMA Specific Masks/Shifts */ +#define XILINX_MCDMA_COALESCE_SHIFT 16 +#define XILINX_MCDMA_COALESCE_MAX 24 +#define XILINX_MCDMA_IRQ_ALL_MASK GENMASK(7, 5) +#define XILINX_MCDMA_COALESCE_MASK GENMASK(23, 16) +#define XILINX_MCDMA_CR_RUNSTOP_MASK BIT(0) +#define XILINX_MCDMA_IRQ_IOC_MASK BIT(5) +#define XILINX_MCDMA_IRQ_DELAY_MASK BIT(6) +#define XILINX_MCDMA_IRQ_ERR_MASK BIT(7) +#define XILINX_MCDMA_BD_EOP BIT(30) +#define XILINX_MCDMA_BD_SOP BIT(31) + /** * struct xilinx_vdma_desc_hw - Hardware Descriptor * @next_desc: Next Descriptor Pointer @0x00 @@ -221,8 +240,8 @@ struct xilinx_vdma_desc_hw { * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 * @buf_addr: Buffer address @0x08 * @buf_addr_msb: MSB of Buffer address @0x0C - * @mcdma_control: Control field for mcdma @0x10 - * @vsize_stride: Vsize and Stride field for mcdma @0x14 + * @reserved1: Reserved @0x10 + * @reserved2: Reserved @0x14 * @control: Control field @0x18 * @status: Status field @0x1C * @app: APP Fields @0x20 - 0x30
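Unlike the single-channel AXI DMA, every MCDMA channel gets its own control/status and current/tail descriptor registers, spaced 0x40 apart inside the MM2S (0x0000) or S2MM (0x0500) register block described by the defines above; dma_ctrl_read()/dma_ctrl_write() then add the channel's ctrl_offset. A quick illustration of the resulting address arithmetic (the helper itself is illustrative only):

/* Illustrative only: absolute offset of a channel's status register.
 * ctrl_offset is XILINX_MCDMA_MM2S_CTRL_OFFSET or
 * XILINX_MCDMA_S2MM_CTRL_OFFSET, as chosen in xilinx_dma_chan_probe(). */
static u32 mcdma_chan_sr(u32 ctrl_offset, u32 tdest)
{
	return ctrl_offset + XILINX_MCDMA_CHAN_SR_OFFSET(tdest);
}

/* e.g. S2MM channel 2: 0x0500 + 0x44 + 2 * 0x40 = 0x5c4 */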
@@ -232,14 +251,38 @@ struct xilinx_axidma_desc_hw { u32 next_desc_msb; u32 buf_addr; u32 buf_addr_msb; - u32 mcdma_control; - u32 vsize_stride; + u32 reserved1; + u32 reserved2; u32 control; u32 status; u32 app[XILINX_DMA_NUM_APP_WORDS]; } __aligned(64); /** + * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA + * @next_desc: Next Descriptor Pointer @0x00 + * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 + * @buf_addr: Buffer address @0x08 + * @buf_addr_msb: MSB of Buffer address @0x0C + * @rsvd: Reserved field @0x10 + * @control: Control Information field @0x14 + * @status: Status field @0x18 + * @sideband_status: Status of sideband signals @0x1C + * @app: APP Fields @0x20 - 0x30 + */ +struct xilinx_aximcdma_desc_hw { + u32 next_desc; + u32 next_desc_msb; + u32 buf_addr; + u32 buf_addr_msb; + u32 rsvd; + u32 control; + u32 status; + u32 sideband_status; + u32 app[XILINX_DMA_NUM_APP_WORDS]; +} __aligned(64); + +/** * struct xilinx_cdma_desc_hw - Hardware Descriptor * @next_desc: Next Descriptor Pointer @0x00 * @next_desc_msb: Next Descriptor Pointer MSB @0x04 @@ -286,6 +329,18 @@ struct xilinx_axidma_tx_segment { } __aligned(64); /** + * struct xilinx_aximcdma_tx_segment - Descriptor segment + * @hw: Hardware descriptor + * @node: Node in the descriptor segments list + * @phys: Physical address of segment + */ +struct xilinx_aximcdma_tx_segment { + struct xilinx_aximcdma_desc_hw hw; + struct list_head node; + dma_addr_t phys; +} __aligned(64); + +/** * struct xilinx_cdma_tx_segment - Descriptor segment * @hw: Hardware descriptor * @node: Node in the descriptor segments list @@ -303,12 +358,16 @@ struct xilinx_cdma_tx_segment { * @segments: TX segments list * @node: Node in the channel descriptors list * @cyclic: Check for cyclic transfers. + * @err: Whether the descriptor has an error. + * @residue: Residue of the completed descriptor */ struct xilinx_dma_tx_descriptor { struct dma_async_tx_descriptor async_tx; struct list_head segments; struct list_head node; bool cyclic; + bool err; + u32 residue; }; /** @@ -339,8 +398,8 @@ struct xilinx_dma_tx_descriptor { * @desc_pendingcount: Descriptor pending count * @ext_addr: Indicates 64 bit addressing is supported by dma channel * @desc_submitcount: Descriptor h/w submitted count - * @residue: Residue for AXI DMA * @seg_v: Statically allocated segments base + * @seg_mv: Statically allocated segments base for MCDMA * @seg_p: Physical allocated segments base * @cyclic_seg_v: Statically allocated segment base for cyclic transfers * @cyclic_seg_p: Physical allocated segments base for cyclic dma @@ -376,8 +435,8 @@ struct xilinx_dma_chan { u32 desc_pendingcount; bool ext_addr; u32 desc_submitcount; - u32 residue; struct xilinx_axidma_tx_segment *seg_v; + struct xilinx_aximcdma_tx_segment *seg_mv; dma_addr_t seg_p; struct xilinx_axidma_tx_segment *cyclic_seg_v; dma_addr_t cyclic_seg_p; @@ -393,12 +452,14 @@ struct xilinx_dma_chan { * @XDMA_TYPE_AXIDMA: Axi dma ip. * @XDMA_TYPE_CDMA: Axi cdma ip. * @XDMA_TYPE_VDMA: Axi vdma ip. + * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip.
* */ enum xdma_ip_type { XDMA_TYPE_AXIDMA = 0, XDMA_TYPE_CDMA, XDMA_TYPE_VDMA, + XDMA_TYPE_AXIMCDMA }; struct xilinx_dma_config { @@ -406,6 +467,7 @@ struct xilinx_dma_config { int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk, struct clk **tx_clk, struct clk **txs_clk, struct clk **rx_clk, struct clk **rxs_clk); + irqreturn_t (*irq_handler)(int irq, void *data); }; /** @@ -414,7 +476,6 @@ struct xilinx_dma_config { * @dev: Device Structure * @common: DMA device structure * @chan: Driver specific DMA channel - * @mcdma: Specifies whether Multi-Channel is present or not * @flush_on_fsync: Flush on frame sync * @ext_addr: Indicates 64 bit addressing is supported by dma device * @pdev: Platform device structure pointer @@ -427,13 +488,13 @@ struct xilinx_dma_config { * @nr_channels: Number of channels DMA device supports * @chan_id: DMA channel identifier * @max_buffer_len: Max buffer length + * @s2mm_index: S2MM channel index */ struct xilinx_dma_device { void __iomem *regs; struct device *dev; struct dma_device common; struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; - bool mcdma; u32 flush_on_fsync; bool ext_addr; struct platform_device *pdev; @@ -446,6 +507,7 @@ struct xilinx_dma_device { u32 nr_channels; u32 chan_id; u32 max_buffer_len; + u32 s2mm_index; }; /* Macros */ @@ -546,6 +608,18 @@ static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan, } } +static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan, + struct xilinx_aximcdma_desc_hw *hw, + dma_addr_t buf_addr, size_t sg_used) +{ + if (chan->ext_addr) { + hw->buf_addr = lower_32_bits(buf_addr + sg_used); + hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used); + } else { + hw->buf_addr = buf_addr + sg_used; + } +} + /* ----------------------------------------------------------------------------- * Descriptors and segments alloc and free */ @@ -613,6 +687,33 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan) } spin_unlock_irqrestore(&chan->lock, flags); + if (!segment) + dev_dbg(chan->dev, "Could not find free tx segment\n"); + + return segment; +} + +/** + * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment + * @chan: Driver specific DMA channel + * + * Return: The allocated segment on success and NULL on failure.
+ */ +static struct xilinx_aximcdma_tx_segment * +xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan) +{ + struct xilinx_aximcdma_tx_segment *segment = NULL; + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + if (!list_empty(&chan->free_seg_list)) { + segment = list_first_entry(&chan->free_seg_list, + struct xilinx_aximcdma_tx_segment, + node); + list_del(&segment->node); + } + spin_unlock_irqrestore(&chan->lock, flags); + return segment; } @@ -627,6 +728,17 @@ static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw) hw->next_desc_msb = next_desc_msb; } +static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw) +{ + u32 next_desc = hw->next_desc; + u32 next_desc_msb = hw->next_desc_msb; + + memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw)); + + hw->next_desc = next_desc; + hw->next_desc_msb = next_desc_msb; +} + /** * xilinx_dma_free_tx_segment - Free transaction segment * @chan: Driver specific DMA channel @@ -641,6 +753,20 @@ static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan, } /** + * xilinx_mcdma_free_tx_segment - Free transaction segment + * @chan: Driver specific DMA channel + * @segment: DMA transaction segment + */ +static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan, + struct xilinx_aximcdma_tx_segment * + segment) +{ + xilinx_mcdma_clean_hw_desc(&segment->hw); + + list_add_tail(&segment->node, &chan->free_seg_list); +} + /** * xilinx_cdma_free_tx_segment - Free transaction segment * @chan: Driver specific DMA channel * @segment: DMA transaction segment @@ -694,6 +820,7 @@ xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan, struct xilinx_vdma_tx_segment *segment, *next; struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next; struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next; + struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next; if (!desc) return; @@ -709,12 +836,18 @@ xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan, list_del(&cdma_segment->node); xilinx_cdma_free_tx_segment(chan, cdma_segment); } - } else { + } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { list_for_each_entry_safe(axidma_segment, axidma_next, &desc->segments, node) { list_del(&axidma_segment->node); xilinx_dma_free_tx_segment(chan, axidma_segment); } + } else { + list_for_each_entry_safe(aximcdma_segment, aximcdma_next, + &desc->segments, node) { + list_del(&aximcdma_segment->node); + xilinx_mcdma_free_tx_segment(chan, aximcdma_segment); + } } kfree(desc); @@ -783,10 +916,61 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) chan->cyclic_seg_v, chan->cyclic_seg_p); } - if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) { + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { + spin_lock_irqsave(&chan->lock, flags); + INIT_LIST_HEAD(&chan->free_seg_list); + spin_unlock_irqrestore(&chan->lock, flags); + + /* Free memory that is allocated for BD */ + dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) * + XILINX_DMA_NUM_DESCS, chan->seg_mv, + chan->seg_p); + } + + if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA && + chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) { dma_pool_destroy(chan->desc_pool); chan->desc_pool = NULL; } + +} + +/** + * xilinx_dma_get_residue - Compute residue for a given descriptor + * @chan: Driver specific dma channel + * @desc: dma transaction descriptor + * + * Return: The number of residue bytes for the descriptor.
+ */ +static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan, + struct xilinx_dma_tx_descriptor *desc) +{ + struct xilinx_cdma_tx_segment *cdma_seg; + struct xilinx_axidma_tx_segment *axidma_seg; + struct xilinx_cdma_desc_hw *cdma_hw; + struct xilinx_axidma_desc_hw *axidma_hw; + struct list_head *entry; + u32 residue = 0; + + list_for_each(entry, &desc->segments) { + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { + cdma_seg = list_entry(entry, + struct xilinx_cdma_tx_segment, + node); + cdma_hw = &cdma_seg->hw; + residue += (cdma_hw->control - cdma_hw->status) & + chan->xdev->max_buffer_len; + } else { + axidma_seg = list_entry(entry, + struct xilinx_axidma_tx_segment, + node); + axidma_hw = &axidma_seg->hw; + residue += (axidma_hw->control - axidma_hw->status) & + chan->xdev->max_buffer_len; + } + } + + return residue; } /** @@ -823,7 +1007,7 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) spin_lock_irqsave(&chan->lock, flags); list_for_each_entry_safe(desc, next, &chan->done_list, node) { - struct dmaengine_desc_callback cb; + struct dmaengine_result result; if (desc->cyclic) { xilinx_dma_chan_handle_cyclic(chan, desc, &flags); @@ -833,14 +1017,22 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) /* Remove from the list of running transactions */ list_del(&desc->node); - /* Run the link descriptor callback function */ - dmaengine_desc_get_callback(&desc->async_tx, &cb); - if (dmaengine_desc_callback_valid(&cb)) { - spin_unlock_irqrestore(&chan->lock, flags); - dmaengine_desc_callback_invoke(&cb, NULL); - spin_lock_irqsave(&chan->lock, flags); + if (unlikely(desc->err)) { + if (chan->direction == DMA_DEV_TO_MEM) + result.result = DMA_TRANS_READ_FAILED; + else + result.result = DMA_TRANS_WRITE_FAILED; + } else { + result.result = DMA_TRANS_NOERROR; } + result.residue = desc->residue; + + /* Run the link descriptor callback function */ + spin_unlock_irqrestore(&chan->lock, flags); + dmaengine_desc_get_callback_invoke(&desc->async_tx, &result); + spin_lock_irqsave(&chan->lock, flags); + /* Run any dependencies, then free the descriptor */ dma_run_dependencies(&desc->async_tx); xilinx_dma_free_tx_descriptor(chan, desc);
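For AXI DMA and MCDMA the driver pre-allocates all XILINX_DMA_NUM_DESCS buffer descriptors in one coherent block and links them into a ring up front, so segment allocation at prep time reduces to popping an entry off free_seg_list. The wiring done in the hunk below can be pictured as follows (standalone sketch; the helper name is hypothetical):

/* Standalone model of the BD-ring setup below: descriptor i points at
 * descriptor (i + 1) % nr_descs, so the last BD wraps back to the first. */
static void link_bd_ring(struct xilinx_aximcdma_tx_segment *seg,
			 dma_addr_t base, unsigned int nr_descs)
{
	unsigned int i;

	for (i = 0; i < nr_descs; i++) {
		dma_addr_t next = base + sizeof(*seg) * ((i + 1) % nr_descs);

		seg[i].hw.next_desc = lower_32_bits(next);
		seg[i].hw.next_desc_msb = upper_32_bits(next);
		seg[i].phys = base + sizeof(*seg) * i;
	}
}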
@@ -922,6 +1114,30 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) list_add_tail(&chan->seg_v[i].node, &chan->free_seg_list); } + } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { + /* Allocate the buffer descriptors. */ + chan->seg_mv = dma_alloc_coherent(chan->dev, + sizeof(*chan->seg_mv) * + XILINX_DMA_NUM_DESCS, + &chan->seg_p, GFP_KERNEL); + if (!chan->seg_mv) { + dev_err(chan->dev, + "unable to allocate channel %d descriptors\n", + chan->id); + return -ENOMEM; + } + for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) { + chan->seg_mv[i].hw.next_desc = + lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) * + ((i + 1) % XILINX_DMA_NUM_DESCS)); + chan->seg_mv[i].hw.next_desc_msb = + upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) * + ((i + 1) % XILINX_DMA_NUM_DESCS)); + chan->seg_mv[i].phys = chan->seg_p + + sizeof(*chan->seg_mv) * i; + list_add_tail(&chan->seg_mv[i].node, + &chan->free_seg_list); + } } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool", chan->dev, @@ -937,7 +1153,8 @@ } if (!chan->desc_pool && - (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) { + ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) && + chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) { dev_err(chan->dev, "unable to allocate channel %d descriptor pool\n", chan->id); @@ -1003,8 +1220,6 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, { struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); struct xilinx_dma_tx_descriptor *desc; - struct xilinx_axidma_tx_segment *segment; - struct xilinx_axidma_desc_hw *hw; enum dma_status ret; unsigned long flags; u32 residue = 0; @@ -1013,23 +1228,20 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, if (ret == DMA_COMPLETE || !txstate) return ret; - if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { - spin_lock_irqsave(&chan->lock, flags); + spin_lock_irqsave(&chan->lock, flags); - desc = list_last_entry(&chan->active_list, - struct xilinx_dma_tx_descriptor, node); - if (chan->has_sg) { - list_for_each_entry(segment, &desc->segments, node) { - hw = &segment->hw; - residue += (hw->control - hw->status) & - chan->xdev->max_buffer_len; - } - } - spin_unlock_irqrestore(&chan->lock, flags); + desc = list_last_entry(&chan->active_list, - struct xilinx_dma_tx_descriptor, node); + struct xilinx_dma_tx_descriptor, node); + /* + * VDMA and simple mode do not support residue reporting, so the + * residue field will always be 0.
+ */ + if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA) + residue = xilinx_dma_get_residue(chan, desc); - chan->residue = residue; - dma_set_residue(txstate, chan->residue); - } + spin_unlock_irqrestore(&chan->lock, flags); + + dma_set_residue(txstate, residue); return ret; } @@ -1301,53 +1513,23 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); } - if (chan->has_sg && !chan->xdev->mcdma) + if (chan->has_sg) xilinx_write(chan, XILINX_DMA_REG_CURDESC, head_desc->async_tx.phys); - if (chan->has_sg && chan->xdev->mcdma) { - if (chan->direction == DMA_MEM_TO_DEV) { - dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, - head_desc->async_tx.phys); - } else { - if (!chan->tdest) { - dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, - head_desc->async_tx.phys); - } else { - dma_ctrl_write(chan, - XILINX_DMA_MCRX_CDESC(chan->tdest), - head_desc->async_tx.phys); - } - } - } - xilinx_dma_start(chan); if (chan->err) return; /* Start the transfer */ - if (chan->has_sg && !chan->xdev->mcdma) { + if (chan->has_sg) { if (chan->cyclic) xilinx_write(chan, XILINX_DMA_REG_TAILDESC, chan->cyclic_seg_v->phys); else xilinx_write(chan, XILINX_DMA_REG_TAILDESC, tail_segment->phys); - } else if (chan->has_sg && chan->xdev->mcdma) { - if (chan->direction == DMA_MEM_TO_DEV) { - dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, - tail_segment->phys); - } else { - if (!chan->tdest) { - dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, - tail_segment->phys); - } else { - dma_ctrl_write(chan, - XILINX_DMA_MCRX_TDESC(chan->tdest), - tail_segment->phys); - } - } } else { struct xilinx_axidma_tx_segment *segment; struct xilinx_axidma_desc_hw *hw;
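xilinx_mcdma_start_transfer() in the next hunk programs the per-channel control register: the interrupt-coalescing threshold lives in bits 23:16 and the three interrupt enables in bits 7:5, after which the channel is turned on in CHEN and descriptor fetching starts via the RS bit. The bit packing, pulled out as an illustrative helper (not part of the patch):

/* Illustrative only: mirrors the CR packing in
 * xilinx_mcdma_start_transfer(); pending is chan->desc_pendingcount. */
static u32 mcdma_pack_cr(u32 cr, u32 pending)
{
	if (pending <= XILINX_MCDMA_COALESCE_MAX) {
		cr &= ~XILINX_MCDMA_COALESCE_MASK;	/* bits 23:16 */
		cr |= pending << XILINX_MCDMA_COALESCE_SHIFT;
	}

	return cr | XILINX_MCDMA_IRQ_ALL_MASK;		/* IOC, delay, error */
}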
@@ -1371,6 +1553,76 @@ } /** + * xilinx_mcdma_start_transfer - Starts MCDMA transfer + * @chan: Driver specific channel struct pointer + */ +static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan) +{ + struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; + struct xilinx_axidma_tx_segment *tail_segment; + u32 reg; + + /* + * The lock has been held by the calling functions, so we don't + * need to take it here again. + */ + + if (chan->err) + return; + + if (!chan->idle) + return; + + if (list_empty(&chan->pending_list)) + return; + + head_desc = list_first_entry(&chan->pending_list, + struct xilinx_dma_tx_descriptor, node); + tail_desc = list_last_entry(&chan->pending_list, + struct xilinx_dma_tx_descriptor, node); + tail_segment = list_last_entry(&tail_desc->segments, + struct xilinx_axidma_tx_segment, node); + + reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest)); + + if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) { + reg &= ~XILINX_MCDMA_COALESCE_MASK; + reg |= chan->desc_pendingcount << + XILINX_MCDMA_COALESCE_SHIFT; + } + + reg |= XILINX_MCDMA_IRQ_ALL_MASK; + dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg); + + /* Program current descriptor */ + xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest), + head_desc->async_tx.phys); + + /* Program channel enable register */ + reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET); + reg |= BIT(chan->tdest); + dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg); + + /* Start the fetch of BDs for the channel */ + reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest)); + reg |= XILINX_MCDMA_CR_RUNSTOP_MASK; + dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg); + + xilinx_dma_start(chan); + + if (chan->err) + return; + + /* Start the transfer */ + xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest), + tail_segment->phys); + + list_splice_tail_init(&chan->pending_list, &chan->active_list); + chan->desc_pendingcount = 0; + chan->idle = false; +} + +/** * xilinx_dma_issue_pending - Issue pending transactions * @dchan: DMA channel */ @@ -1399,6 +1651,13 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan) return; list_for_each_entry_safe(desc, next, &chan->active_list, node) { + if (chan->has_sg && chan->xdev->dma_config->dmatype != + XDMA_TYPE_VDMA) + desc->residue = xilinx_dma_get_residue(chan, desc); + else + desc->residue = 0; + desc->err = chan->err; + list_del(&desc->node); if (!desc->cyclic) dma_cookie_complete(&desc->async_tx); @@ -1433,6 +1692,7 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan) chan->err = false; chan->idle = true; + chan->desc_pendingcount = 0; chan->desc_submitcount = 0; return err; }
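The shared interrupt handler that follows first demultiplexes the IRQ: the core latches the interrupting channel's number in a serviced-request register (RXINT/TXINT SER), and ffs() turns that bitmask into a 1-based channel number, with 0 meaning "no bit set" and hence IRQ_NONE. For S2MM channels the patch adds xdev->s2mm_index so the bit index can be rebased onto the right slot of xdev->chan[]. The ffs() idiom in isolation (illustrative helper, not part of the patch):

#include <linux/bitops.h>

/* Mirrors the demux in xilinx_mcdma_irq_handler(): returns the 0-based
 * index of the lowest pending channel, or -1 if nothing is pending. */
static int mcdma_pending_channel(u32 chan_sermask)
{
	int chan_id = ffs(chan_sermask);	/* 1-based, 0 when empty */

	return chan_id ? chan_id - 1 : -1;
}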
@@ -1461,6 +1721,74 @@ } /** + * xilinx_mcdma_irq_handler - MCDMA Interrupt handler + * @irq: IRQ number + * @data: Pointer to the Xilinx MCDMA channel structure + * + * Return: IRQ_HANDLED/IRQ_NONE + */ +static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data) +{ + struct xilinx_dma_chan *chan = data; + u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id; + + if (chan->direction == DMA_DEV_TO_MEM) + ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET; + else + ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET; + + /* Read the channel ID raising the interrupt */ + chan_sermask = dma_ctrl_read(chan, ser_offset); + chan_id = ffs(chan_sermask); + + if (!chan_id) + return IRQ_NONE; + + if (chan->direction == DMA_DEV_TO_MEM) + chan_offset = chan->xdev->s2mm_index; + + chan_offset = chan_offset + (chan_id - 1); + chan = chan->xdev->chan[chan_offset]; + /* Read the status and ack the interrupts. */ + status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest)); + if (!(status & XILINX_MCDMA_IRQ_ALL_MASK)) + return IRQ_NONE; + + dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest), + status & XILINX_MCDMA_IRQ_ALL_MASK); + + if (status & XILINX_MCDMA_IRQ_ERR_MASK) { + dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n", + chan, + dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET), + dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET + (chan->tdest)), + dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET + (chan->tdest))); + chan->err = true; + } + + if (status & XILINX_MCDMA_IRQ_DELAY_MASK) { + /* + * Device takes too long to do the transfer when user requires + * responsiveness. + */ + dev_dbg(chan->dev, "Inter-packet latency too long\n"); + } + + if (status & XILINX_MCDMA_IRQ_IOC_MASK) { + spin_lock(&chan->lock); + xilinx_dma_complete_descriptor(chan); + chan->idle = true; + chan->start_transfer(chan); + spin_unlock(&chan->lock); + } + + tasklet_schedule(&chan->tasklet); + return IRQ_HANDLED; +} + +/** * xilinx_dma_irq_handler - DMA Interrupt handler * @irq: IRQ number * @data: Pointer to the Xilinx DMA channel structure @@ -1967,31 +2295,32 @@ error: } /** - * xilinx_dma_prep_interleaved - prepare a descriptor for a - * DMA_SLAVE transaction + * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction * @dchan: DMA channel - * @xt: Interleaved template pointer + * @sgl: scatterlist to transfer to/from + * @sg_len: number of entries in @sgl + * @direction: DMA direction * @flags: transfer ack flags + * @context: APP words of the descriptor * * Return: Async transaction descriptor on success and NULL on failure */ static struct dma_async_tx_descriptor * -xilinx_dma_prep_interleaved(struct dma_chan *dchan, - struct dma_interleaved_template *xt, - unsigned long flags) +xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, + unsigned int sg_len, + enum dma_transfer_direction direction, + unsigned long flags, void *context) { struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); struct xilinx_dma_tx_descriptor *desc; - struct xilinx_axidma_tx_segment *segment; - struct xilinx_axidma_desc_hw *hw; - - if (!is_slave_direction(xt->dir)) - return NULL; - - if (!xt->numf || !xt->sgl[0].size) - return NULL; + struct xilinx_aximcdma_tx_segment *segment = NULL; + u32 *app_w = (u32 *)context; + struct scatterlist *sg; + size_t copy; + size_t sg_used; + unsigned int i; - if (xt->frame_size != 1) + if (!is_slave_direction(direction)) return NULL; /* Allocate a transaction descriptor. */
@@ -1999,54 +2328,67 @@ xilinx_dma_prep_interleaved(struct dma_chan *dchan, if (!desc) return NULL; - chan->direction = xt->dir; dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); desc->async_tx.tx_submit = xilinx_dma_tx_submit; - /* Get a free segment */ - segment = xilinx_axidma_alloc_tx_segment(chan); - if (!segment) - goto error; + /* Build transactions using information in the scatter gather list */ + for_each_sg(sgl, sg, sg_len, i) { + sg_used = 0; - hw = &segment->hw; + /* Loop until the entire scatterlist entry is used */ + while (sg_used < sg_dma_len(sg)) { + struct xilinx_aximcdma_desc_hw *hw; - /* Fill in the descriptor */ - if (xt->dir != DMA_MEM_TO_DEV) - hw->buf_addr = xt->dst_start; - else - hw->buf_addr = xt->src_start; + /* Get a free segment */ + segment = xilinx_aximcdma_alloc_tx_segment(chan); + if (!segment) + goto error; - hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK; - hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) & - XILINX_DMA_BD_VSIZE_MASK; - hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) & - XILINX_DMA_BD_STRIDE_MASK; - hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK; + /* + * Calculate the maximum number of bytes to transfer, + * making sure it is less than the hw limit + */ + copy = min_t(size_t, sg_dma_len(sg) - sg_used, + chan->xdev->max_buffer_len); + hw = &segment->hw; - /* - * Insert the segment into the descriptor segments - * list. - */ - list_add_tail(&segment->node, &desc->segments); + /* Fill in the descriptor */ + xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg), + sg_used); + hw->control = copy; + if (chan->direction == DMA_MEM_TO_DEV && app_w) { + memcpy(hw->app, app_w, sizeof(u32) * + XILINX_DMA_NUM_APP_WORDS); + } + + sg_used += copy; + /* + * Insert the segment into the descriptor segments + * list.
+ */ + list_add_tail(&segment->node, &desc->segments); + } + } segment = list_first_entry(&desc->segments, - struct xilinx_axidma_tx_segment, node); + struct xilinx_aximcdma_tx_segment, node); desc->async_tx.phys = segment->phys; /* For the last DMA_MEM_TO_DEV transfer, set EOP */ - if (xt->dir == DMA_MEM_TO_DEV) { - segment->hw.control |= XILINX_DMA_BD_SOP; + if (chan->direction == DMA_MEM_TO_DEV) { + segment->hw.control |= XILINX_MCDMA_BD_SOP; segment = list_last_entry(&desc->segments, - struct xilinx_axidma_tx_segment, + struct xilinx_aximcdma_tx_segment, node); - segment->hw.control |= XILINX_DMA_BD_EOP; + segment->hw.control |= XILINX_MCDMA_BD_EOP; } return &desc->async_tx; error: xilinx_dma_free_tx_descriptor(chan, desc); + return NULL; } @@ -2194,7 +2536,9 @@ static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk, *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); if (IS_ERR(*axi_clk)) { err = PTR_ERR(*axi_clk); - dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err); + if (err != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", + err); return err; } @@ -2259,14 +2603,18 @@ static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); if (IS_ERR(*axi_clk)) { err = PTR_ERR(*axi_clk); - dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err); + if (err != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", + err); return err; } *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk"); if (IS_ERR(*dev_clk)) { err = PTR_ERR(*dev_clk); - dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err); + if (err != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", + err); return err; } @@ -2299,7 +2647,9 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); if (IS_ERR(*axi_clk)) { err = PTR_ERR(*axi_clk); - dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err); + if (err != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", + err); return err; } @@ -2321,7 +2671,8 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, err = clk_prepare_enable(*axi_clk); if (err) { - dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); + dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", + err); return err; } @@ -2454,6 +2805,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, "xlnx,axi-dma-s2mm-channel")) { chan->direction = DMA_DEV_TO_MEM; chan->id = chan_id; + xdev->s2mm_index = xdev->nr_channels; chan->tdest = chan_id - xdev->nr_channels; chan->has_vflip = of_property_read_bool(node, "xlnx,enable-vert-flip"); @@ -2463,7 +2815,11 @@ XILINX_VDMA_ENABLE_VERTICAL_FLIP; } - chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) + chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET; + else + chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; + if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; chan->config.park = 1; @@ -2478,9 +2834,9 @@ } /* Request the interrupt */ - chan->irq = irq_of_parse_and_map(node, 0); - err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED, - "xilinx-dma-controller", chan); + chan->irq = irq_of_parse_and_map(node, chan->tdest);
+ err = request_irq(chan->irq, xdev->dma_config->irq_handler, + IRQF_SHARED, "xilinx-dma-controller", chan); if (err) { dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); return err; @@ -2489,6 +2845,9 @@ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { chan->start_transfer = xilinx_dma_start_transfer; chan->stop_transfer = xilinx_dma_stop_transfer; + } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { + chan->start_transfer = xilinx_mcdma_start_transfer; + chan->stop_transfer = xilinx_dma_stop_transfer; } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { chan->start_transfer = xilinx_cdma_start_transfer; chan->stop_transfer = xilinx_cdma_stop_transfer; @@ -2545,7 +2904,7 @@ static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, int ret, i, nr_channels = 1; ret = of_property_read_u32(node, "dma-channels", &nr_channels); - if ((ret < 0) && xdev->mcdma) + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0) dev_warn(xdev->dev, "missing dma-channels property\n"); for (i = 0; i < nr_channels; i++) @@ -2578,22 +2937,31 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, static const struct xilinx_dma_config axidma_config = { .dmatype = XDMA_TYPE_AXIDMA, .clk_init = axidma_clk_init, + .irq_handler = xilinx_dma_irq_handler, }; +static const struct xilinx_dma_config aximcdma_config = { + .dmatype = XDMA_TYPE_AXIMCDMA, + .clk_init = axidma_clk_init, + .irq_handler = xilinx_mcdma_irq_handler, +}; static const struct xilinx_dma_config axicdma_config = { .dmatype = XDMA_TYPE_CDMA, .clk_init = axicdma_clk_init, + .irq_handler = xilinx_dma_irq_handler, }; static const struct xilinx_dma_config axivdma_config = { .dmatype = XDMA_TYPE_VDMA, .clk_init = axivdma_clk_init, + .irq_handler = xilinx_dma_irq_handler, }; static const struct of_device_id xilinx_dma_of_ids[] = { { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config }, { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config }, { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config }, + { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config }, {} }; MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids); @@ -2612,7 +2980,6 @@ static int xilinx_dma_probe(struct platform_device *pdev) struct device_node *node = pdev->dev.of_node; struct xilinx_dma_device *xdev; struct device_node *child, *np = pdev->dev.of_node; - struct resource *io; u32 num_frames, addr_width, len_width; int i, err; @@ -2638,16 +3005,15 @@ return err; /* Request and map I/O memory */ - io = platform_get_resource(pdev, IORESOURCE_MEM, 0); - xdev->regs = devm_ioremap_resource(&pdev->dev, io); + xdev->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(xdev->regs)) return PTR_ERR(xdev->regs); /* Retrieve the DMA engine properties from the device tree */ xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0); - if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { - xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma"); + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA || + xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { if (!of_property_read_u32(node, "xlnx,sg-length-width", &len_width)) { if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
@@ -2712,14 +3078,17 @@ xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; xdev->common.device_prep_dma_cyclic = xilinx_dma_prep_dma_cyclic; - xdev->common.device_prep_interleaved_dma = - xilinx_dma_prep_interleaved; - /* Residue calculation is supported by only AXI DMA */ + /* Residue calculation is supported only by AXI DMA and CDMA */ xdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask); xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy; + /* Residue calculation is supported only by AXI DMA and CDMA */ + xdev->common.residue_granularity = + DMA_RESIDUE_GRANULARITY_SEGMENT; + } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { + xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg; } else { xdev->common.device_prep_interleaved_dma = xilinx_vdma_dma_prep_interleaved; @@ -2755,6 +3124,8 @@ dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n"); else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n"); + else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) + dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n"); else dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
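One detail worth spelling out from the residue rework above: each hardware descriptor's control field holds the programmed byte count, while the low bits of its status field hold the bytes actually transferred, so xilinx_dma_get_residue() accumulates their masked difference over every segment of a descriptor. Reduced to its per-segment arithmetic (standalone sketch):

/* Standalone model of the arithmetic in xilinx_dma_get_residue();
 * max_buffer_len is the GENMASK() built from xlnx,sg-length-width
 * during probe, confining the subtraction to the length field. */
static u32 segment_residue(u32 control, u32 status, u32 max_buffer_len)
{
	return (control - status) & max_buffer_len;
}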