From 7df95299b94a63ec67a6389fc02dc25019a80ee8 Mon Sep 17 00:00:00 2001
From: Matt Sickler <Matt.Sickler@daktronics.com>
Date: Mon, 22 Apr 2019 22:05:58 +0000
Subject: staging: kpc2000: Add DMA driver

Add the Daktronics DMA driver.  I've added the SPDX license identifiers and
a Kconfig entry, and cleaned up as many of the warnings as I could.

The AIO support code will be removed in a future patch.

Signed-off-by: Matt Sickler <Matt.Sickler@daktronics.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 drivers/staging/kpc2000/Kconfig                  |  11 +
 drivers/staging/kpc2000/Makefile                 |   1 +
 drivers/staging/kpc2000/kpc_dma/Makefile         |   6 +
 drivers/staging/kpc2000/kpc_dma/dma.c            | 264 ++++++++++++++
 drivers/staging/kpc2000/kpc_dma/fileops.c        | 420 +++++++++++++++++++++++
 drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c | 248 +++++++++++++
 drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h | 220 ++++++++++++
 drivers/staging/kpc2000/kpc_dma/uapi.h           |  11 +
 8 files changed, 1181 insertions(+)
 create mode 100644 drivers/staging/kpc2000/kpc_dma/Makefile
 create mode 100644 drivers/staging/kpc2000/kpc_dma/dma.c
 create mode 100644 drivers/staging/kpc2000/kpc_dma/fileops.c
 create mode 100644 drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
 create mode 100644 drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h
 create mode 100644 drivers/staging/kpc2000/kpc_dma/uapi.h

diff --git a/drivers/staging/kpc2000/Kconfig b/drivers/staging/kpc2000/Kconfig
index 926e770d6e0e..fb5922928f47 100644
--- a/drivers/staging/kpc2000/Kconfig
+++ b/drivers/staging/kpc2000/Kconfig
@@ -44,3 +44,14 @@ config KPC2000_I2C
 
 	  If unsure, say N.
 
+config KPC2000_DMA
+	tristate "Daktronics KPC DMA controller"
+	depends on KPC2000
+	help
+	  Say Y here if you wish to support the Daktronics DMA controller.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called kpc2000_dma.
+
+	  If unsure, say N.
+
diff --git a/drivers/staging/kpc2000/Makefile b/drivers/staging/kpc2000/Makefile
index 6fcb2ee7b27d..1e48e9df1329 100644
--- a/drivers/staging/kpc2000/Makefile
+++ b/drivers/staging/kpc2000/Makefile
@@ -3,3 +3,4 @@
 obj-$(CONFIG_KPC2000) += kpc2000/
 obj-$(CONFIG_KPC2000_I2C) += kpc_i2c/
 obj-$(CONFIG_KPC2000_SPI) += kpc_spi/
+obj-$(CONFIG_KPC2000_DMA) += kpc_dma/
diff --git a/drivers/staging/kpc2000/kpc_dma/Makefile b/drivers/staging/kpc2000/kpc_dma/Makefile
new file mode 100644
index 000000000000..fe5db532c8c8
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc_dma/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-m := kpc_dma.o
+kpc_dma-objs += dma.o
+kpc_dma-objs += fileops.o
+kpc_dma-objs += kpc_dma_driver.o
diff --git a/drivers/staging/kpc2000/kpc_dma/dma.c b/drivers/staging/kpc2000/kpc_dma/dma.c
new file mode 100644
index 000000000000..6959bac11388
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc_dma/dma.c
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/rwsem.h>
+#include "kpc_dma_driver.h"
+
+/**********  IRQ Handlers  **********/
+static
+irqreturn_t ndd_irq_handler(int irq, void *dev_id)
+{
+	struct kpc_dma_device *ldev = (struct kpc_dma_device*)dev_id;
+
+	if ((GetEngineControl(ldev) & ENG_CTL_IRQ_ACTIVE) || (ldev->desc_completed->MyDMAAddr != GetEngineCompletePtr(ldev)))
+		schedule_work(&ldev->irq_work);
+
+	return IRQ_HANDLED;
+}
+
+static
+void ndd_irq_worker(struct work_struct *ws)
+{
+	struct kpc_dma_descriptor *cur;
+	struct kpc_dma_device *eng = container_of(ws, struct kpc_dma_device, irq_work);
+	lock_engine(eng);
+
+	if (GetEngineCompletePtr(eng) == 0)
+		goto out;
+
+	if (eng->desc_completed->MyDMAAddr == GetEngineCompletePtr(eng))
+		goto out;
+
+	cur = eng->desc_completed;
+	do {
+		cur = cur->Next;
+		dev_dbg(&eng->pldev->dev, "Handling completed descriptor %p (acd = %p)\n", cur, cur->acd);
+		BUG_ON(cur == eng->desc_next); // Ordering failure.
+
+		if (cur->DescControlFlags & DMA_DESC_CTL_SOP){
+			eng->accumulated_bytes = 0;
+			eng->accumulated_flags = 0;
+		}
+
+		eng->accumulated_bytes += cur->DescByteCount;
+		if (cur->DescStatusFlags & DMA_DESC_STS_ERROR)
+			eng->accumulated_flags |= ACD_FLAG_ENG_ACCUM_ERROR;
+
+		if (cur->DescStatusFlags & DMA_DESC_STS_SHORT)
+			eng->accumulated_flags |= ACD_FLAG_ENG_ACCUM_SHORT;
+
+		if (cur->DescControlFlags & DMA_DESC_CTL_EOP){
+			if (cur->acd)
+				transfer_complete_cb(cur->acd, eng->accumulated_bytes, eng->accumulated_flags | ACD_FLAG_DONE);
+		}
+
+		eng->desc_completed = cur;
+	} while (cur->MyDMAAddr != GetEngineCompletePtr(eng));
+
+ out:
+	SetClearEngineControl(eng, ENG_CTL_IRQ_ACTIVE, 0);
+
+	unlock_engine(eng);
+}
+
+
+/**********  DMA Engine Init/Teardown  **********/
+void start_dma_engine(struct kpc_dma_device *eng)
+{
+	eng->desc_next      = eng->desc_pool_first;
+	eng->desc_completed = eng->desc_pool_last;
+
+	// Setup the engine pointer registers
+	SetEngineNextPtr(eng, eng->desc_pool_first);
+	SetEngineSWPtr(eng, eng->desc_pool_first);
+	ClearEngineCompletePtr(eng);
+
+	WriteEngineControl(eng, ENG_CTL_DMA_ENABLE | ENG_CTL_IRQ_ENABLE);
+}
+
+int setup_dma_engine(struct kpc_dma_device *eng, u32 desc_cnt)
+{
+	u32 caps;
+	struct kpc_dma_descriptor * cur;
+	struct kpc_dma_descriptor * next;
+	dma_addr_t next_handle;
+	dma_addr_t head_handle;
+	unsigned int i;
+	int rv;
+	dev_dbg(&eng->pldev->dev, "Setting up DMA engine [%p]\n", eng);
+
+	caps = GetEngineCapabilities(eng);
+
+	if (WARN(!(caps & ENG_CAP_PRESENT), "setup_dma_engine() called for DMA Engine at %p which isn't present in hardware!\n", eng))
+		return -ENXIO;
+
+	if (caps & ENG_CAP_DIRECTION){
+		eng->dir = DMA_FROM_DEVICE;
+	} else {
+		eng->dir = DMA_TO_DEVICE;
+	}
+
+	eng->desc_pool_cnt = desc_cnt;
+	eng->desc_pool     = dma_pool_create("KPC DMA Descriptors", &eng->pldev->dev, sizeof(struct kpc_dma_descriptor), DMA_DESC_ALIGNMENT, 4096);
+
+	eng->desc_pool_first = dma_pool_alloc(eng->desc_pool, GFP_KERNEL | GFP_DMA, &head_handle);
+	if (!eng->desc_pool_first){
+		dev_err(&eng->pldev->dev, "setup_dma_engine: couldn't allocate desc_pool_first!\n");
+		dma_pool_destroy(eng->desc_pool);
+		return -ENOMEM;
+	}
+
+	eng->desc_pool_first->MyDMAAddr = head_handle;
+	clear_desc(eng->desc_pool_first);
+
+	cur = eng->desc_pool_first;
+	for (i = 1 ; i < eng->desc_pool_cnt ; i++){
+		next = dma_pool_alloc(eng->desc_pool, GFP_KERNEL | GFP_DMA, &next_handle);
+		if (next == NULL)
+			goto done_alloc;
+
+		clear_desc(next);
+		next->MyDMAAddr = next_handle;
+
+		cur->DescNextDescPtr = next_handle;
+		cur->Next = next;
+		cur = next;
+	}
+
+ done_alloc:
+	// Link the last descriptor back to the first, so it's a circular linked list
+	cur->Next = eng->desc_pool_first;
+	cur->DescNextDescPtr = eng->desc_pool_first->MyDMAAddr;
+
+	eng->desc_pool_last = cur;
+	eng->desc_completed = eng->desc_pool_last;
+
+	// Setup work queue
+	INIT_WORK(&eng->irq_work, ndd_irq_worker);
+
+	// Grab IRQ line
+	rv = request_irq(eng->irq, ndd_irq_handler, IRQF_SHARED, KP_DRIVER_NAME_DMA_CONTROLLER, eng);
+	if (rv){
+		dev_err(&eng->pldev->dev, "setup_dma_engine: failed to request_irq: %d\n", rv);
+		return rv;
+	}
+
+	// Turn on the engine!
+	start_dma_engine(eng);
+	unlock_engine(eng);
+
+	return 0;
+}
+
+void stop_dma_engine(struct kpc_dma_device *eng)
+{
+	unsigned long timeout;
+	dev_dbg(&eng->pldev->dev, "Destroying DMA engine [%p]\n", eng);
+
+	// Disable the descriptor engine
+	WriteEngineControl(eng, 0);
+
+	// Wait for descriptor engine to finish current operation
+	timeout = jiffies + (HZ / 2);
+	while (GetEngineControl(eng) & ENG_CTL_DMA_RUNNING){
+		if (time_after(jiffies, timeout)){
+			dev_crit(&eng->pldev->dev, "DMA_RUNNING still asserted!\n");
+			break;
+		}
+	}
+
+	// Request a reset
+	WriteEngineControl(eng, ENG_CTL_DMA_RESET_REQUEST);
+
+	// Wait for reset request to be processed
+	timeout = jiffies + (HZ / 2);
+	while (GetEngineControl(eng) & (ENG_CTL_DMA_RUNNING | ENG_CTL_DMA_RESET_REQUEST)){
+		if (time_after(jiffies, timeout)){
+			dev_crit(&eng->pldev->dev, "ENG_CTL_DMA_RESET_REQUEST still asserted!\n");
+			break;
+		}
+	}
+
+	// Request a reset
+	WriteEngineControl(eng, ENG_CTL_DMA_RESET);
+
+	// And wait for reset to complete
+	timeout = jiffies + (HZ / 2);
+	while (GetEngineControl(eng) & ENG_CTL_DMA_RESET){
+		if (time_after(jiffies, timeout)){
+			dev_crit(&eng->pldev->dev, "DMA_RESET still asserted!\n");
+			break;
+		}
+	}
+
+	// Clear any persistent bits just to make sure there is no residue from the reset
+	SetClearEngineControl(eng, (ENG_CTL_IRQ_ACTIVE | ENG_CTL_DESC_COMPLETE | ENG_CTL_DESC_ALIGN_ERR | ENG_CTL_DESC_FETCH_ERR | ENG_CTL_SW_ABORT_ERR | ENG_CTL_DESC_CHAIN_END | ENG_CTL_DMA_WAITING_PERSIST), 0);
+
+	// Reset performance counters
+
+	// Completely disable the engine
+	WriteEngineControl(eng, 0);
+}
+
+void destroy_dma_engine(struct kpc_dma_device *eng)
+{
+	struct kpc_dma_descriptor * cur;
+	dma_addr_t cur_handle;
+	unsigned int i;
+
+	stop_dma_engine(eng);
+
+	cur = eng->desc_pool_first;
+	cur_handle = eng->desc_pool_first->MyDMAAddr;
+
+	for (i = 0 ; i < eng->desc_pool_cnt ; i++){
+		struct kpc_dma_descriptor *next = cur->Next;
+		dma_addr_t next_handle = cur->DescNextDescPtr;
+		dma_pool_free(eng->desc_pool, cur, cur_handle);
+		cur_handle = next_handle;
+		cur = next;
+	}
+
+	dma_pool_destroy(eng->desc_pool);
+
+	free_irq(eng->irq, eng);
+}
+
+
+
+/**********  Helper Functions  **********/
+int count_descriptors_available(struct kpc_dma_device *eng)
+{
+	u32 count = 0;
+	struct kpc_dma_descriptor *cur = eng->desc_next;
+	while (cur != eng->desc_completed){
+		BUG_ON(cur == NULL);
+		count++;
+		cur = cur->Next;
+	}
+	return count;
+}
+
+void clear_desc(struct kpc_dma_descriptor *desc)
+{
+	if (desc == NULL)
+		return;
+	desc->DescByteCount        = 0;
+	desc->DescStatusErrorFlags = 0;
+	desc->DescStatusFlags      = 0;
+	desc->DescUserControlLS    = 0;
+	desc->DescUserControlMS    = 0;
+	desc->DescCardAddrLS       = 0;
+	desc->DescBufferByteCount  = 0;
+	desc->DescCardAddrMS       = 0;
+	desc->DescControlFlags     = 0;
+	desc->DescSystemAddrLS     = 0;
+	desc->DescSystemAddrMS     = 0;
+	desc->acd = NULL;
+}
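[Note] The descriptors above form a circular singly linked list: desc_next marks the first free slot and desc_completed the last slot the hardware finished. Because the transfer path refuses any request needing desc_pool_cnt or more descriptors, one slot always separates the two pointers, so at most desc_pool_cnt - 1 descriptors are ever outstanding. A standalone model of that ring accounting (plain C for illustration, not driver code):

#include <stdio.h>

/* Models count_descriptors_available(): walk from the producer slot
 * (desc_next) to the consumer slot (desc_completed) around a ring of
 * cnt descriptors. */
static unsigned int ring_available(unsigned int next, unsigned int completed,
                                   unsigned int cnt)
{
	return (completed + cnt - next) % cnt;
}

int main(void)
{
	/* Freshly started engine, as set up by start_dma_engine(): desc_next
	 * is the first pool entry (slot 0), desc_completed the last (slot 29
	 * for the 30-descriptor pool requested in kpc_dma_probe()).  Only 29
	 * of the 30 descriptors are usable at once. */
	printf("%u\n", ring_available(0, 29, 30)); /* prints 29 */
	return 0;
}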
diff --git a/drivers/staging/kpc2000/kpc_dma/fileops.c b/drivers/staging/kpc2000/kpc_dma/fileops.c
new file mode 100644
index 000000000000..5741d2b49a7d
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc_dma/fileops.c
@@ -0,0 +1,420 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>	/* printk() */
+#include <linux/slab.h>		/* kmalloc() */
+#include <linux/fs.h>		/* everything... */
+#include <linux/errno.h>	/* error codes */
+#include <linux/types.h>	/* size_t */
+#include <linux/cdev.h>
+#include <linux/uaccess.h>	/* copy_*_user */
+#include <linux/aio.h>		/* aio stuff */
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include "kpc_dma_driver.h"
+#include "uapi.h"
+
+/**********  Helper Functions  **********/
+static inline
+unsigned int count_pages(unsigned long iov_base, size_t iov_len)
+{
+	unsigned long first = (iov_base             & PAGE_MASK) >> PAGE_SHIFT;
+	unsigned long last  = ((iov_base+iov_len-1) & PAGE_MASK) >> PAGE_SHIFT;
+	return last - first + 1;
+}
+
+static inline
+unsigned int count_parts_for_sge(struct scatterlist *sg)
+{
+	unsigned int sg_length = sg_dma_len(sg);
+	sg_length += (0x80000-1);
+	return (sg_length / 0x80000);
+}
+
+/**********  Transfer Helpers  **********/
+static
+int kpc_dma_transfer(struct dev_private_data *priv, struct kiocb *kcb, unsigned long iov_base, size_t iov_len)
+{
+	unsigned int i = 0;
+	long rv = 0;
+	struct kpc_dma_device *ldev;
+	struct aio_cb_data *acd;
+	DECLARE_COMPLETION_ONSTACK(done);
+	u32 desc_needed = 0;
+	struct scatterlist *sg;
+	u32 num_descrs_avail;
+	struct kpc_dma_descriptor *desc;
+	unsigned int pcnt;
+	unsigned int p;
+	u64 card_addr;
+	u64 dma_addr;
+	u64 user_ctl;
+
+	BUG_ON(priv == NULL);
+	ldev = priv->ldev;
+	BUG_ON(ldev == NULL);
+
+	dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_transfer(priv = [%p], kcb = [%p], iov_base = [%p], iov_len = %ld) ldev = [%p]\n", priv, kcb, (void*)iov_base, iov_len, ldev);
+
+	acd = (struct aio_cb_data *) kzalloc(sizeof(struct aio_cb_data), GFP_KERNEL);
+	if (!acd){
+		dev_err(&priv->ldev->pldev->dev, "Couldn't kmalloc space for the aio data\n");
+		return -ENOMEM;
+	}
+	memset(acd, 0x66, sizeof(struct aio_cb_data));
+
+	acd->priv = priv;
+	acd->ldev = priv->ldev;
+	acd->cpl = &done;
+	acd->flags = 0;
+	acd->kcb = kcb;
+	acd->len = iov_len;
+	acd->page_count = count_pages(iov_base, iov_len);
+
+	// Allocate an array of page pointers
+	acd->user_pages = kzalloc(sizeof(struct page *) * acd->page_count, GFP_KERNEL);
+	if (!acd->user_pages){
+		dev_err(&priv->ldev->pldev->dev, "Couldn't kmalloc space for the page pointers\n");
+		rv = -ENOMEM;
+		goto err_alloc_userpages;
+	}
+
+	// Lock the user buffer pages in memory, and hold on to the page pointers (for the sglist)
+	down_read(&current->mm->mmap_sem);	/* get memory map semaphore */
+	rv = get_user_pages(iov_base, acd->page_count, FOLL_TOUCH | FOLL_WRITE | FOLL_GET, acd->user_pages, NULL);
+	up_read(&current->mm->mmap_sem);	/* release the semaphore */
+	if (rv != acd->page_count){
+		dev_err(&priv->ldev->pldev->dev, "Couldn't get_user_pages (%ld)\n", rv);
+		goto err_get_user_pages;
+	}
+
+	// Allocate and setup the sg_table (scatterlist entries)
+	rv = sg_alloc_table_from_pages(&acd->sgt, acd->user_pages, acd->page_count, iov_base & (PAGE_SIZE-1), iov_len, GFP_KERNEL);
+	if (rv){
+		dev_err(&priv->ldev->pldev->dev, "Couldn't alloc sg_table (%ld)\n", rv);
+		goto err_alloc_sg_table;
+	}
+
+	// Setup the DMA mapping for all the sg entries
+	acd->mapped_entry_count = dma_map_sg(&ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, ldev->dir);
+	if (acd->mapped_entry_count <= 0){
+		dev_err(&priv->ldev->pldev->dev, "Couldn't dma_map_sg (%d)\n", acd->mapped_entry_count);
+		goto err_dma_map_sg;
+	}
+
+	// Calculate how many descriptors are actually needed for this transfer.
+	for_each_sg(acd->sgt.sgl, sg, acd->mapped_entry_count, i){
+		desc_needed += count_parts_for_sge(sg);
+	}
+
+	lock_engine(ldev);
+
+	// Figure out how many descriptors are available and return an error if there aren't enough
+	num_descrs_avail = count_descriptors_available(ldev);
+	dev_dbg(&priv->ldev->pldev->dev, "  mapped_entry_count = %d  num_descrs_needed = %d  num_descrs_avail = %d\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
+	if (desc_needed >= ldev->desc_pool_cnt){
+		dev_warn(&priv->ldev->pldev->dev, "  mapped_entry_count = %d  num_descrs_needed = %d  num_descrs_avail = %d  TOO MANY to ever complete!\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
+		rv = -EAGAIN;
+		unlock_engine(ldev);
+		goto err_descr_too_many;
+	}
+	if (desc_needed > num_descrs_avail){
+		dev_warn(&priv->ldev->pldev->dev, "  mapped_entry_count = %d  num_descrs_needed = %d  num_descrs_avail = %d  Too many to complete right now.\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
+		rv = -EMSGSIZE;
+		unlock_engine(ldev);
+		goto err_descr_too_many;
+	}
+
+	// Loop through all the sg table entries and fill out a descriptor for each one.
+	desc = ldev->desc_next;
+	card_addr = acd->priv->card_addr;
+	for_each_sg(acd->sgt.sgl, sg, acd->mapped_entry_count, i){
+		pcnt = count_parts_for_sge(sg);
+		for (p = 0 ; p < pcnt ; p++){
+			// Fill out the descriptor
+			BUG_ON(desc == NULL);
+			clear_desc(desc);
+			if (p != pcnt-1){
+				desc->DescByteCount = 0x80000;
+			} else {
+				desc->DescByteCount = sg_dma_len(sg) - (p * 0x80000);
+			}
+			desc->DescBufferByteCount = desc->DescByteCount;
+
+			desc->DescControlFlags |= DMA_DESC_CTL_IRQONERR;
+			if (i == 0 && p == 0)
+				desc->DescControlFlags |= DMA_DESC_CTL_SOP;
+			if (i == acd->mapped_entry_count-1 && p == pcnt-1)
+				desc->DescControlFlags |= DMA_DESC_CTL_EOP | DMA_DESC_CTL_IRQONDONE;
+
+			desc->DescCardAddrLS = (card_addr & 0xFFFFFFFF);
+			desc->DescCardAddrMS = (card_addr >> 32) & 0xF;
+			card_addr += desc->DescByteCount;
+
+			dma_addr = sg_dma_address(sg) + (p * 0x80000);
+			desc->DescSystemAddrLS = (dma_addr & 0x00000000FFFFFFFF) >>  0;
+			desc->DescSystemAddrMS = (dma_addr & 0xFFFFFFFF00000000) >> 32;
+
+			user_ctl = acd->priv->user_ctl;
+			if (i == acd->mapped_entry_count-1 && p == pcnt-1){
+				user_ctl = acd->priv->user_ctl_last;
+			}
+			desc->DescUserControlLS = (user_ctl & 0x00000000FFFFFFFF) >>  0;
+			desc->DescUserControlMS = (user_ctl & 0xFFFFFFFF00000000) >> 32;
+
+			if (i == acd->mapped_entry_count-1 && p == pcnt-1)
+				desc->acd = acd;
+
+			dev_dbg(&priv->ldev->pldev->dev, "  Filled descriptor %p (acd = %p)\n", desc, desc->acd);
+
+			ldev->desc_next = desc->Next;
+			desc = desc->Next;
+		}
+	}
+
+	// Send the filled descriptors off to the hardware to process!
+	SetEngineSWPtr(ldev, ldev->desc_next);
+
+	unlock_engine(ldev);
+
+	// If this is a synchronous kiocb, we need to put the calling process to sleep until the transfer is complete
+	if (kcb == NULL || is_sync_kiocb(kcb)){
+		rv = wait_for_completion_interruptible(&done);
+		// If the user aborted (rv == -ERESTARTSYS), we're no longer responsible for cleaning up the acd
+		if (rv == -ERESTARTSYS){
+			acd->cpl = NULL;
+		}
+		if (rv == 0){
+			rv = acd->len;
+			kfree(acd);
+		}
+		return rv;
+	}
+
+	return -EIOCBQUEUED;
+
+ err_descr_too_many:
+	unlock_engine(ldev);
+	dma_unmap_sg(&ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, ldev->dir);
+	sg_free_table(&acd->sgt);
+ err_dma_map_sg:
+ err_alloc_sg_table:
+	for (i = 0 ; i < acd->page_count ; i++){
+		put_page(acd->user_pages[i]);
+	}
+ err_get_user_pages:
+	kfree(acd->user_pages);
+ err_alloc_userpages:
+	kfree(acd);
+	dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_transfer returning with error %ld\n", rv);
+	return rv;
+}
+
+void transfer_complete_cb(struct aio_cb_data *acd, size_t xfr_count, u32 flags)
+{
+	unsigned int i;
+
+	BUG_ON(acd == NULL);
+	BUG_ON(acd->user_pages == NULL);
+	BUG_ON(acd->sgt.sgl == NULL);
+	BUG_ON(acd->ldev == NULL);
+	BUG_ON(acd->ldev->pldev == NULL);
+
+	dev_dbg(&acd->ldev->pldev->dev, "transfer_complete_cb(acd = [%p])\n", acd);
+
+	for (i = 0 ; i < acd->page_count ; i++){
+		if (!PageReserved(acd->user_pages[i])){
+			set_page_dirty(acd->user_pages[i]);
+		}
+	}
+
+	dma_unmap_sg(&acd->ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, acd->ldev->dir);
+
+	for (i = 0 ; i < acd->page_count ; i++){
+		put_page(acd->user_pages[i]);
+	}
+
+	sg_free_table(&acd->sgt);
+
+	kfree(acd->user_pages);
+
+	acd->flags = flags;
+
+	if (acd->kcb == NULL || is_sync_kiocb(acd->kcb)){
+		if (acd->cpl){
+			complete(acd->cpl);
+		} else {
+			// There's no completion, so we're responsible for cleaning up the acd
+			kfree(acd);
+		}
+	} else {
+#ifdef CONFIG_KPC_DMA_AIO
+		aio_complete(acd->kcb, acd->len, acd->flags);
+#endif
+		kfree(acd);
+	}
+}
+
+/**********  Fileops  **********/
+static
+int kpc_dma_open(struct inode *inode, struct file *filp)
+{
+	struct dev_private_data *priv;
+	struct kpc_dma_device *ldev = kpc_dma_lookup_device(iminor(inode));
+	if (ldev == NULL)
+		return -ENODEV;
+
+	if (! atomic_dec_and_test(&ldev->open_count)){
+		atomic_inc(&ldev->open_count);
+		return -EBUSY; /* already open */
+	}
+
+	priv = kzalloc(sizeof(struct dev_private_data), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->ldev = ldev;
+	filp->private_data = priv;
+
+	dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_open(inode = [%p], filp = [%p]) priv = [%p] ldev = [%p]\n", inode, filp, priv, priv->ldev);
+	return 0;
+}
+
+static
+int kpc_dma_close(struct inode *inode, struct file *filp)
+{
+	struct kpc_dma_descriptor *cur;
+	struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;
+	struct kpc_dma_device *eng = priv->ldev;
+	dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_close(inode = [%p], filp = [%p]) priv = [%p], ldev = [%p]\n", inode, filp, priv, priv->ldev);
+
+	lock_engine(eng);
+
+	stop_dma_engine(eng);
+
+	cur = eng->desc_completed->Next;
+	while (cur != eng->desc_next){
+		dev_dbg(&eng->pldev->dev, "Aborting descriptor %p (acd = %p)\n", cur, cur->acd);
+		if (cur->DescControlFlags & DMA_DESC_CTL_EOP){
+			if (cur->acd)
+				transfer_complete_cb(cur->acd, 0, ACD_FLAG_ABORT);
+		}
+
+		clear_desc(cur);
+		eng->desc_completed = cur;
+
+		cur = cur->Next;
+	}
+
+	start_dma_engine(eng);
+
+	unlock_engine(eng);
+
+	atomic_inc(&priv->ldev->open_count); /* release the device */
+	kfree(priv);
+	return 0;
+}
+
+#ifdef CONFIG_KPC_DMA_AIO
+static
+int kpc_dma_aio_cancel(struct kiocb *kcb)
+{
+	struct dev_private_data *priv = (struct dev_private_data *)kcb->ki_filp->private_data;
+	dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_aio_cancel(kcb = [%p]) priv = [%p], ldev = [%p]\n", kcb, priv, priv->ldev);
+	return 0;
+}
+
+static
+ssize_t kpc_dma_aio_read(struct kiocb *kcb, const struct iovec *iov, unsigned long iov_count, loff_t pos)
+{
+	struct dev_private_data *priv = (struct dev_private_data *)kcb->ki_filp->private_data;
+	dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_aio_read(kcb = [%p], iov = [%p], iov_count = %ld, pos = %lld) priv = [%p], ldev = [%p]\n", kcb, iov, iov_count, pos, priv, priv->ldev);
+
+	if (priv->ldev->dir != DMA_FROM_DEVICE)
+		return -EMEDIUMTYPE;
+
+	if (iov_count != 1){
+		dev_err(&priv->ldev->pldev->dev, "kpc_dma_aio_read() called with iov_count > 1!\n");
+		return -EFAULT;
+	}
+
+	if (!is_sync_kiocb(kcb))
+		kiocb_set_cancel_fn(kcb, kpc_dma_aio_cancel);
+	return kpc_dma_transfer(priv, kcb, (unsigned long)iov->iov_base, iov->iov_len);
+}
+
+static
+ssize_t kpc_dma_aio_write(struct kiocb *kcb, const struct iovec *iov, unsigned long iov_count, loff_t pos)
+{
+	struct dev_private_data *priv = (struct dev_private_data *)kcb->ki_filp->private_data;
+	dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_aio_write(kcb = [%p], iov = [%p], iov_count = %ld, pos = %lld) priv = [%p], ldev = [%p]\n", kcb, iov, iov_count, pos, priv, priv->ldev);
+
+	if (priv->ldev->dir != DMA_TO_DEVICE)
+		return -EMEDIUMTYPE;
+
+	if (iov_count != 1){
+		dev_err(&priv->ldev->pldev->dev, "kpc_dma_aio_write() called with iov_count > 1!\n");
+		return -EFAULT;
+	}
+
+	if (!is_sync_kiocb(kcb))
+		kiocb_set_cancel_fn(kcb, kpc_dma_aio_cancel);
+	return kpc_dma_transfer(priv, kcb, (unsigned long)iov->iov_base, iov->iov_len);
+}
+#endif
+
+static
+ssize_t kpc_dma_read( struct file *filp,       char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;
+	dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_read(filp = [%p], user_buf = [%p], count = %zu, ppos = [%p]) priv = [%p], ldev = [%p]\n", filp, user_buf, count, ppos, priv, priv->ldev);
+
+	if (priv->ldev->dir != DMA_FROM_DEVICE)
+		return -EMEDIUMTYPE;
+
+	return kpc_dma_transfer(priv, (struct kiocb *)NULL, (unsigned long)user_buf, count);
+}
+
+static
+ssize_t kpc_dma_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;
+	dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_write(filp = [%p], user_buf = [%p], count = %zu, ppos = [%p]) priv = [%p], ldev = [%p]\n", filp, user_buf, count, ppos, priv, priv->ldev);
+
+	if (priv->ldev->dir != DMA_TO_DEVICE)
+		return -EMEDIUMTYPE;
+
+	return kpc_dma_transfer(priv, (struct kiocb *)NULL, (unsigned long)user_buf, count);
+}
+
+static
+long kpc_dma_ioctl(struct file *filp, unsigned int ioctl_num, unsigned long ioctl_param)
+{
+	struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;
+	dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_ioctl(filp = [%p], ioctl_num = 0x%x, ioctl_param = 0x%lx) priv = [%p], ldev = [%p]\n", filp, ioctl_num, ioctl_param, priv, priv->ldev);
+
+	switch (ioctl_num){
+	case KND_IOCTL_SET_CARD_ADDR:      priv->card_addr     = ioctl_param;  return priv->card_addr;
+	case KND_IOCTL_SET_USER_CTL:       priv->user_ctl      = ioctl_param;  return priv->user_ctl;
+	case KND_IOCTL_SET_USER_CTL_LAST:  priv->user_ctl_last = ioctl_param;  return priv->user_ctl_last;
+	case KND_IOCTL_GET_USER_STS:       return priv->user_sts;
+	}
+
+	return -ENOTTY;
+}
+
+
+struct file_operations kpc_dma_fops = {
+	.owner          = THIS_MODULE,
+	.open           = kpc_dma_open,
+	.release        = kpc_dma_close,
+	.read           = kpc_dma_read,
+	.write          = kpc_dma_write,
+#ifdef CONFIG_KPC_DMA_AIO
+	.aio_read       = kpc_dma_aio_read,
+	.aio_write      = kpc_dma_aio_write,
+#endif
+	.unlocked_ioctl = kpc_dma_ioctl,
+};
+
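[Note] Each descriptor moves at most 0x80000 (512 KiB) bytes, since DescByteCount is a 20-bit field, so a scatterlist entry of DMA length L needs ceil(L / 0x80000) descriptors — exactly what count_parts_for_sge() computes. A back-of-the-envelope check of that math (standalone C, not driver code):

#include <stdio.h>

#define MAX_DESC_BYTES 0x80000u /* 512 KiB; DescByteCount is 20 bits wide */

/* Same round-up division count_parts_for_sge() performs on sg_dma_len() */
static unsigned int parts_for_len(unsigned int len)
{
	return (len + MAX_DESC_BYTES - 1) / MAX_DESC_BYTES;
}

int main(void)
{
	/* A 1.25 MiB contiguous mapping needs 3 descriptors:
	 * 512 KiB + 512 KiB + 256 KiB. */
	printf("%u\n", parts_for_len(5 * 256 * 1024)); /* prints 3 */
	return 0;
}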
diff --git a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
new file mode 100644
index 000000000000..aeae58d9bc18
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/rwsem.h>
+#include "kpc_dma_driver.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Matt.Sickler@daktronics.com");
+
+#define KPC_DMA_CHAR_MAJOR   UNNAMED_MAJOR
+#define KPC_DMA_NUM_MINORS   (1 << MINORBITS)
+static DEFINE_MUTEX(kpc_dma_mtx);
+static int assigned_major_num;
+static LIST_HEAD(kpc_dma_list);
+
+
+/**********  kpc_dma_list list management  **********/
+struct kpc_dma_device * kpc_dma_lookup_device(int minor)
+{
+	struct kpc_dma_device *c;
+	mutex_lock(&kpc_dma_mtx);
+	list_for_each_entry(c, &kpc_dma_list, list) {
+		if (c->pldev->id == minor) {
+			goto out;
+		}
+	}
+	c = NULL; // not-found case
+ out:
+	mutex_unlock(&kpc_dma_mtx);
+	return c;
+}
+
+void kpc_dma_add_device(struct kpc_dma_device * ldev)
+{
+	mutex_lock(&kpc_dma_mtx);
+	list_add(&ldev->list, &kpc_dma_list);
+	mutex_unlock(&kpc_dma_mtx);
+}
+
+void kpc_dma_del_device(struct kpc_dma_device * ldev)
+{
+	mutex_lock(&kpc_dma_mtx);
+	list_del(&ldev->list);
+	mutex_unlock(&kpc_dma_mtx);
+}
+
+/**********  SysFS Attributes  **********/
+static ssize_t show_engine_regs(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct kpc_dma_device *ldev;
+	struct platform_device *pldev = to_platform_device(dev);
+	if (!pldev) return 0;
+	ldev = platform_get_drvdata(pldev);
+	if (!ldev) return 0;
+
+	return scnprintf(buf, PAGE_SIZE,
+		"EngineControlStatus = 0x%08x\n"
+		"RegNextDescPtr      = 0x%08x\n"
+		"RegSWDescPtr        = 0x%08x\n"
+		"RegCompletedDescPtr = 0x%08x\n"
+		"desc_pool_first     = %p\n"
+		"desc_pool_last      = %p\n"
+		"desc_next           = %p\n"
+		"desc_completed      = %p\n",
+		readl(ldev->eng_regs + 1),
+		readl(ldev->eng_regs + 2),
+		readl(ldev->eng_regs + 3),
+		readl(ldev->eng_regs + 4),
+		ldev->desc_pool_first,
+		ldev->desc_pool_last,
+		ldev->desc_next,
+		ldev->desc_completed
+	);
+}
+DEVICE_ATTR(engine_regs, 0444, show_engine_regs, NULL);
+
+static const struct attribute * ndd_attr_list[] = {
+	&dev_attr_engine_regs.attr,
+	NULL,
+};
+
+struct class *kpc_dma_class;
+
+
+/**********  Platform Driver Functions  **********/
+static
+int kpc_dma_probe(struct platform_device *pldev)
+{
+	struct resource *r = NULL;
+	int rv = 0;
+	dev_t dev;
+
+	struct kpc_dma_device *ldev = kzalloc(sizeof(struct kpc_dma_device), GFP_KERNEL);
+	if (!ldev){
+		dev_err(&pldev->dev, "kpc_dma_probe: unable to kzalloc space for kpc_dma_device\n");
+		rv = -ENOMEM;
+		goto err_rv;
+	}
+
+	dev_dbg(&pldev->dev, "kpc_dma_probe(pldev = [%p]) ldev = [%p]\n", pldev, ldev);
+
+	INIT_LIST_HEAD(&ldev->list);
+
+	ldev->pldev = pldev;
+	platform_set_drvdata(pldev, ldev);
+	atomic_set(&ldev->open_count, 1);
+
+	mutex_init(&ldev->sem);
+	lock_engine(ldev);
+
+	// Get Engine regs resource
+	r = platform_get_resource(pldev, IORESOURCE_MEM, 0);
+	if (!r){
+		dev_err(&ldev->pldev->dev, "kpc_dma_probe: didn't get the engine regs resource!\n");
+		rv = -ENXIO;
+		goto err_kfree;
+	}
+	ldev->eng_regs = ioremap_nocache(r->start, resource_size(r));
+	if (!ldev->eng_regs){
+		dev_err(&ldev->pldev->dev, "kpc_dma_probe: failed to ioremap engine regs!\n");
+		rv = -ENXIO;
+		goto err_kfree;
+	}
+
+	r = platform_get_resource(pldev, IORESOURCE_IRQ, 0);
+	if (!r){
+		dev_err(&ldev->pldev->dev, "kpc_dma_probe: didn't get the IRQ resource!\n");
+		rv = -ENXIO;
+		goto err_kfree;
+	}
+	ldev->irq = r->start;
+
+	// Setup miscdev struct
+	dev = MKDEV(assigned_major_num, pldev->id);
+	ldev->kpc_dma_dev = device_create(kpc_dma_class, &pldev->dev, dev, ldev, "kpc_dma%d", pldev->id);
+	if (IS_ERR(ldev->kpc_dma_dev)){
+		dev_err(&ldev->pldev->dev, "kpc_dma_probe: device_create failed: %d\n", rv);
+		goto err_kfree;
+	}
+
+	// Setup the DMA engine
+	rv = setup_dma_engine(ldev, 30);
+	if (rv){
+		dev_err(&ldev->pldev->dev, "kpc_dma_probe: failed to setup_dma_engine: %d\n", rv);
+		goto err_misc_dereg;
+	}
+
+	// Setup the sysfs files
+	rv = sysfs_create_files(&(ldev->pldev->dev.kobj), ndd_attr_list);
+	if (rv){
+		dev_err(&ldev->pldev->dev, "kpc_dma_probe: Failed to add sysfs files: %d\n", rv);
+		goto err_destroy_eng;
+	}
+
+	kpc_dma_add_device(ldev);
+
+	return 0;
+
+ err_destroy_eng:
+	destroy_dma_engine(ldev);
+ err_misc_dereg:
+	device_destroy(kpc_dma_class, dev);
+ err_kfree:
+	kfree(ldev);
+ err_rv:
+	return rv;
+}
+
+static
+int kpc_dma_remove(struct platform_device *pldev)
+{
+	struct kpc_dma_device *ldev = platform_get_drvdata(pldev);
+	if (!ldev)
+		return -ENXIO;
+
+	dev_dbg(&ldev->pldev->dev, "kpc_dma_remove(pldev = [%p]) ldev = [%p]\n", pldev, ldev);
+
+	lock_engine(ldev);
+	sysfs_remove_files(&(ldev->pldev->dev.kobj), ndd_attr_list);
+	destroy_dma_engine(ldev);
+	kpc_dma_del_device(ldev);
+	device_destroy(kpc_dma_class, MKDEV(assigned_major_num, ldev->pldev->id));
+	kfree(ldev);
+
+	return 0;
+}
+
+
+/**********  Driver Functions  **********/
+struct platform_driver kpc_dma_plat_driver_i = {
+	.probe  = kpc_dma_probe,
+	.remove = kpc_dma_remove,
+	.driver = {
+		.name  = KP_DRIVER_NAME_DMA_CONTROLLER,
+		.owner = THIS_MODULE,
+	},
+};
+
+static
+int __init kpc_dma_driver_init(void)
+{
+	int err;
+
+	err = __register_chrdev(KPC_DMA_CHAR_MAJOR, 0, KPC_DMA_NUM_MINORS, "kpc_dma", &kpc_dma_fops);
+	if (err < 0){
+		pr_err("Can't allocate a major number (%d) for kpc_dma (err = %d)\n", KPC_DMA_CHAR_MAJOR, err);
+		goto fail_chrdev_register;
+	}
+	assigned_major_num = err;
+
+	kpc_dma_class = class_create(THIS_MODULE, "kpc_dma");
+	err = PTR_ERR(kpc_dma_class);
+	if (IS_ERR(kpc_dma_class)){
+		pr_err("Can't create class kpc_dma (err = %d)\n", err);
+		goto fail_class_create;
+	}
+
+	err = platform_driver_register(&kpc_dma_plat_driver_i);
+	if (err){
+		pr_err("Can't register platform driver for kpc_dma (err = %d)\n", err);
+		goto fail_platdriver_register;
+	}
+
+	return err;
+
+ fail_platdriver_register:
+	class_destroy(kpc_dma_class);
+ fail_class_create:
+	__unregister_chrdev(KPC_DMA_CHAR_MAJOR, 0, KPC_DMA_NUM_MINORS, "kpc_dma");
+ fail_chrdev_register:
+	return err;
+}
+module_init(kpc_dma_driver_init);
+
+static
+void __exit kpc_dma_driver_exit(void)
+{
+	platform_driver_unregister(&kpc_dma_plat_driver_i);
+	class_destroy(kpc_dma_class);
+	__unregister_chrdev(KPC_DMA_CHAR_MAJOR, 0, KPC_DMA_NUM_MINORS, "kpc_dma");
+}
+module_exit(kpc_dma_driver_exit);
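[Note] KPC_DMA_CHAR_MAJOR is UNNAMED_MAJOR (0), so __register_chrdev() dynamically allocates a major number and returns it; each engine's platform-device id doubles as its char-dev minor and as the "%d" in /dev/kpc_dma%d. The engine_regs sysfs attribute registered above can be read from userspace; a minimal sketch (the sysfs path depends on how the kp2000 parent names its platform devices, so the path below is an assumption):

#include <stdio.h>

int main(void)
{
	char line[256];
	/* Hypothetical path: the attribute hangs off the platform device's
	 * kobject, whose name comes from KP_DRIVER_NAME_DMA_CONTROLLER. */
	FILE *f = fopen("/sys/devices/platform/kpc_dma_controller.0/engine_regs", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout); /* dumps the four engine registers etc. */
	fclose(f);
	return 0;
}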
diff --git a/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h
new file mode 100644
index 000000000000..ef913b7496e6
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef KPC_DMA_DRIVER_H
+#define KPC_DMA_DRIVER_H
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/kobject.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/atomic.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/aio.h>
+#include <linux/scatterlist.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include "../kpc.h"
+
+
+struct kp2000_device;
+struct kpc_dma_device {
+	struct list_head           list;
+	struct platform_device    *pldev;
+	u32 __iomem               *eng_regs;
+	struct device             *kpc_dma_dev;
+	struct kobject             kobj;
+	char                       name[16];
+
+	int                        dir; // DMA_FROM_DEVICE || DMA_TO_DEVICE
+	struct mutex               sem;
+	unsigned int               irq;
+	struct work_struct         irq_work;
+
+	atomic_t                   open_count;
+
+	size_t                     accumulated_bytes;
+	u32                        accumulated_flags;
+
+	// Descriptor "Pool" housekeeping
+	u32                        desc_pool_cnt;
+	struct dma_pool           *desc_pool;
+	struct kpc_dma_descriptor *desc_pool_first;
+	struct kpc_dma_descriptor *desc_pool_last;
+
+	struct kpc_dma_descriptor *desc_next;
+	struct kpc_dma_descriptor *desc_completed;
+};
+
+struct dev_private_data {
+	struct kpc_dma_device *ldev;
+	u64                    card_addr;
+	u64                    user_ctl;
+	u64                    user_ctl_last;
+	u64                    user_sts;
+};
+
+struct kpc_dma_device * kpc_dma_lookup_device(int minor);
+
+extern struct file_operations kpc_dma_fops;
+
+#define ENG_CAP_PRESENT                 0x00000001
+#define ENG_CAP_DIRECTION               0x00000002
+#define ENG_CAP_TYPE_MASK               0x000000F0
+#define ENG_CAP_NUMBER_MASK             0x0000FF00
+#define ENG_CAP_CARD_ADDR_SIZE_MASK     0x007F0000
+#define ENG_CAP_DESC_MAX_BYTE_CNT_MASK  0x3F000000
+#define ENG_CAP_PERF_SCALE_MASK         0xC0000000
+
+#define ENG_CTL_IRQ_ENABLE           BIT(0)
+#define ENG_CTL_IRQ_ACTIVE           BIT(1)
+#define ENG_CTL_DESC_COMPLETE        BIT(2)
+#define ENG_CTL_DESC_ALIGN_ERR       BIT(3)
+#define ENG_CTL_DESC_FETCH_ERR       BIT(4)
+#define ENG_CTL_SW_ABORT_ERR         BIT(5)
+#define ENG_CTL_DESC_CHAIN_END       BIT(7)
+#define ENG_CTL_DMA_ENABLE           BIT(8)
+#define ENG_CTL_DMA_RUNNING          BIT(10)
+#define ENG_CTL_DMA_WAITING          BIT(11)
+#define ENG_CTL_DMA_WAITING_PERSIST  BIT(12)
+#define ENG_CTL_DMA_RESET_REQUEST    BIT(14)
+#define ENG_CTL_DMA_RESET            BIT(15)
+#define ENG_CTL_DESC_FETCH_ERR_CLASS_MASK  0x700000
+
+struct aio_cb_data {
+	struct dev_private_data *priv;
+	struct kpc_dma_device   *ldev;
+	struct completion       *cpl;
+	unsigned char            flags;
+	struct kiocb            *kcb;
+	size_t                   len;
+
+	unsigned int             page_count;
+	struct page            **user_pages;
+	struct sg_table          sgt;
+	int                      mapped_entry_count;
+};
+
+#define ACD_FLAG_DONE             0
+#define ACD_FLAG_ABORT            1
+#define ACD_FLAG_ENG_ACCUM_ERROR  4
+#define ACD_FLAG_ENG_ACCUM_SHORT  5
+
+struct kpc_dma_descriptor {
+	struct {
+		volatile u32 DescByteCount        :20;
+		volatile u32 DescStatusErrorFlags :4;
+		volatile u32 DescStatusFlags      :8;
+	};
+	volatile u32 DescUserControlLS;
+	volatile u32 DescUserControlMS;
+	volatile u32 DescCardAddrLS;
+	struct {
+		volatile u32 DescBufferByteCount  :20;
+		volatile u32 DescCardAddrMS       :4;
+		volatile u32 DescControlFlags     :8;
+	};
+	volatile u32 DescSystemAddrLS;
+	volatile u32 DescSystemAddrMS;
+	volatile u32 DescNextDescPtr;
+
+	dma_addr_t                 MyDMAAddr;
+	struct kpc_dma_descriptor *Next;
+
+	struct aio_cb_data        *acd;
+} __attribute__((packed));
+// DescControlFlags:
+#define DMA_DESC_CTL_SOP        BIT(7)
+#define DMA_DESC_CTL_EOP        BIT(6)
+#define DMA_DESC_CTL_AFIFO      BIT(2)
+#define DMA_DESC_CTL_IRQONERR   BIT(1)
+#define DMA_DESC_CTL_IRQONDONE  BIT(0)
+// DescStatusFlags:
+#define DMA_DESC_STS_SOP        BIT(7)
+#define DMA_DESC_STS_EOP        BIT(6)
+#define DMA_DESC_STS_ERROR      BIT(4)
+#define DMA_DESC_STS_USMSZ      BIT(3)
+#define DMA_DESC_STS_USLSZ      BIT(2)
+#define DMA_DESC_STS_SHORT      BIT(1)
+#define DMA_DESC_STS_COMPLETE   BIT(0)
+// DescStatusErrorFlags:
+#define DMA_DESC_ESTS_ECRC          BIT(2)
+#define DMA_DESC_ESTS_POISON        BIT(1)
+#define DMA_DESC_ESTS_UNSUCCESSFUL  BIT(0)
+
+#define DMA_DESC_ALIGNMENT  0x20
+
+static inline
+u32 GetEngineCapabilities(struct kpc_dma_device *eng)
+{
+	return readl(eng->eng_regs + 0);
+}
+
+static inline
+void WriteEngineControl(struct kpc_dma_device *eng, u32 value)
+{
+	writel(value, eng->eng_regs + 1);
+}
+static inline
+u32 GetEngineControl(struct kpc_dma_device *eng)
+{
+	return readl(eng->eng_regs + 1);
+}
+static inline
+void SetClearEngineControl(struct kpc_dma_device *eng, u32 set_bits, u32 clear_bits)
+{
+	u32 val = GetEngineControl(eng);
+	val |= set_bits;
+	val &= ~clear_bits;
+	WriteEngineControl(eng, val);
+}
+
+static inline
+void SetEngineNextPtr(struct kpc_dma_device *eng, struct kpc_dma_descriptor * desc)
+{
+	writel(desc->MyDMAAddr, eng->eng_regs + 2);
+}
+static inline
+void SetEngineSWPtr(struct kpc_dma_device *eng, struct kpc_dma_descriptor * desc)
+{
+	writel(desc->MyDMAAddr, eng->eng_regs + 3);
+}
+static inline
+void ClearEngineCompletePtr(struct kpc_dma_device *eng)
+{
+	writel(0, eng->eng_regs + 4);
+}
+static inline
+u32 GetEngineCompletePtr(struct kpc_dma_device *eng)
+{
+	return readl(eng->eng_regs + 4);
+}
+
+static inline
+void lock_engine(struct kpc_dma_device *eng)
+{
+	BUG_ON(eng == NULL);
+	mutex_lock(&eng->sem);
+}
+
+static inline
+void unlock_engine(struct kpc_dma_device *eng)
+{
+	BUG_ON(eng == NULL);
+	mutex_unlock(&eng->sem);
+}
+
+
+/// Shared Functions
+void start_dma_engine(struct kpc_dma_device *eng);
+int  setup_dma_engine(struct kpc_dma_device *eng, u32 desc_cnt);
+void stop_dma_engine(struct kpc_dma_device *eng);
+void destroy_dma_engine(struct kpc_dma_device *eng);
+void clear_desc(struct kpc_dma_descriptor *desc);
+int  count_descriptors_available(struct kpc_dma_device *eng);
+void transfer_complete_cb(struct aio_cb_data *acd, size_t xfr_count, u32 flags);
+
+#endif /* KPC_DMA_DRIVER_H */
+
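[Note] The hardware-visible part of kpc_dma_descriptor is eight 32-bit words (32 bytes), which is why DMA_DESC_ALIGNMENT is 0x20 and the pool is created with that alignment. A standalone structural check (assumes GCC/Clang-style bitfield packing, same as the header relies on; not driver code):

#include <stdint.h>
#include <stdio.h>

struct hw_desc_words {
	struct {
		uint32_t byte_count : 20;     /* DescByteCount */
		uint32_t status_err : 4;      /* DescStatusErrorFlags */
		uint32_t status     : 8;      /* DescStatusFlags */
	};
	uint32_t user_control_ls;             /* DescUserControlLS */
	uint32_t user_control_ms;             /* DescUserControlMS */
	uint32_t card_addr_ls;                /* DescCardAddrLS */
	struct {
		uint32_t buf_byte_count : 20; /* DescBufferByteCount */
		uint32_t card_addr_ms   : 4;  /* DescCardAddrMS */
		uint32_t control        : 8;  /* DescControlFlags */
	};
	uint32_t system_addr_ls;              /* DescSystemAddrLS */
	uint32_t system_addr_ms;              /* DescSystemAddrMS */
	uint32_t next_desc_ptr;               /* DescNextDescPtr */
};

_Static_assert(sizeof(struct hw_desc_words) == 32,
	       "hardware descriptor must be eight 32-bit words");

int main(void)
{
	printf("%zu\n", sizeof(struct hw_desc_words)); /* prints 32 */
	return 0;
}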
diff --git a/drivers/staging/kpc2000/kpc_dma/uapi.h b/drivers/staging/kpc2000/kpc_dma/uapi.h
new file mode 100644
index 000000000000..5ff6a1a36ff9
--- /dev/null
+++ b/drivers/staging/kpc2000/kpc_dma/uapi.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef KPC_DMA_DRIVER_UAPI_H_
+#define KPC_DMA_DRIVER_UAPI_H_
+#include <linux/ioctl.h>
+
+#define KND_IOCTL_SET_CARD_ADDR      _IOW('k', 1, __u32)
+#define KND_IOCTL_SET_USER_CTL       _IOW('k', 2, __u64)
+#define KND_IOCTL_SET_USER_CTL_LAST  _IOW('k', 4, __u64)
+#define KND_IOCTL_GET_USER_STS       _IOR('k', 3, __u64)
+
+#endif /* KPC_DMA_DRIVER_UAPI_H_ */
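
[Note] A hypothetical userspace sequence matching the fileops semantics in this patch: set the card-side address via ioctl, then write() a buffer; the call blocks until the engine completes and returns the byte count. The node name comes from device_create(..., "kpc_dma%d", ...); which minor is a DMA_TO_DEVICE engine depends on the hardware capability bits, so /dev/kpc_dma0 being writable is an assumption here.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "uapi.h"	/* KND_IOCTL_* definitions from this patch */

int main(void)
{
	char *buf;
	ssize_t n;
	int fd = open("/dev/kpc_dma0", O_WRONLY); /* assumed TO_DEVICE engine */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Card-side destination address for the upcoming transfer. */
	if (ioctl(fd, KND_IOCTL_SET_CARD_ADDR, 0x1000) < 0)
		perror("KND_IOCTL_SET_CARD_ADDR");

	buf = malloc(1 << 20);
	if (!buf)
		return 1;
	memset(buf, 0xA5, 1 << 20);
	n = write(fd, buf, 1 << 20);	/* blocks until the DMA completes */
	printf("transferred %zd bytes\n", n);

	free(buf);
	close(fd);
	return 0;
}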