Diffstat (limited to 'drivers/staging/kpc2000/kpc_dma/fileops.c')
-rw-r--r--   drivers/staging/kpc2000/kpc_dma/fileops.c   269
1 file changed, 101 insertions(+), 168 deletions(-)
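
Among the cleanups in the diff below, the open-coded round-up in count_parts_for_sge() is replaced by DIV_ROUND_UP(). The two forms are equivalent for the 0x80000-byte (512 KiB) descriptor chunk size used by this engine; the following is a minimal userspace sketch of that equivalence (not part of the patch), using the kernel's definition of DIV_ROUND_UP():

#include <assert.h>
#include <stddef.h>

/* Same definition as the kernel's DIV_ROUND_UP() */
#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
	for (size_t len = 1; len <= 0x300000; len += 0x1357) {
		/* old form from count_parts_for_sge() */
		size_t old = (len + (0x80000 - 1)) / 0x80000;
		/* new form */
		size_t new = DIV_ROUND_UP(len, 0x80000);

		assert(old == new);
	}
	return 0;
}
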
diff --git a/drivers/staging/kpc2000/kpc_dma/fileops.c b/drivers/staging/kpc2000/kpc_dma/fileops.c
index 616658709bd9..48ca88bc6b0b 100644
--- a/drivers/staging/kpc2000/kpc_dma/fileops.c
+++ b/drivers/staging/kpc2000/kpc_dma/fileops.c
@@ -9,7 +9,6 @@
#include <linux/types.h> /* size_t */
#include <linux/cdev.h>
#include <linux/uaccess.h> /* copy_*_user */
-#include <linux/aio.h> /* aio stuff */
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include "kpc_dma_driver.h"
@@ -21,20 +20,19 @@ unsigned int count_pages(unsigned long iov_base, size_t iov_len)
{
unsigned long first = (iov_base & PAGE_MASK) >> PAGE_SHIFT;
unsigned long last = ((iov_base+iov_len-1) & PAGE_MASK) >> PAGE_SHIFT;
+
return last - first + 1;
}
static inline
unsigned int count_parts_for_sge(struct scatterlist *sg)
{
- unsigned int sg_length = sg_dma_len(sg);
- sg_length += (0x80000-1);
- return (sg_length / 0x80000);
+ return DIV_ROUND_UP(sg_dma_len(sg), 0x80000);
}
/********** Transfer Helpers **********/
-static
-int kpc_dma_transfer(struct dev_private_data *priv, struct kiocb *kcb, unsigned long iov_base, size_t iov_len)
+static int kpc_dma_transfer(struct dev_private_data *priv,
+ unsigned long iov_base, size_t iov_len)
{
unsigned int i = 0;
long rv = 0;
@@ -50,75 +48,72 @@ int kpc_dma_transfer(struct dev_private_data *priv, struct kiocb *kcb, unsigned
u64 card_addr;
u64 dma_addr;
u64 user_ctl;
-
+
BUG_ON(priv == NULL);
ldev = priv->ldev;
BUG_ON(ldev == NULL);
-
- dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_transfer(priv = [%p], kcb = [%p], iov_base = [%p], iov_len = %ld) ldev = [%p]\n", priv, kcb, (void*)iov_base, iov_len, ldev);
-
- acd = (struct aio_cb_data *) kzalloc(sizeof(struct aio_cb_data), GFP_KERNEL);
- if (!acd){
+
+ acd = kzalloc(sizeof(*acd), GFP_KERNEL);
+ if (!acd) {
dev_err(&priv->ldev->pldev->dev, "Couldn't kmalloc space for for the aio data\n");
return -ENOMEM;
}
memset(acd, 0x66, sizeof(struct aio_cb_data));
-
+
acd->priv = priv;
acd->ldev = priv->ldev;
acd->cpl = &done;
acd->flags = 0;
- acd->kcb = kcb;
acd->len = iov_len;
acd->page_count = count_pages(iov_base, iov_len);
-
+
// Allocate an array of page pointers
acd->user_pages = kzalloc(sizeof(struct page *) * acd->page_count, GFP_KERNEL);
- if (!acd->user_pages){
+ if (!acd->user_pages) {
dev_err(&priv->ldev->pldev->dev, "Couldn't kmalloc space for for the page pointers\n");
rv = -ENOMEM;
goto err_alloc_userpages;
}
-
+
// Lock the user buffer pages in memory, and hold on to the page pointers (for the sglist)
down_read(&current->mm->mmap_sem); /* get memory map semaphore */
rv = get_user_pages(iov_base, acd->page_count, FOLL_TOUCH | FOLL_WRITE | FOLL_GET, acd->user_pages, NULL);
up_read(&current->mm->mmap_sem); /* release the semaphore */
- if (rv != acd->page_count){
+ if (rv != acd->page_count) {
dev_err(&priv->ldev->pldev->dev, "Couldn't get_user_pages (%ld)\n", rv);
goto err_get_user_pages;
}
-
+
// Allocate and setup the sg_table (scatterlist entries)
rv = sg_alloc_table_from_pages(&acd->sgt, acd->user_pages, acd->page_count, iov_base & (PAGE_SIZE-1), iov_len, GFP_KERNEL);
- if (rv){
+ if (rv) {
dev_err(&priv->ldev->pldev->dev, "Couldn't alloc sg_table (%ld)\n", rv);
goto err_alloc_sg_table;
}
-
+
// Setup the DMA mapping for all the sg entries
acd->mapped_entry_count = dma_map_sg(&ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, ldev->dir);
- if (acd->mapped_entry_count <= 0){
+ if (acd->mapped_entry_count <= 0) {
dev_err(&priv->ldev->pldev->dev, "Couldn't dma_map_sg (%d)\n", acd->mapped_entry_count);
goto err_dma_map_sg;
}
// Calculate how many descriptors are actually needed for this transfer.
- for_each_sg(acd->sgt.sgl, sg, acd->mapped_entry_count, i){
+ for_each_sg(acd->sgt.sgl, sg, acd->mapped_entry_count, i) {
desc_needed += count_parts_for_sge(sg);
}
-
+
lock_engine(ldev);
-
+
// Figoure out how many descriptors are available and return an error if there aren't enough
num_descrs_avail = count_descriptors_available(ldev);
dev_dbg(&priv->ldev->pldev->dev, " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
- if (desc_needed >= ldev->desc_pool_cnt){
+ if (desc_needed >= ldev->desc_pool_cnt) {
dev_warn(&priv->ldev->pldev->dev, " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d TOO MANY to ever complete!\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
rv = -EAGAIN;
goto err_descr_too_many;
}
- if (desc_needed > num_descrs_avail){
+ if (desc_needed > num_descrs_avail) {
dev_warn(&priv->ldev->pldev->dev, " mapped_entry_count = %d num_descrs_needed = %d num_descrs_avail = %d Too many to complete right now.\n", acd->mapped_entry_count, desc_needed, num_descrs_avail);
rv = -EMSGSIZE;
goto err_descr_too_many;
@@ -127,70 +122,67 @@ int kpc_dma_transfer(struct dev_private_data *priv, struct kiocb *kcb, unsigned
// Loop through all the sg table entries and fill out a descriptor for each one.
desc = ldev->desc_next;
card_addr = acd->priv->card_addr;
- for_each_sg(acd->sgt.sgl, sg, acd->mapped_entry_count, i){
+ for_each_sg(acd->sgt.sgl, sg, acd->mapped_entry_count, i) {
pcnt = count_parts_for_sge(sg);
- for (p = 0 ; p < pcnt ; p++){
+ for (p = 0 ; p < pcnt ; p++) {
// Fill out the descriptor
BUG_ON(desc == NULL);
clear_desc(desc);
- if (p != pcnt-1){
+ if (p != pcnt-1) {
desc->DescByteCount = 0x80000;
} else {
desc->DescByteCount = sg_dma_len(sg) - (p * 0x80000);
}
desc->DescBufferByteCount = desc->DescByteCount;
-
+
desc->DescControlFlags |= DMA_DESC_CTL_IRQONERR;
if (i == 0 && p == 0)
desc->DescControlFlags |= DMA_DESC_CTL_SOP;
if (i == acd->mapped_entry_count-1 && p == pcnt-1)
desc->DescControlFlags |= DMA_DESC_CTL_EOP | DMA_DESC_CTL_IRQONDONE;
-
+
desc->DescCardAddrLS = (card_addr & 0xFFFFFFFF);
desc->DescCardAddrMS = (card_addr >> 32) & 0xF;
card_addr += desc->DescByteCount;
-
+
dma_addr = sg_dma_address(sg) + (p * 0x80000);
desc->DescSystemAddrLS = (dma_addr & 0x00000000FFFFFFFF) >> 0;
desc->DescSystemAddrMS = (dma_addr & 0xFFFFFFFF00000000) >> 32;
-
+
user_ctl = acd->priv->user_ctl;
- if (i == acd->mapped_entry_count-1 && p == pcnt-1){
+ if (i == acd->mapped_entry_count-1 && p == pcnt-1) {
user_ctl = acd->priv->user_ctl_last;
}
desc->DescUserControlLS = (user_ctl & 0x00000000FFFFFFFF) >> 0;
desc->DescUserControlMS = (user_ctl & 0xFFFFFFFF00000000) >> 32;
-
+
if (i == acd->mapped_entry_count-1 && p == pcnt-1)
desc->acd = acd;
-
+
dev_dbg(&priv->ldev->pldev->dev, " Filled descriptor %p (acd = %p)\n", desc, desc->acd);
-
+
ldev->desc_next = desc->Next;
desc = desc->Next;
}
}
-
+
// Send the filled descriptors off to the hardware to process!
SetEngineSWPtr(ldev, ldev->desc_next);
-
+
unlock_engine(ldev);
-
- // If this is a synchronous kiocb, we need to put the calling process to sleep until the transfer is complete
- if (kcb == NULL || is_sync_kiocb(kcb)){
- rv = wait_for_completion_interruptible(&done);
- // If the user aborted (rv == -ERESTARTSYS), we're no longer responsible for cleaning up the acd
- if (rv == -ERESTARTSYS){
- acd->cpl = NULL;
- }
- if (rv == 0){
- rv = acd->len;
- kfree(acd);
- }
- return rv;
+
+ rv = wait_for_completion_interruptible(&done);
+ /*
+ * If the user aborted (rv == -ERESTARTSYS), we're no longer responsible
+ * for cleaning up the acd
+ */
+ if (rv == -ERESTARTSYS)
+ acd->cpl = NULL;
+ if (rv == 0) {
+ rv = acd->len;
+ kfree(acd);
}
-
- return -EIOCBQUEUED;
+ return rv;
err_descr_too_many:
unlock_engine(ldev);
@@ -198,58 +190,52 @@ int kpc_dma_transfer(struct dev_private_data *priv, struct kiocb *kcb, unsigned
sg_free_table(&acd->sgt);
err_dma_map_sg:
err_alloc_sg_table:
- for (i = 0 ; i < acd->page_count ; i++){
+ for (i = 0 ; i < acd->page_count ; i++) {
put_page(acd->user_pages[i]);
}
err_get_user_pages:
kfree(acd->user_pages);
err_alloc_userpages:
kfree(acd);
- dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_transfer returning with error %ld\n", rv);
+ dev_dbg(&priv->ldev->pldev->dev, "%s returning with error %ld\n", __func__, rv);
return rv;
}
void transfer_complete_cb(struct aio_cb_data *acd, size_t xfr_count, u32 flags)
{
unsigned int i;
-
+
BUG_ON(acd == NULL);
BUG_ON(acd->user_pages == NULL);
BUG_ON(acd->sgt.sgl == NULL);
BUG_ON(acd->ldev == NULL);
BUG_ON(acd->ldev->pldev == NULL);
-
- dev_dbg(&acd->ldev->pldev->dev, "transfer_complete_cb(acd = [%p])\n", acd);
-
- for (i = 0 ; i < acd->page_count ; i++){
- if (!PageReserved(acd->user_pages[i])){
+
+ for (i = 0 ; i < acd->page_count ; i++) {
+ if (!PageReserved(acd->user_pages[i])) {
set_page_dirty(acd->user_pages[i]);
}
}
-
+
dma_unmap_sg(&acd->ldev->pldev->dev, acd->sgt.sgl, acd->sgt.nents, acd->ldev->dir);
-
- for (i = 0 ; i < acd->page_count ; i++){
+
+ for (i = 0 ; i < acd->page_count ; i++) {
put_page(acd->user_pages[i]);
}
-
+
sg_free_table(&acd->sgt);
-
+
kfree(acd->user_pages);
-
+
acd->flags = flags;
-
- if (acd->kcb == NULL || is_sync_kiocb(acd->kcb)){
- if (acd->cpl){
- complete(acd->cpl);
- } else {
- // There's no completion, so we're responsible for cleaning up the acd
- kfree(acd);
- }
+
+ if (acd->cpl) {
+ complete(acd->cpl);
} else {
-#ifdef CONFIG_KPC_DMA_AIO
- aio_complete(acd->kcb, acd->len, acd->flags);
-#endif
+ /*
+ * There's no completion, so we're responsible for cleaning up
+ * the acd
+ */
kfree(acd);
}
}
@@ -260,22 +246,22 @@ int kpc_dma_open(struct inode *inode, struct file *filp)
{
struct dev_private_data *priv;
struct kpc_dma_device *ldev = kpc_dma_lookup_device(iminor(inode));
- if (ldev == NULL)
+
+ if (!ldev)
return -ENODEV;
-
- if (! atomic_dec_and_test(&ldev->open_count)){
+
+ if (!atomic_dec_and_test(&ldev->open_count)) {
atomic_inc(&ldev->open_count);
return -EBUSY; /* already open */
}
-
+
priv = kzalloc(sizeof(struct dev_private_data), GFP_KERNEL);
if (!priv)
return -ENOMEM;
-
+
priv->ldev = ldev;
filp->private_data = priv;
-
- dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_open(inode = [%p], filp = [%p]) priv = [%p] ldev = [%p]\n", inode, filp, priv, priv->ldev);
+
return 0;
}
@@ -285,134 +271,81 @@ int kpc_dma_close(struct inode *inode, struct file *filp)
struct kpc_dma_descriptor *cur;
struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;
struct kpc_dma_device *eng = priv->ldev;
- dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_close(inode = [%p], filp = [%p]) priv = [%p], ldev = [%p]\n", inode, filp, priv, priv->ldev);
-
+
lock_engine(eng);
-
+
stop_dma_engine(eng);
-
+
cur = eng->desc_completed->Next;
- while (cur != eng->desc_next){
+ while (cur != eng->desc_next) {
dev_dbg(&eng->pldev->dev, "Aborting descriptor %p (acd = %p)\n", cur, cur->acd);
- if (cur->DescControlFlags & DMA_DESC_CTL_EOP){
+ if (cur->DescControlFlags & DMA_DESC_CTL_EOP) {
if (cur->acd)
transfer_complete_cb(cur->acd, 0, ACD_FLAG_ABORT);
}
-
+
clear_desc(cur);
eng->desc_completed = cur;
-
+
cur = cur->Next;
}
-
+
start_dma_engine(eng);
-
+
unlock_engine(eng);
-
+
atomic_inc(&priv->ldev->open_count); /* release the device */
kfree(priv);
return 0;
}
-#ifdef CONFIG_KPC_DMA_AIO
static
-int kpc_dma_aio_cancel(struct kiocb *kcb)
+ssize_t kpc_dma_read(struct file *filp, char __user *user_buf, size_t count, loff_t *ppos)
{
- struct dev_private_data *priv = (struct dev_private_data *)kcb->ki_filp->private_data;
- dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_aio_cancel(kcb = [%p]) priv = [%p], ldev = [%p]\n", kcb, priv, priv->ldev);
- return 0;
-}
+ struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;
-static
-ssize_t kpc_dma_aio_read(struct kiocb *kcb, const struct iovec *iov, unsigned long iov_count, loff_t pos)
-{
- struct dev_private_data *priv = (struct dev_private_data *)kcb->ki_filp->private_data;
- dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_aio_read(kcb = [%p], iov = [%p], iov_count = %ld, pos = %lld) priv = [%p], ldev = [%p]\n", kcb, iov, iov_count, pos, priv, priv->ldev);
-
if (priv->ldev->dir != DMA_FROM_DEVICE)
return -EMEDIUMTYPE;
-
- if (iov_count != 1){
- dev_err(&priv->ldev->pldev->dev, "kpc_dma_aio_read() called with iov_count > 1!\n");
- return -EFAULT;
- }
-
- if (!is_sync_kiocb(kcb))
- kiocb_set_cancel_fn(kcb, kpc_dma_aio_cancel);
- return kpc_dma_transfer(priv, kcb, (unsigned long)iov->iov_base, iov->iov_len);
-}
-static
-ssize_t kpc_dma_aio_write(struct kiocb *kcb, const struct iovec *iov, unsigned long iov_count, loff_t pos)
-{
- struct dev_private_data *priv = (struct dev_private_data *)kcb->ki_filp->private_data;
- dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_aio_write(kcb = [%p], iov = [%p], iov_count = %ld, pos = %lld) priv = [%p], ldev = [%p]\n", kcb, iov, iov_count, pos, priv, priv->ldev);
-
- if (priv->ldev->dir != DMA_TO_DEVICE)
- return -EMEDIUMTYPE;
-
- if (iov_count != 1){
- dev_err(&priv->ldev->pldev->dev, "kpc_dma_aio_write() called with iov_count > 1!\n");
- return -EFAULT;
- }
-
- if (!is_sync_kiocb(kcb))
- kiocb_set_cancel_fn(kcb, kpc_dma_aio_cancel);
- return kpc_dma_transfer(priv, kcb, (unsigned long)iov->iov_base, iov->iov_len);
-}
-#endif
-
-static
-ssize_t kpc_dma_read( struct file *filp, char __user *user_buf, size_t count, loff_t *ppos)
-{
- struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;
- dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_read(filp = [%p], user_buf = [%p], count = %zu, ppos = [%p]) priv = [%p], ldev = [%p]\n", filp, user_buf, count, ppos, priv, priv->ldev);
-
- if (priv->ldev->dir != DMA_FROM_DEVICE)
- return -EMEDIUMTYPE;
-
- return kpc_dma_transfer(priv, (struct kiocb *)NULL, (unsigned long)user_buf, count);
+ return kpc_dma_transfer(priv, (unsigned long)user_buf, count);
}
static
ssize_t kpc_dma_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *ppos)
{
struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;
- dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_write(filp = [%p], user_buf = [%p], count = %zu, ppos = [%p]) priv = [%p], ldev = [%p]\n", filp, user_buf, count, ppos, priv, priv->ldev);
-
+
if (priv->ldev->dir != DMA_TO_DEVICE)
return -EMEDIUMTYPE;
-
- return kpc_dma_transfer(priv, (struct kiocb *)NULL, (unsigned long)user_buf, count);
+
+ return kpc_dma_transfer(priv, (unsigned long)user_buf, count);
}
static
long kpc_dma_ioctl(struct file *filp, unsigned int ioctl_num, unsigned long ioctl_param)
{
struct dev_private_data *priv = (struct dev_private_data *)filp->private_data;
- dev_dbg(&priv->ldev->pldev->dev, "kpc_dma_ioctl(filp = [%p], ioctl_num = 0x%x, ioctl_param = 0x%lx) priv = [%p], ldev = [%p]\n", filp, ioctl_num, ioctl_param, priv, priv->ldev);
-
- switch (ioctl_num){
- case KND_IOCTL_SET_CARD_ADDR: priv->card_addr = ioctl_param; return priv->card_addr;
- case KND_IOCTL_SET_USER_CTL: priv->user_ctl = ioctl_param; return priv->user_ctl;
- case KND_IOCTL_SET_USER_CTL_LAST: priv->user_ctl_last = ioctl_param; return priv->user_ctl_last;
- case KND_IOCTL_GET_USER_STS: return priv->user_sts;
+
+ switch (ioctl_num) {
+ case KND_IOCTL_SET_CARD_ADDR:
+ priv->card_addr = ioctl_param; return priv->card_addr;
+ case KND_IOCTL_SET_USER_CTL:
+ priv->user_ctl = ioctl_param; return priv->user_ctl;
+ case KND_IOCTL_SET_USER_CTL_LAST:
+ priv->user_ctl_last = ioctl_param; return priv->user_ctl_last;
+ case KND_IOCTL_GET_USER_STS:
+ return priv->user_sts;
}
-
+
return -ENOTTY;
}
-
-struct file_operations kpc_dma_fops = {
+const struct file_operations kpc_dma_fops = {
.owner = THIS_MODULE,
.open = kpc_dma_open,
.release = kpc_dma_close,
.read = kpc_dma_read,
.write = kpc_dma_write,
-#ifdef CONFIG_KPC_DMA_AIO
- .aio_read = kpc_dma_aio_read,
- .aio_write = kpc_dma_aio_write,
-#endif
.unlocked_ioctl = kpc_dma_ioctl,
};
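
With the aio_read/aio_write hooks removed, userspace drives the engine only through the plain read()/write() entry points, which now block in wait_for_completion_interruptible() until the DMA finishes. A rough usage sketch follows; the device node name and the KND_IOCTL_SET_CARD_ADDR request value are placeholders (the real values come from the driver's headers and device setup), so treat this as illustrative only:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Placeholder only: the real request number comes from the driver's ioctl header. */
#ifndef KND_IOCTL_SET_CARD_ADDR
#define KND_IOCTL_SET_CARD_ADDR 0x01
#endif

int main(void)
{
	char buf[4096];
	int fd = open("/dev/kpc_dma0", O_WRONLY);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Select the on-card destination address for the next transfer. */
	if (ioctl(fd, KND_IOCTL_SET_CARD_ADDR, 0x0UL) < 0)
		perror("ioctl");

	memset(buf, 0xAB, sizeof(buf));

	/* write() returns the byte count once the DMA engine signals completion. */
	if (write(fd, buf, sizeof(buf)) < 0)
		perror("write");

	close(fd);
	return 0;
}
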