path: root/drivers/dma/ioat/dma.c
author    Dave Jiang <dave.jiang@intel.com>    2015-08-11 08:48:43 -0700
committer Vinod Koul <vinod.koul@intel.com>    2015-08-17 13:37:30 +0530
commit    c0f28ce66ecfd9fa0ae662a2c7f3e68e537e77f4 (patch)
tree      29b72d67fd006b880c44882fbe412204850657a9 /drivers/dma/ioat/dma.c
parent    dmaengine: ioatdma: move all sysfs related code (diff)
dmaengine: ioatdma: move all the init routines
Moving all the init routines to init.c and fixing up anything broken during the move.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/ioat/dma.c')
-rw-r--r--  drivers/dma/ioat/dma.c | 509
1 file changed, 5 insertions(+), 504 deletions(-)
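
Besides deleting the relocated init routines, the patch drops the static qualifier from several helpers (ioat_dma_do_interrupt, ioat_dma_do_interrupt_msix, ioat_start_null_desc, ioat_free_ring_ent, ioat_alloc_ring) so the init code, now living in init.c, can call them from another translation unit. A minimal sketch of the matching prototypes follows, assuming they are declared in drivers/dma/ioat/dma.h; that header change is not part of this file's diff, so the placement is an assumption. The signatures below are copied from the symbols changed in this patch.

/* Hypothetical excerpt of drivers/dma/ioat/dma.h -- placement assumed,
 * not shown in this diff. Needed so init.c can reference these symbols. */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data);       /* single-vector (MSI/INTx) handler */
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);  /* per-channel MSI-X handler */
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);    /* kick a channel with a null descriptor */
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
struct ioat_ring_ent **ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);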
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1746f7b4c3b4..5d78cafdd3f2 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -37,30 +37,12 @@
#include "../dmaengine.h"
-int ioat_pending_level = 4;
-module_param(ioat_pending_level, int, 0644);
-MODULE_PARM_DESC(ioat_pending_level,
- "high-water mark for pushing ioat descriptors (default: 4)");
-int ioat_ring_alloc_order = 8;
-module_param(ioat_ring_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_alloc_order,
- "ioat+: allocate 2^n descriptors per channel (default: 8 max: 16)");
-static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
-module_param(ioat_ring_max_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_max_alloc_order,
- "ioat+: upper limit for ring size (default: 16)");
-static char ioat_interrupt_style[32] = "msix";
-module_param_string(ioat_interrupt_style, ioat_interrupt_style,
- sizeof(ioat_interrupt_style), 0644);
-MODULE_PARM_DESC(ioat_interrupt_style,
- "set ioat interrupt style: msix (default), msi, intx");
-
/**
* ioat_dma_do_interrupt - handler used for single vector interrupt mode
* @irq: interrupt id
* @data: interrupt data
*/
-static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
+irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
struct ioatdma_device *instance = data;
struct ioatdma_chan *ioat_chan;
@@ -94,7 +76,7 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
* @irq: interrupt id
* @data: interrupt data
*/
-static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
+irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
struct ioatdma_chan *ioat_chan = data;
@@ -104,28 +86,6 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
return IRQ_HANDLED;
}
-/* common channel initialization */
-void
-ioat_init_channel(struct ioatdma_device *ioat_dma,
- struct ioatdma_chan *ioat_chan, int idx)
-{
- struct dma_device *dma = &ioat_dma->dma_dev;
- struct dma_chan *c = &ioat_chan->dma_chan;
- unsigned long data = (unsigned long) c;
-
- ioat_chan->ioat_dma = ioat_dma;
- ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
- spin_lock_init(&ioat_chan->cleanup_lock);
- ioat_chan->dma_chan.device = dma;
- dma_cookie_init(&ioat_chan->dma_chan);
- list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
- ioat_dma->idx[idx] = ioat_chan;
- init_timer(&ioat_chan->timer);
- ioat_chan->timer.function = ioat_dma->timer_fn;
- ioat_chan->timer.data = data;
- tasklet_init(&ioat_chan->cleanup_task, ioat_dma->cleanup_fn, data);
-}
-
void ioat_stop(struct ioatdma_chan *ioat_chan)
{
struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
@@ -214,299 +174,6 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
return dma_cookie_status(c, cookie, txstate);
}
-/*
- * Perform a IOAT transaction to verify the HW works.
- */
-#define IOAT_TEST_SIZE 2000
-
-static void ioat_dma_test_callback(void *dma_async_param)
-{
- struct completion *cmp = dma_async_param;
-
- complete(cmp);
-}
-
-/**
- * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works.
- * @ioat_dma: dma device to be tested
- */
-int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
-{
- int i;
- u8 *src;
- u8 *dest;
- struct dma_device *dma = &ioat_dma->dma_dev;
- struct device *dev = &ioat_dma->pdev->dev;
- struct dma_chan *dma_chan;
- struct dma_async_tx_descriptor *tx;
- dma_addr_t dma_dest, dma_src;
- dma_cookie_t cookie;
- int err = 0;
- struct completion cmp;
- unsigned long tmo;
- unsigned long flags;
-
- src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
- if (!src)
- return -ENOMEM;
- dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
- if (!dest) {
- kfree(src);
- return -ENOMEM;
- }
-
- /* Fill in src buffer */
- for (i = 0; i < IOAT_TEST_SIZE; i++)
- src[i] = (u8)i;
-
- /* Start copy, using first DMA channel */
- dma_chan = container_of(dma->channels.next, struct dma_chan,
- device_node);
- if (dma->device_alloc_chan_resources(dma_chan) < 1) {
- dev_err(dev, "selftest cannot allocate chan resource\n");
- err = -ENODEV;
- goto out;
- }
-
- dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_src)) {
- dev_err(dev, "mapping src buffer failed\n");
- goto free_resources;
- }
- dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, dma_dest)) {
- dev_err(dev, "mapping dest buffer failed\n");
- goto unmap_src;
- }
- flags = DMA_PREP_INTERRUPT;
- tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest,
- dma_src, IOAT_TEST_SIZE,
- flags);
- if (!tx) {
- dev_err(dev, "Self-test prep failed, disabling\n");
- err = -ENODEV;
- goto unmap_dma;
- }
-
- async_tx_ack(tx);
- init_completion(&cmp);
- tx->callback = ioat_dma_test_callback;
- tx->callback_param = &cmp;
- cookie = tx->tx_submit(tx);
- if (cookie < 0) {
- dev_err(dev, "Self-test setup failed, disabling\n");
- err = -ENODEV;
- goto unmap_dma;
- }
- dma->device_issue_pending(dma_chan);
-
- tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
- if (tmo == 0 ||
- dma->device_tx_status(dma_chan, cookie, NULL)
- != DMA_COMPLETE) {
- dev_err(dev, "Self-test copy timed out, disabling\n");
- err = -ENODEV;
- goto unmap_dma;
- }
- if (memcmp(src, dest, IOAT_TEST_SIZE)) {
- dev_err(dev, "Self-test copy failed compare, disabling\n");
- err = -ENODEV;
- goto free_resources;
- }
-
-unmap_dma:
- dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
-unmap_src:
- dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
-free_resources:
- dma->device_free_chan_resources(dma_chan);
-out:
- kfree(src);
- kfree(dest);
- return err;
-}
-
-/**
- * ioat_dma_setup_interrupts - setup interrupt handler
- * @ioat_dma: ioat dma device
- */
-int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
-{
- struct ioatdma_chan *ioat_chan;
- struct pci_dev *pdev = ioat_dma->pdev;
- struct device *dev = &pdev->dev;
- struct msix_entry *msix;
- int i, j, msixcnt;
- int err = -EINVAL;
- u8 intrctrl = 0;
-
- if (!strcmp(ioat_interrupt_style, "msix"))
- goto msix;
- if (!strcmp(ioat_interrupt_style, "msi"))
- goto msi;
- if (!strcmp(ioat_interrupt_style, "intx"))
- goto intx;
- dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
- goto err_no_irq;
-
-msix:
- /* The number of MSI-X vectors should equal the number of channels */
- msixcnt = ioat_dma->dma_dev.chancnt;
- for (i = 0; i < msixcnt; i++)
- ioat_dma->msix_entries[i].entry = i;
-
- err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt);
- if (err)
- goto msi;
-
- for (i = 0; i < msixcnt; i++) {
- msix = &ioat_dma->msix_entries[i];
- ioat_chan = ioat_chan_by_index(ioat_dma, i);
- err = devm_request_irq(dev, msix->vector,
- ioat_dma_do_interrupt_msix, 0,
- "ioat-msix", ioat_chan);
- if (err) {
- for (j = 0; j < i; j++) {
- msix = &ioat_dma->msix_entries[j];
- ioat_chan = ioat_chan_by_index(ioat_dma, j);
- devm_free_irq(dev, msix->vector, ioat_chan);
- }
- goto msi;
- }
- }
- intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
- ioat_dma->irq_mode = IOAT_MSIX;
- goto done;
-
-msi:
- err = pci_enable_msi(pdev);
- if (err)
- goto intx;
-
- err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
- "ioat-msi", ioat_dma);
- if (err) {
- pci_disable_msi(pdev);
- goto intx;
- }
- ioat_dma->irq_mode = IOAT_MSI;
- goto done;
-
-intx:
- err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
- IRQF_SHARED, "ioat-intx", ioat_dma);
- if (err)
- goto err_no_irq;
-
- ioat_dma->irq_mode = IOAT_INTX;
-done:
- if (ioat_dma->intr_quirk)
- ioat_dma->intr_quirk(ioat_dma);
- intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
- writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
- return 0;
-
-err_no_irq:
- /* Disable all interrupt generation */
- writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
- ioat_dma->irq_mode = IOAT_NOIRQ;
- dev_err(dev, "no usable interrupts\n");
- return err;
-}
-EXPORT_SYMBOL(ioat_dma_setup_interrupts);
-
-static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
-{
- /* Disable all interrupt generation */
- writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
-}
-
-int ioat_probe(struct ioatdma_device *ioat_dma)
-{
- int err = -ENODEV;
- struct dma_device *dma = &ioat_dma->dma_dev;
- struct pci_dev *pdev = ioat_dma->pdev;
- struct device *dev = &pdev->dev;
-
- /* DMA coherent memory pool for DMA descriptor allocations */
- ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
- sizeof(struct ioat_dma_descriptor),
- 64, 0);
- if (!ioat_dma->dma_pool) {
- err = -ENOMEM;
- goto err_dma_pool;
- }
-
- ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
- sizeof(u64),
- SMP_CACHE_BYTES,
- SMP_CACHE_BYTES);
-
- if (!ioat_dma->completion_pool) {
- err = -ENOMEM;
- goto err_completion_pool;
- }
-
- ioat_dma->enumerate_channels(ioat_dma);
-
- dma_cap_set(DMA_MEMCPY, dma->cap_mask);
- dma->dev = &pdev->dev;
-
- if (!dma->chancnt) {
- dev_err(dev, "channel enumeration error\n");
- goto err_setup_interrupts;
- }
-
- err = ioat_dma_setup_interrupts(ioat_dma);
- if (err)
- goto err_setup_interrupts;
-
- err = ioat_dma->self_test(ioat_dma);
- if (err)
- goto err_self_test;
-
- return 0;
-
-err_self_test:
- ioat_disable_interrupts(ioat_dma);
-err_setup_interrupts:
- pci_pool_destroy(ioat_dma->completion_pool);
-err_completion_pool:
- pci_pool_destroy(ioat_dma->dma_pool);
-err_dma_pool:
- return err;
-}
-
-int ioat_register(struct ioatdma_device *ioat_dma)
-{
- int err = dma_async_device_register(&ioat_dma->dma_dev);
-
- if (err) {
- ioat_disable_interrupts(ioat_dma);
- pci_pool_destroy(ioat_dma->completion_pool);
- pci_pool_destroy(ioat_dma->dma_pool);
- }
-
- return err;
-}
-
-void ioat_dma_remove(struct ioatdma_device *ioat_dma)
-{
- struct dma_device *dma = &ioat_dma->dma_dev;
-
- ioat_disable_interrupts(ioat_dma);
-
- ioat_kobject_del(ioat_dma);
-
- dma_async_device_unregister(dma);
-
- pci_pool_destroy(ioat_dma->dma_pool);
- pci_pool_destroy(ioat_dma->completion_pool);
-
- INIT_LIST_HEAD(&dma->channels);
-}
-
void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
{
ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
@@ -577,7 +244,7 @@ static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
__ioat_issue_pending(ioat_chan);
}
-static void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
+void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
spin_lock_bh(&ioat_chan->prep_lock);
__ioat_start_null_desc(ioat_chan);
@@ -645,49 +312,6 @@ int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
return err;
}
-/**
- * ioat_enumerate_channels - find and initialize the device's channels
- * @ioat_dma: the ioat dma device to be enumerated
- */
-int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
-{
- struct ioatdma_chan *ioat_chan;
- struct device *dev = &ioat_dma->pdev->dev;
- struct dma_device *dma = &ioat_dma->dma_dev;
- u8 xfercap_log;
- int i;
-
- INIT_LIST_HEAD(&dma->channels);
- dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
- dma->chancnt &= 0x1f; /* bits [4:0] valid */
- if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
- dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
- dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
- dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
- }
- xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
- xfercap_log &= 0x1f; /* bits [4:0] valid */
- if (xfercap_log == 0)
- return 0;
- dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
-
- for (i = 0; i < dma->chancnt; i++) {
- ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
- if (!ioat_chan)
- break;
-
- ioat_init_channel(ioat_dma, ioat_chan, i);
- ioat_chan->xfercap_log = xfercap_log;
- spin_lock_init(&ioat_chan->prep_lock);
- if (ioat_dma->reset_hw(ioat_chan)) {
- i = 0;
- break;
- }
- }
- dma->chancnt = i;
- return i;
-}
-
static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
struct dma_chan *c = tx->chan;
@@ -741,8 +365,7 @@ ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
return desc;
}
-static void
-ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
+void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
struct ioatdma_device *ioat_dma;
@@ -751,7 +374,7 @@ ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
kmem_cache_free(ioat_cache, desc);
}
-static struct ioat_ring_ent **
+struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
struct ioat_ring_ent **ring;
@@ -788,128 +411,6 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
return ring;
}
-/**
- * ioat_free_chan_resources - release all the descriptors
- * @chan: the channel to be cleaned
- */
-void ioat_free_chan_resources(struct dma_chan *c)
-{
- struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
- struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
- struct ioat_ring_ent *desc;
- const int total_descs = 1 << ioat_chan->alloc_order;
- int descs;
- int i;
-
- /* Before freeing channel resources first check
- * if they have been previously allocated for this channel.
- */
- if (!ioat_chan->ring)
- return;
-
- ioat_stop(ioat_chan);
- ioat_dma->reset_hw(ioat_chan);
-
- spin_lock_bh(&ioat_chan->cleanup_lock);
- spin_lock_bh(&ioat_chan->prep_lock);
- descs = ioat_ring_space(ioat_chan);
- dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
- for (i = 0; i < descs; i++) {
- desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i);
- ioat_free_ring_ent(desc, c);
- }
-
- if (descs < total_descs)
- dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
- total_descs - descs);
-
- for (i = 0; i < total_descs - descs; i++) {
- desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i);
- dump_desc_dbg(ioat_chan, desc);
- ioat_free_ring_ent(desc, c);
- }
-
- kfree(ioat_chan->ring);
- ioat_chan->ring = NULL;
- ioat_chan->alloc_order = 0;
- pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
- ioat_chan->completion_dma);
- spin_unlock_bh(&ioat_chan->prep_lock);
- spin_unlock_bh(&ioat_chan->cleanup_lock);
-
- ioat_chan->last_completion = 0;
- ioat_chan->completion_dma = 0;
- ioat_chan->dmacount = 0;
-}
-
-/* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring
- * @chan: channel to be initialized
- */
-int ioat_alloc_chan_resources(struct dma_chan *c)
-{
- struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
- struct ioat_ring_ent **ring;
- u64 status;
- int order;
- int i = 0;
- u32 chanerr;
-
- /* have we already been set up? */
- if (ioat_chan->ring)
- return 1 << ioat_chan->alloc_order;
-
- /* Setup register to interrupt and write completion status on error */
- writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
-
- /* allocate a completion writeback area */
- /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
- ioat_chan->completion =
- pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
- GFP_KERNEL, &ioat_chan->completion_dma);
- if (!ioat_chan->completion)
- return -ENOMEM;
-
- memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion));
- writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
- ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
- writel(((u64)ioat_chan->completion_dma) >> 32,
- ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
-
- order = ioat_get_alloc_order();
- ring = ioat_alloc_ring(c, order, GFP_KERNEL);
- if (!ring)
- return -ENOMEM;
-
- spin_lock_bh(&ioat_chan->cleanup_lock);
- spin_lock_bh(&ioat_chan->prep_lock);
- ioat_chan->ring = ring;
- ioat_chan->head = 0;
- ioat_chan->issued = 0;
- ioat_chan->tail = 0;
- ioat_chan->alloc_order = order;
- set_bit(IOAT_RUN, &ioat_chan->state);
- spin_unlock_bh(&ioat_chan->prep_lock);
- spin_unlock_bh(&ioat_chan->cleanup_lock);
-
- ioat_start_null_desc(ioat_chan);
-
- /* check that we got off the ground */
- do {
- udelay(1);
- status = ioat_chansts(ioat_chan);
- } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
-
- if (is_ioat_active(status) || is_ioat_idle(status))
- return 1 << ioat_chan->alloc_order;
-
- chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
-
- dev_WARN(to_dev(ioat_chan),
- "failed to start channel chanerr: %#x\n", chanerr);
- ioat_free_chan_resources(c);
- return -EFAULT;
-}
-
bool reshape_ring(struct ioatdma_chan *ioat_chan, int order)
{
/* reshape differs from normal ring allocation in that we want