Diffstat (limited to 'drivers/block/rsxx/dma.c')
-rw-r--r--    drivers/block/rsxx/dma.c    119
1 file changed, 60 insertions(+), 59 deletions(-)
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index bed32f16b084..fc88ba3e1bd2 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -221,6 +221,21 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
}
/*----------------- RSXX DMA Handling -------------------*/
+static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
+{
+ if (dma->cmd != HW_CMD_BLK_DISCARD) {
+ if (!pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
+ pci_unmap_page(ctrl->card->dev, dma->dma_addr,
+ get_dma_size(dma),
+ dma->cmd == HW_CMD_BLK_WRITE ?
+ PCI_DMA_TODEVICE :
+ PCI_DMA_FROMDEVICE);
+ }
+ }
+
+ kmem_cache_free(rsxx_dma_pool, dma);
+}
+
static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
struct rsxx_dma *dma,
unsigned int status)
@@ -232,21 +247,14 @@ static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
if (status & DMA_CANCELLED)
ctrl->stats.dma_cancelled++;
- if (dma->dma_addr)
- pci_unmap_page(ctrl->card->dev, dma->dma_addr,
- get_dma_size(dma),
- dma->cmd == HW_CMD_BLK_WRITE ?
- PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
-
if (dma->cb)
dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
- kmem_cache_free(rsxx_dma_pool, dma);
+ rsxx_free_dma(ctrl, dma);
}
int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
- struct list_head *q)
+ struct list_head *q, unsigned int done)
{
struct rsxx_dma *dma;
struct rsxx_dma *tmp;
@@ -254,7 +262,10 @@ int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
list_for_each_entry_safe(dma, tmp, q, list) {
list_del(&dma->list);
- rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+ if (done & COMPLETE_DMA)
+ rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+ else
+ rsxx_free_dma(ctrl, dma);
cnt++;
}
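The new done argument selects between the two cleanup behaviours with a bitmask test (done & COMPLETE_DMA). The flag definitions live in rsxx_priv.h and are not part of this diff; a plausible sketch, assuming a simple bit flag:

/* Hypothetical definitions -- the real ones are in rsxx_priv.h,
 * which this diff does not touch. The done & COMPLETE_DMA test
 * above only requires COMPLETE_DMA to be nonzero and FREE_DMA
 * to leave that bit clear. */
#define COMPLETE_DMA	0x1	/* cancel: run completion callbacks */
#define FREE_DMA	0x0	/* just unmap and free, no callbacks */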
@@ -370,7 +381,7 @@ static void dma_engine_stalled(unsigned long data)
/* Clean up the DMA queue */
spin_lock(&ctrl->queue_lock);
- cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+ cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
spin_unlock(&ctrl->queue_lock);
cnt += rsxx_dma_cancel(ctrl);
@@ -388,6 +399,7 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
int tag;
int cmds_pending = 0;
struct hw_cmd *hw_cmd_buf;
+ int dir;
hw_cmd_buf = ctrl->cmd.buf;
@@ -424,6 +436,31 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
continue;
}
+ if (dma->cmd != HW_CMD_BLK_DISCARD) {
+ if (dma->cmd == HW_CMD_BLK_WRITE)
+ dir = PCI_DMA_TODEVICE;
+ else
+ dir = PCI_DMA_FROMDEVICE;
+
+ /*
+ * pci_map_page is called here because, by design, we can
+ * only issue up to 255 commands to the hardware at one
+ * time per DMA channel. So the maximum amount of mapped
+ * memory would be 255 * 4 channels * 4096 bytes, which is
+ * far less than 2GB, the limit of an x8 Non-HWWD PCIe
+ * slot. This way pci_map_page should never fail for lack
+ * of mappable memory.
+ */
+ dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page,
+ dma->pg_off, dma->sub_page.cnt << 9, dir);
+ if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
+ push_tracker(ctrl->trackers, tag);
+ rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+ continue;
+ }
+ }
+
set_tracker_dma(ctrl->trackers, tag, dma);
hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
hw_cmd_buf[ctrl->cmd.idx].tag = tag;
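As a sanity check on the bound the new comment cites (a standalone worked example, not driver code; the 255-command and 4-channel figures are taken from the comment itself):

/* Standalone check of the mapping bound described above. */
#include <stdio.h>

int main(void)
{
	unsigned long long mapped = 255ULL * 4 * 4096;

	/* 4,177,920 bytes, just under 4 MiB -- comfortably below the
	 * 2GB window the comment cites, so pci_map_page() should not
	 * fail for lack of mappable memory. */
	printf("max mapped: %llu bytes (%.2f MiB)\n",
	       mapped, mapped / (1024.0 * 1024.0));
	return 0;
}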
@@ -620,14 +657,6 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
if (!dma)
return -ENOMEM;
- dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
- dir ? PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
- if (!dma->dma_addr) {
- kmem_cache_free(rsxx_dma_pool, dma);
- return -ENOMEM;
- }
-
dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
dma->laddr = laddr;
dma->sub_page.off = (dma_off >> 9);
@@ -736,11 +765,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
return 0;
bvec_err:
- for (i = 0; i < card->n_targets; i++) {
- spin_lock_bh(&card->ctrl[i].queue_lock);
- rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i]);
- spin_unlock_bh(&card->ctrl[i].queue_lock);
- }
+ for (i = 0; i < card->n_targets; i++)
+ rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
+ FREE_DMA);
return st;
}
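The two cleanup modes line up with the two kinds of call site in this patch; a condensed illustration, paraphrasing the hunks above and below rather than quoting them:

/* DMAs already on the controller queue: hold the queue lock and
 * complete each one so upper layers see the cancellation. */
spin_lock_bh(&ctrl->queue_lock);
rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
spin_unlock_bh(&ctrl->queue_lock);

/* DMAs still on a private per-target list that was never queued
 * (the bvec_err path above): presumably nothing else can reach
 * them, so the lock is dropped and they are freed without
 * running completion callbacks. */
rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i], FREE_DMA);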
@@ -990,7 +1017,7 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
/* Clean up the DMA queue */
spin_lock_bh(&ctrl->queue_lock);
- rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+ rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
spin_unlock_bh(&ctrl->queue_lock);
rsxx_dma_cancel(ctrl);
@@ -1032,6 +1059,14 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
else
card->ctrl[i].stats.reads_issued--;
+ if (dma->cmd != HW_CMD_BLK_DISCARD) {
+ pci_unmap_page(card->dev, dma->dma_addr,
+ get_dma_size(dma),
+ dma->cmd == HW_CMD_BLK_WRITE ?
+ PCI_DMA_TODEVICE :
+ PCI_DMA_FROMDEVICE);
+ }
+
list_add_tail(&dma->list, &issued_dmas[i]);
push_tracker(card->ctrl[i].trackers, j);
cnt++;
@@ -1043,15 +1078,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
card->ctrl[i].stats.sw_q_depth += cnt;
card->ctrl[i].e_cnt = 0;
-
- list_for_each_entry(dma, &card->ctrl[i].queue, list) {
- if (dma->dma_addr)
- pci_unmap_page(card->dev, dma->dma_addr,
- get_dma_size(dma),
- dma->cmd == HW_CMD_BLK_WRITE ?
- PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
- }
spin_unlock_bh(&card->ctrl[i].queue_lock);
}
@@ -1060,31 +1086,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
return 0;
}
-int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
-{
- struct rsxx_dma *dma;
- int i;
-
- for (i = 0; i < card->n_targets; i++) {
- spin_lock_bh(&card->ctrl[i].queue_lock);
- list_for_each_entry(dma, &card->ctrl[i].queue, list) {
- dma->dma_addr = pci_map_page(card->dev, dma->page,
- dma->pg_off, get_dma_size(dma),
- dma->cmd == HW_CMD_BLK_WRITE ?
- PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
- if (!dma->dma_addr) {
- spin_unlock_bh(&card->ctrl[i].queue_lock);
- kmem_cache_free(rsxx_dma_pool, dma);
- return -ENOMEM;
- }
- }
- spin_unlock_bh(&card->ctrl[i].queue_lock);
- }
-
- return 0;
-}
-
int rsxx_dma_init(void)
{
rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
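With mapping moved to issue time, the EEH recovery path no longer needs a remap pass; a condensed sketch of the resulting flow (paraphrased from the hunks above -- the EEH entry points themselves live elsewhere in the driver and are not shown in this diff):

/* EEH freeze: rsxx_eeh_save_issued_dmas() now unmaps each in-flight
 * DMA as it moves it back to the software queue, so no stale bus
 * address survives the slot reset. */
rsxx_eeh_save_issued_dmas(card);

/* ... PCI slot reset / card restart ... */

/* Reissue: rsxx_issue_dmas() calls pci_map_page() per DMA right
 * before handing it to hardware, which is why the separate
 * rsxx_eeh_remap_dmas() pass could be deleted. */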