author		Christoph Hellwig <hch@lst.de>	2018-12-02 17:46:18 +0100
committer	Jens Axboe <axboe@kernel.dk>	2018-12-04 11:38:18 -0700
commit		6322307809649cba6f545640563f95d686ecf404 (patch)
tree		0d18bf6816246d55fa70c3fc61b4a27d6dfd2f37 /drivers/nvme/host
parent		nvme-pci: use atomic bitops to mark a queue enabled (diff)
nvme-pci: cleanup SQ allocation a bit
Use a bit flag to mark if the SQ was allocated from the CMB, and clean up
the surrounding code a bit.

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/nvme/host')
-rw-r--r--	drivers/nvme/host/pci.c	33
1 file changed, 15 insertions(+), 18 deletions(-)
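The patch replaces the dedicated sq_cmds_is_io bool with a single bit in the queue's existing flags word: set_bit() marks the SQ as carved out of the CMB at allocation time, and test_and_clear_bit() at teardown both picks the matching free routine and leaves the flag clear. The following is a minimal userspace sketch of that pattern, not driver code: GCC's __atomic builtins stand in for the kernel's atomic bitops, and the names struct queue, QUEUE_SQ_CMB, alloc_sq() and free_sq() are made up for illustration.

/*
 * Minimal userspace sketch (not driver code): one bit in an unsigned long
 * records whether the SQ buffer came from the CMB, so teardown can pick the
 * matching free routine.  set_bit()/test_and_clear_bit() are emulated with
 * GCC __atomic builtins; every name below is made up for illustration.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define QUEUE_SQ_CMB	1	/* bit number, analogous to NVMEQ_SQ_CMB */

struct queue {
	void *sq_cmds;
	unsigned long flags;
};

static void q_set_bit(unsigned int nr, unsigned long *addr)
{
	__atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED);
}

static bool q_test_and_clear_bit(unsigned int nr, unsigned long *addr)
{
	return __atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED) &
	       (1UL << nr);
}

/* Stand-ins for pci_alloc_p2pmem() and dma_alloc_coherent(). */
static void *alloc_from_cmb(size_t size) { (void)size; return NULL; }
static void *alloc_coherent(size_t size) { return malloc(size); }

static int alloc_sq(struct queue *q, size_t size, bool try_cmb)
{
	if (try_cmb) {
		q->sq_cmds = alloc_from_cmb(size);
		if (q->sq_cmds) {
			/* Mark the queue so free_sq() uses the CMB path. */
			q_set_bit(QUEUE_SQ_CMB, &q->flags);
			return 0;
		}
	}
	/* Fall back to ordinary coherent memory. */
	q->sq_cmds = alloc_coherent(size);
	return q->sq_cmds ? 0 : -1;
}

static void free_sq(struct queue *q)
{
	if (!q->sq_cmds)
		return;
	if (q_test_and_clear_bit(QUEUE_SQ_CMB, &q->flags))
		printf("would call pci_free_p2pmem()\n");
	else
		free(q->sq_cmds);	/* dma_free_coherent() in the driver */
}

int main(void)
{
	struct queue q = { NULL, 0 };

	if (alloc_sq(&q, 4096, true) == 0)
		free_sq(&q);
	return 0;
}

Because test_and_clear_bit() reports whether the bit was set while also clearing it, the teardown path can choose the right free routine and leave the flag ready for a later re-allocation, which is what the patched nvme_free_queue() relies on.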
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 022395a319f4..b820dd0351cb 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -186,7 +186,6 @@ struct nvme_queue {
 	struct nvme_dev *dev;
 	spinlock_t sq_lock;
 	struct nvme_command *sq_cmds;
-	bool sq_cmds_is_io;
 	spinlock_t cq_lock ____cacheline_aligned_in_smp;
 	volatile struct nvme_completion *cqes;
 	struct blk_mq_tags **tags;
@@ -203,6 +202,7 @@ struct nvme_queue {
 	u8 cq_phase;
 	unsigned long flags;
 #define NVMEQ_ENABLED		0
+#define NVMEQ_SQ_CMB		1
 	u32 *dbbuf_sq_db;
 	u32 *dbbuf_cq_db;
 	u32 *dbbuf_sq_ei;
@@ -1366,17 +1366,15 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
 {
 	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
 				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+	if (!nvmeq->sq_cmds)
+		return;
 
-	if (nvmeq->sq_cmds) {
-		if (nvmeq->sq_cmds_is_io)
-			pci_free_p2pmem(to_pci_dev(nvmeq->q_dmadev),
-					nvmeq->sq_cmds,
-					SQ_SIZE(nvmeq->q_depth));
-		else
-			dma_free_coherent(nvmeq->q_dmadev,
-					  SQ_SIZE(nvmeq->q_depth),
-					  nvmeq->sq_cmds,
-					  nvmeq->sq_dma_addr);
+	if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
+		pci_free_p2pmem(to_pci_dev(nvmeq->q_dmadev),
+				nvmeq->sq_cmds, SQ_SIZE(nvmeq->q_depth));
+	} else {
+		dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+				  nvmeq->sq_cmds, nvmeq->sq_dma_addr);
 	}
 }
 
@@ -1462,15 +1460,14 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth));
 		nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
 						nvmeq->sq_cmds);
-		nvmeq->sq_cmds_is_io = true;
-	}
-
-	if (!nvmeq->sq_cmds) {
-		nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
-					&nvmeq->sq_dma_addr, GFP_KERNEL);
-		nvmeq->sq_cmds_is_io = false;
+		if (nvmeq->sq_dma_addr) {
+			set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
+			return 0;
+		}
 	}
 
+	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+				&nvmeq->sq_dma_addr, GFP_KERNEL);
 	if (!nvmeq->sq_cmds)
 		return -ENOMEM;
 	return 0;