author    Linus Torvalds <torvalds@linux-foundation.org>  2022-10-07 09:35:50 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-10-07 09:35:50 -0700
commit    7c989b1da3946e40bf71be00a0b401015235605a
tree      33de4ff984af6a301d6e80a05d40a893909388c9  /drivers/nvme
parent    Merge tag 'for-6.1/block-2022-10-03' of git://git.kernel.dk/linux
parent    io_uring: Add missing inline to io_uring_cmd_import_fixed() dummy
Merge tag 'for-6.1/passthrough-2022-10-04' of git://git.kernel.dk/linux
Pull passthrough updates from Jens Axboe:
 "With these changes, passthrough NVMe support over io_uring now
  performs at the same level as block device O_DIRECT, and in many
  cases 6-8% better.

  This contains:

   - Add support for fixed buffers for passthrough (Anuj, Kanchan)

   - Enable batched allocations and freeing on passthrough, similarly
     to what we support on the normal storage path (me)

   - Fix from Geert fixing an issue with !CONFIG_IO_URING"

* tag 'for-6.1/passthrough-2022-10-04' of git://git.kernel.dk/linux:
  io_uring: Add missing inline to io_uring_cmd_import_fixed() dummy
  nvme: wire up fixed buffer support for nvme passthrough
  nvme: pass ubuffer as an integer
  block: extend functionality to map bvec iterator
  block: factor out blk_rq_map_bio_alloc helper
  block: rename bio_map_put to blk_mq_map_bio_put
  nvme: refactor nvme_alloc_request
  nvme: refactor nvme_add_user_metadata
  nvme: Use blk_rq_map_user_io helper
  scsi: Use blk_rq_map_user_io helper
  block: add blk_rq_map_user_io
  io_uring: introduce fixed buffer support for io_uring_cmd
  io_uring: add io_uring_cmd_import_fixed
  nvme: enable batched completions of passthrough IO
  nvme: split out metadata vs non metadata end_io uring_cmd completions
  block: allow end_io based requests in the completion batch handling
  block: change request end_io handler to pass back a return value
  block: enable batched allocation for blk_mq_alloc_request()
  block: kill deprecated BUG_ON() in the flush handling
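The interface change underlying most of this series is visible throughout the diff below: request end_io handlers now return a disposition instead of void, so the block layer's batched completion path can learn whether the handler already disposed of the request. A minimal sketch of the new contract, modeled on the conversions in this diff (my_end_io and my_ctx are illustrative names, not part of the series):

#include <linux/blk-mq.h>
#include <linux/completion.h>

struct my_ctx {
	struct completion done;
};

static enum rq_end_io_ret my_end_io(struct request *rq, blk_status_t err)
{
	struct my_ctx *ctx = rq->end_io_data;

	complete(&ctx->done);
	blk_mq_free_request(rq);

	/*
	 * RQ_END_IO_NONE: this handler disposed of the request itself.
	 * Returning RQ_END_IO_FREE instead asks the block layer to free
	 * the request, which is what lets passthrough completions batch.
	 */
	return RQ_END_IO_NONE;
}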
Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/host/core.c          6
-rw-r--r--  drivers/nvme/host/ioctl.c       227
-rw-r--r--  drivers/nvme/host/pci.c          12
-rw-r--r--  drivers/nvme/target/passthru.c    5
4 files changed, 161 insertions(+), 89 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 64f599a64a7f..059737c1a2c1 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1172,7 +1172,8 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
}
-static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
+static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
+ blk_status_t status)
{
struct nvme_ctrl *ctrl = rq->end_io_data;
unsigned long flags;
@@ -1184,7 +1185,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
dev_err(ctrl->device,
"failed nvme_keep_alive_end_io error=%d\n",
status);
- return;
+ return RQ_END_IO_NONE;
}
ctrl->comp_seen = false;
@@ -1195,6 +1196,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
spin_unlock_irqrestore(&ctrl->lock, flags);
if (startka)
nvme_queue_keep_alive_work(ctrl);
+ return RQ_END_IO_NONE;
}
static void nvme_keep_alive_work(struct work_struct *work)
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 357791ff0623..81f5550b670d 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -20,19 +20,20 @@ static void __user *nvme_to_user_ptr(uintptr_t ptrval)
return (void __user *)ptrval;
}
-static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
- unsigned len, u32 seed, bool write)
+static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
+ unsigned len, u32 seed)
{
struct bio_integrity_payload *bip;
int ret = -ENOMEM;
void *buf;
+ struct bio *bio = req->bio;
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
goto out;
ret = -EFAULT;
- if (write && copy_from_user(buf, ubuf, len))
+ if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len))
goto out_free_meta;
bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
@@ -45,9 +46,13 @@ static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
bip->bip_iter.bi_sector = seed;
ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
offset_in_page(buf));
- if (ret == len)
- return buf;
- ret = -ENOMEM;
+ if (ret != len) {
+ ret = -ENOMEM;
+ goto out_free_meta;
+ }
+
+ req->cmd_flags |= REQ_INTEGRITY;
+ return buf;
out_free_meta:
kfree(buf);
out:
@@ -65,74 +70,76 @@ static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
}
static struct request *nvme_alloc_user_request(struct request_queue *q,
- struct nvme_command *cmd, void __user *ubuffer,
- unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- u32 meta_seed, void **metap, unsigned timeout, bool vec,
- blk_opf_t rq_flags, blk_mq_req_flags_t blk_flags)
+ struct nvme_command *cmd, blk_opf_t rq_flags,
+ blk_mq_req_flags_t blk_flags)
{
- bool write = nvme_is_write(cmd);
- struct nvme_ns *ns = q->queuedata;
- struct block_device *bdev = ns ? ns->disk->part0 : NULL;
struct request *req;
- struct bio *bio = NULL;
- void *meta = NULL;
- int ret;
req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
if (IS_ERR(req))
return req;
nvme_init_request(req, cmd);
-
- if (timeout)
- req->timeout = timeout;
nvme_req(req)->flags |= NVME_REQ_USERCMD;
+ return req;
+}
- if (ubuffer && bufflen) {
- if (!vec)
- ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
- GFP_KERNEL);
- else {
- struct iovec fast_iov[UIO_FASTIOV];
- struct iovec *iov = fast_iov;
- struct iov_iter iter;
-
- ret = import_iovec(rq_data_dir(req), ubuffer, bufflen,
- UIO_FASTIOV, &iov, &iter);
- if (ret < 0)
- goto out;
- ret = blk_rq_map_user_iov(q, req, NULL, &iter,
- GFP_KERNEL);
- kfree(iov);
- }
- if (ret)
+static int nvme_map_user_request(struct request *req, u64 ubuffer,
+ unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
+ u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
+ bool vec)
+{
+ struct request_queue *q = req->q;
+ struct nvme_ns *ns = q->queuedata;
+ struct block_device *bdev = ns ? ns->disk->part0 : NULL;
+ struct bio *bio = NULL;
+ void *meta = NULL;
+ int ret;
+
+ if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
+ struct iov_iter iter;
+
+ /* fixedbufs is only for non-vectored io */
+ if (WARN_ON_ONCE(vec))
+ return -EINVAL;
+ ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
+ rq_data_dir(req), &iter, ioucmd);
+ if (ret < 0)
goto out;
- bio = req->bio;
- if (bdev)
- bio_set_dev(bio, bdev);
- if (bdev && meta_buffer && meta_len) {
- meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
- meta_seed, write);
- if (IS_ERR(meta)) {
- ret = PTR_ERR(meta);
- goto out_unmap;
- }
- req->cmd_flags |= REQ_INTEGRITY;
- *metap = meta;
+ ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
+ } else {
+ ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
+ bufflen, GFP_KERNEL, vec, 0, 0,
+ rq_data_dir(req));
+ }
+
+ if (ret)
+ goto out;
+ bio = req->bio;
+ if (bdev)
+ bio_set_dev(bio, bdev);
+
+ if (bdev && meta_buffer && meta_len) {
+ meta = nvme_add_user_metadata(req, meta_buffer, meta_len,
+ meta_seed);
+ if (IS_ERR(meta)) {
+ ret = PTR_ERR(meta);
+ goto out_unmap;
}
+ *metap = meta;
}
- return req;
+ return ret;
out_unmap:
if (bio)
blk_rq_unmap_user(bio);
out:
blk_mq_free_request(req);
- return ERR_PTR(ret);
+ return ret;
}
static int nvme_submit_user_cmd(struct request_queue *q,
- struct nvme_command *cmd, void __user *ubuffer,
+ struct nvme_command *cmd, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
u32 meta_seed, u64 *result, unsigned timeout, bool vec)
{
@@ -143,11 +150,18 @@ static int nvme_submit_user_cmd(struct request_queue *q,
u32 effects;
int ret;
- req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer,
- meta_len, meta_seed, &meta, timeout, vec, 0, 0);
+ req = nvme_alloc_user_request(q, cmd, 0, 0);
if (IS_ERR(req))
return PTR_ERR(req);
+ req->timeout = timeout;
+ if (ubuffer && bufflen) {
+ ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
+ meta_len, meta_seed, &meta, NULL, vec);
+ if (ret)
+ return ret;
+ }
+
bio = req->bio;
ctrl = nvme_req(req)->ctrl;
@@ -227,7 +241,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.appmask = cpu_to_le16(io.appmask);
return nvme_submit_user_cmd(ns->queue, &c,
- nvme_to_user_ptr(io.addr), length,
+ io.addr, length,
metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
false);
}
@@ -281,7 +295,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
timeout = msecs_to_jiffies(cmd.timeout_ms);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
- nvme_to_user_ptr(cmd.addr), cmd.data_len,
+ cmd.addr, cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &result, timeout, false);
@@ -327,7 +341,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
timeout = msecs_to_jiffies(cmd.timeout_ms);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
- nvme_to_user_ptr(cmd.addr), cmd.data_len,
+ cmd.addr, cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &cmd.result, timeout, vec);
@@ -356,9 +370,15 @@ struct nvme_uring_cmd_pdu {
struct bio *bio;
struct request *req;
};
- void *meta; /* kernel-resident buffer */
- void __user *meta_buffer;
u32 meta_len;
+ u32 nvme_status;
+ union {
+ struct {
+ void *meta; /* kernel-resident buffer */
+ void __user *meta_buffer;
+ };
+ u64 result;
+ } u;
};
static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
@@ -367,11 +387,10 @@ static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
}
-static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
+static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd)
{
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
struct request *req = pdu->req;
- struct bio *bio = req->bio;
int status;
u64 result;
@@ -382,26 +401,39 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
result = le64_to_cpu(nvme_req(req)->result.u64);
- if (pdu->meta)
- status = nvme_finish_user_metadata(req, pdu->meta_buffer,
- pdu->meta, pdu->meta_len, status);
- if (bio)
- blk_rq_unmap_user(bio);
+ if (pdu->meta_len)
+ status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
+ pdu->u.meta, pdu->meta_len, status);
+ if (req->bio)
+ blk_rq_unmap_user(req->bio);
blk_mq_free_request(req);
io_uring_cmd_done(ioucmd, status, result);
}
-static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
+static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
+{
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+
+ if (pdu->bio)
+ blk_rq_unmap_user(pdu->bio);
+
+ io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result);
+}
+
+static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+ blk_status_t err)
{
struct io_uring_cmd *ioucmd = req->end_io_data;
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
- /* extract bio before reusing the same field for request */
- struct bio *bio = pdu->bio;
void *cookie = READ_ONCE(ioucmd->cookie);
- pdu->req = req;
- req->bio = bio;
+ req->bio = pdu->bio;
+ if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
+ pdu->nvme_status = -EINTR;
+ else
+ pdu->nvme_status = nvme_req(req)->status;
+ pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
/*
* For iopoll, complete it directly.
@@ -411,6 +443,30 @@ static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
nvme_uring_task_cb(ioucmd);
else
io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+
+ return RQ_END_IO_FREE;
+}
+
+static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
+ blk_status_t err)
+{
+ struct io_uring_cmd *ioucmd = req->end_io_data;
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ void *cookie = READ_ONCE(ioucmd->cookie);
+
+ req->bio = pdu->bio;
+ pdu->req = req;
+
+ /*
+ * For iopoll, complete it directly.
+ * Otherwise, move the completion to task work.
+ */
+ if (cookie != NULL && blk_rq_is_poll(req))
+ nvme_uring_task_meta_cb(ioucmd);
+ else
+ io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_meta_cb);
+
+ return RQ_END_IO_NONE;
}
static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
@@ -425,6 +481,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
blk_opf_t rq_flags = 0;
blk_mq_req_flags_t blk_flags = 0;
void *meta = NULL;
+ int ret;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -464,15 +521,18 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
rq_flags |= REQ_POLLED;
retry:
- req = nvme_alloc_user_request(q, &c, nvme_to_user_ptr(d.addr),
- d.data_len, nvme_to_user_ptr(d.metadata),
- d.metadata_len, 0, &meta, d.timeout_ms ?
- msecs_to_jiffies(d.timeout_ms) : 0, vec, rq_flags,
- blk_flags);
+ req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);
if (IS_ERR(req))
return PTR_ERR(req);
- req->end_io = nvme_uring_cmd_end_io;
- req->end_io_data = ioucmd;
+ req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;
+
+ if (d.addr && d.data_len) {
+ ret = nvme_map_user_request(req, d.addr,
+ d.data_len, nvme_to_user_ptr(d.metadata),
+ d.metadata_len, 0, &meta, ioucmd, vec);
+ if (ret)
+ return ret;
+ }
if (issue_flags & IO_URING_F_IOPOLL && rq_flags & REQ_POLLED) {
if (unlikely(!req->bio)) {
@@ -487,10 +547,15 @@ retry:
}
/* to free bio on completion, as req->bio will be null at that time */
pdu->bio = req->bio;
- pdu->meta = meta;
- pdu->meta_buffer = nvme_to_user_ptr(d.metadata);
pdu->meta_len = d.metadata_len;
-
+ req->end_io_data = ioucmd;
+ if (pdu->meta_len) {
+ pdu->u.meta = meta;
+ pdu->u.meta_buffer = nvme_to_user_ptr(d.metadata);
+ req->end_io = nvme_uring_cmd_end_io_meta;
+ } else {
+ req->end_io = nvme_uring_cmd_end_io;
+ }
blk_execute_rq_nowait(req, false);
return -EIOCBQUEUED;
}
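For context on what the IORING_URING_CMD_FIXED wiring above enables, here is a hedged userspace sketch of one fixed-buffer NVMe read over io_uring passthrough. It assumes liburing, a ring created with IORING_SETUP_SQE128 | IORING_SETUP_CQE32, and a buffer previously registered with io_uring_register_buffers(); the opcode, namespace ID, and LBA fields are illustrative and error handling is elided:

#include <liburing.h>
#include <linux/nvme_ioctl.h>
#include <stdint.h>
#include <string.h>

static int queue_fixed_passthru_read(struct io_uring *ring, int nvme_fd,
				     void *buf, __u32 data_len,
				     __u64 slba, __u32 nr_blocks)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct nvme_uring_cmd *cmd = (struct nvme_uring_cmd *)sqe->cmd;

	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = nvme_fd;			/* /dev/ngXnY char device */
	sqe->cmd_op = NVME_URING_CMD_IO;
	sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;	/* new in this series */
	sqe->buf_index = 0;			/* slot in the registered table */

	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = 0x02;			/* NVMe read */
	cmd->nsid = 1;				/* assumption: namespace 1 */
	cmd->addr = (__u64)(uintptr_t)buf;	/* must lie in the fixed buffer */
	cmd->data_len = data_len;
	cmd->cdw10 = slba & 0xffffffff;		/* starting LBA, low 32 bits */
	cmd->cdw11 = slba >> 32;		/* starting LBA, high 32 bits */
	cmd->cdw12 = nr_blocks - 1;		/* block count, 0's based */

	return io_uring_submit(ring);
}

On completion, cqe->res carries the status that nvme_uring_task_cb() hands to io_uring_cmd_done() above, and with CQE32 the extra CQE space carries the 64-bit command result.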
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 9aafc1ed6439..5b796efa325b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1268,7 +1268,7 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}
-static void abort_endio(struct request *req, blk_status_t error)
+static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error)
{
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
@@ -1276,6 +1276,7 @@ static void abort_endio(struct request *req, blk_status_t error)
"Abort status: 0x%x", nvme_req(req)->status);
atomic_inc(&nvmeq->dev->ctrl.abort_limit);
blk_mq_free_request(req);
+ return RQ_END_IO_NONE;
}
static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
@@ -2447,22 +2448,25 @@ out_unlock:
return result;
}
-static void nvme_del_queue_end(struct request *req, blk_status_t error)
+static enum rq_end_io_ret nvme_del_queue_end(struct request *req,
+ blk_status_t error)
{
struct nvme_queue *nvmeq = req->end_io_data;
blk_mq_free_request(req);
complete(&nvmeq->delete_done);
+ return RQ_END_IO_NONE;
}
-static void nvme_del_cq_end(struct request *req, blk_status_t error)
+static enum rq_end_io_ret nvme_del_cq_end(struct request *req,
+ blk_status_t error)
{
struct nvme_queue *nvmeq = req->end_io_data;
if (error)
set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
- nvme_del_queue_end(req, error);
+ return nvme_del_queue_end(req, error);
}
static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 94d3153bae54..79af5140af8b 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -245,14 +245,15 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
nvme_passthru_end(ctrl, effects, req->cmd, status);
}
-static void nvmet_passthru_req_done(struct request *rq,
- blk_status_t blk_status)
+static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
+ blk_status_t blk_status)
{
struct nvmet_req *req = rq->end_io_data;
req->cqe->result = nvme_req(rq)->result;
nvmet_req_complete(req, nvme_req(rq)->status);
blk_mq_free_request(rq);
+ return RQ_END_IO_NONE;
}
static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
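Several of the hunks above funnel into the new blk_rq_map_user_io() helper ("block: add blk_rq_map_user_io" in the shortlog), which folds the vectored and non-vectored user-mapping paths into one call. A minimal sketch of a driver-side caller, with the argument order mirroring the nvme usage above (my_map_user_buf is an illustrative name):

/*
 * A NULL rq_map_data lets the block layer pin the user pages itself;
 * `vectored` selects iovec-based mapping, as the vec flag does above.
 */
static int my_map_user_buf(struct request *req, void __user *ubuf,
			   unsigned long len, bool vectored)
{
	return blk_rq_map_user_io(req, NULL, ubuf, len, GFP_KERNEL,
				  vectored, 0, 0, rq_data_dir(req));
}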