Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/Kconfig | 1
-rw-r--r--  drivers/nvme/Makefile | 1
-rw-r--r--  drivers/nvme/common/Kconfig | 4
-rw-r--r--  drivers/nvme/common/Makefile | 7
-rw-r--r--  drivers/nvme/common/auth.c | 483
-rw-r--r--  drivers/nvme/host/Kconfig | 50
-rw-r--r--  drivers/nvme/host/Makefile | 9
-rw-r--r--  drivers/nvme/host/apple.c | 1600
-rw-r--r--  drivers/nvme/host/auth.c | 1017
-rw-r--r--  drivers/nvme/host/constants.c | 187
-rw-r--r--  drivers/nvme/host/core.c | 3979
-rw-r--r--  drivers/nvme/host/fabrics.c | 339
-rw-r--r--  drivers/nvme/host/fabrics.h | 56
-rw-r--r--  drivers/nvme/host/fault_inject.c | 2
-rw-r--r--  drivers/nvme/host/fc.c | 1258
-rw-r--r--  drivers/nvme/host/fc.h | 227
-rw-r--r--  drivers/nvme/host/hwmon.c | 63
-rw-r--r--  drivers/nvme/host/ioctl.c | 915
-rw-r--r--  drivers/nvme/host/lightnvm.c | 1272
-rw-r--r--  drivers/nvme/host/multipath.c | 587
-rw-r--r--  drivers/nvme/host/nvme.h | 565
-rw-r--r--  drivers/nvme/host/pci.c | 1456
-rw-r--r--  drivers/nvme/host/rdma.c | 895
-rw-r--r--  drivers/nvme/host/tcp.c | 990
-rw-r--r--  drivers/nvme/host/trace.c | 103
-rw-r--r--  drivers/nvme/host/trace.h | 16
-rw-r--r--  drivers/nvme/host/zns.c | 250
-rw-r--r--  drivers/nvme/target/Kconfig | 30
-rw-r--r--  drivers/nvme/target/Makefile | 3
-rw-r--r--  drivers/nvme/target/admin-cmd.c | 395
-rw-r--r--  drivers/nvme/target/auth.c | 526
-rw-r--r--  drivers/nvme/target/configfs.c | 841
-rw-r--r--  drivers/nvme/target/core.c | 569
-rw-r--r--  drivers/nvme/target/discovery.c | 48
-rw-r--r--  drivers/nvme/target/fabrics-cmd-auth.c | 537
-rw-r--r--  drivers/nvme/target/fabrics-cmd.c | 131
-rw-r--r--  drivers/nvme/target/fc.c | 906
-rw-r--r--  drivers/nvme/target/fcloop.c | 357
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c | 224
-rw-r--r--  drivers/nvme/target/io-cmd-file.c | 53
-rw-r--r--  drivers/nvme/target/loop.c | 176
-rw-r--r--  drivers/nvme/target/nvmet.h | 268
-rw-r--r--  drivers/nvme/target/passthru.c | 659
-rw-r--r--  drivers/nvme/target/rdma.c | 709
-rw-r--r--  drivers/nvme/target/tcp.c | 444
-rw-r--r--  drivers/nvme/target/trace.c | 18
-rw-r--r--  drivers/nvme/target/trace.h | 46
-rw-r--r--  drivers/nvme/target/zns.c | 627
48 files changed, 17752 insertions, 6147 deletions
diff --git a/drivers/nvme/Kconfig b/drivers/nvme/Kconfig
index 87ae409a32b9..656e46d938da 100644
--- a/drivers/nvme/Kconfig
+++ b/drivers/nvme/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "NVME Support"
+source "drivers/nvme/common/Kconfig"
source "drivers/nvme/host/Kconfig"
source "drivers/nvme/target/Kconfig"
diff --git a/drivers/nvme/Makefile b/drivers/nvme/Makefile
index fb42c44609a8..eedca8c72098 100644
--- a/drivers/nvme/Makefile
+++ b/drivers/nvme/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NVME_COMMON) += common/
obj-y += host/
obj-y += target/
diff --git a/drivers/nvme/common/Kconfig b/drivers/nvme/common/Kconfig
new file mode 100644
index 000000000000..4514f44362dd
--- /dev/null
+++ b/drivers/nvme/common/Kconfig
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config NVME_COMMON
+ tristate
diff --git a/drivers/nvme/common/Makefile b/drivers/nvme/common/Makefile
new file mode 100644
index 000000000000..720c625b8a52
--- /dev/null
+++ b/drivers/nvme/common/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -I$(src)
+
+obj-$(CONFIG_NVME_COMMON) += nvme-common.o
+
+nvme-common-y += auth.o
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
new file mode 100644
index 000000000000..d90e4f0c08b7
--- /dev/null
+++ b/drivers/nvme/common/auth.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
+ */
+
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <linux/prandom.h>
+#include <linux/scatterlist.h>
+#include <asm/unaligned.h>
+#include <crypto/hash.h>
+#include <crypto/dh.h>
+#include <linux/nvme.h>
+#include <linux/nvme-auth.h>
+
+static u32 nvme_dhchap_seqnum;
+static DEFINE_MUTEX(nvme_dhchap_mutex);
+
+u32 nvme_auth_get_seqnum(void)
+{
+ u32 seqnum;
+
+ mutex_lock(&nvme_dhchap_mutex);
+ if (!nvme_dhchap_seqnum)
+ nvme_dhchap_seqnum = get_random_u32();
+ else {
+ nvme_dhchap_seqnum++;
+ if (!nvme_dhchap_seqnum)
+ nvme_dhchap_seqnum++;
+ }
+ seqnum = nvme_dhchap_seqnum;
+ mutex_unlock(&nvme_dhchap_mutex);
+ return seqnum;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_get_seqnum);
+
+static struct nvme_auth_dhgroup_map {
+ const char name[16];
+ const char kpp[16];
+} dhgroup_map[] = {
+ [NVME_AUTH_DHGROUP_NULL] = {
+ .name = "null", .kpp = "null" },
+ [NVME_AUTH_DHGROUP_2048] = {
+ .name = "ffdhe2048", .kpp = "ffdhe2048(dh)" },
+ [NVME_AUTH_DHGROUP_3072] = {
+ .name = "ffdhe3072", .kpp = "ffdhe3072(dh)" },
+ [NVME_AUTH_DHGROUP_4096] = {
+ .name = "ffdhe4096", .kpp = "ffdhe4096(dh)" },
+ [NVME_AUTH_DHGROUP_6144] = {
+ .name = "ffdhe6144", .kpp = "ffdhe6144(dh)" },
+ [NVME_AUTH_DHGROUP_8192] = {
+ .name = "ffdhe8192", .kpp = "ffdhe8192(dh)" },
+};
+
+const char *nvme_auth_dhgroup_name(u8 dhgroup_id)
+{
+ if (dhgroup_id >= ARRAY_SIZE(dhgroup_map))
+ return NULL;
+ return dhgroup_map[dhgroup_id].name;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_name);
+
+const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id)
+{
+ if (dhgroup_id >= ARRAY_SIZE(dhgroup_map))
+ return NULL;
+ return dhgroup_map[dhgroup_id].kpp;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_kpp);
+
+u8 nvme_auth_dhgroup_id(const char *dhgroup_name)
+{
+ int i;
+
+ if (!dhgroup_name || !strlen(dhgroup_name))
+ return NVME_AUTH_DHGROUP_INVALID;
+ for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
+ if (!strlen(dhgroup_map[i].name))
+ continue;
+ if (!strncmp(dhgroup_map[i].name, dhgroup_name,
+ strlen(dhgroup_map[i].name)))
+ return i;
+ }
+ return NVME_AUTH_DHGROUP_INVALID;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_id);
+
+static struct nvme_dhchap_hash_map {
+ int len;
+ const char hmac[15];
+ const char digest[8];
+} hash_map[] = {
+ [NVME_AUTH_HASH_SHA256] = {
+ .len = 32,
+ .hmac = "hmac(sha256)",
+ .digest = "sha256",
+ },
+ [NVME_AUTH_HASH_SHA384] = {
+ .len = 48,
+ .hmac = "hmac(sha384)",
+ .digest = "sha384",
+ },
+ [NVME_AUTH_HASH_SHA512] = {
+ .len = 64,
+ .hmac = "hmac(sha512)",
+ .digest = "sha512",
+ },
+};
+
+const char *nvme_auth_hmac_name(u8 hmac_id)
+{
+ if (hmac_id >= ARRAY_SIZE(hash_map))
+ return NULL;
+ return hash_map[hmac_id].hmac;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_name);
+
+const char *nvme_auth_digest_name(u8 hmac_id)
+{
+ if (hmac_id >= ARRAY_SIZE(hash_map))
+ return NULL;
+ return hash_map[hmac_id].digest;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_digest_name);
+
+u8 nvme_auth_hmac_id(const char *hmac_name)
+{
+ int i;
+
+ if (!hmac_name || !strlen(hmac_name))
+ return NVME_AUTH_HASH_INVALID;
+
+ for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
+ if (!strlen(hash_map[i].hmac))
+ continue;
+ if (!strncmp(hash_map[i].hmac, hmac_name,
+ strlen(hash_map[i].hmac)))
+ return i;
+ }
+ return NVME_AUTH_HASH_INVALID;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_id);
+
+size_t nvme_auth_hmac_hash_len(u8 hmac_id)
+{
+ if (hmac_id >= ARRAY_SIZE(hash_map))
+ return 0;
+ return hash_map[hmac_id].len;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_hash_len);
+
+struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
+ u8 key_hash)
+{
+ struct nvme_dhchap_key *key;
+ unsigned char *p;
+ u32 crc;
+ int ret, key_len;
+ size_t allocated_len = strlen(secret);
+
+ /* Secret might be affixed with a ':' */
+ p = strrchr(secret, ':');
+ if (p)
+ allocated_len = p - secret;
+ key = kzalloc(sizeof(*key), GFP_KERNEL);
+ if (!key)
+ return ERR_PTR(-ENOMEM);
+ key->key = kzalloc(allocated_len, GFP_KERNEL);
+ if (!key->key) {
+ ret = -ENOMEM;
+ goto out_free_key;
+ }
+
+ key_len = base64_decode(secret, allocated_len, key->key);
+ if (key_len < 0) {
+ pr_debug("base64 key decoding error %d\n",
+ key_len);
+ ret = key_len;
+ goto out_free_secret;
+ }
+
+ if (key_len != 36 && key_len != 52 &&
+ key_len != 68) {
+ pr_err("Invalid key len %d\n", key_len);
+ ret = -EINVAL;
+ goto out_free_secret;
+ }
+
+ if (key_hash > 0 &&
+ (key_len - 4) != nvme_auth_hmac_hash_len(key_hash)) {
+ pr_err("Mismatched key len %d for %s\n", key_len,
+ nvme_auth_hmac_name(key_hash));
+ ret = -EINVAL;
+ goto out_free_secret;
+ }
+
+ /* The last four bytes are the CRC in little-endian format */
+ key_len -= 4;
+ /*
+ * The Linux implementation doesn't do the pre- and post-inversions,
+ * so we have to do them manually.
+ */
+ crc = ~crc32(~0, key->key, key_len);
+
+ if (get_unaligned_le32(key->key + key_len) != crc) {
+ pr_err("key crc mismatch (key %08x, crc %08x)\n",
+ get_unaligned_le32(key->key + key_len), crc);
+ ret = -EKEYREJECTED;
+ goto out_free_secret;
+ }
+ key->len = key_len;
+ key->hash = key_hash;
+ return key;
+out_free_secret:
+ kfree_sensitive(key->key);
+out_free_key:
+ kfree(key);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_extract_key);
+
+void nvme_auth_free_key(struct nvme_dhchap_key *key)
+{
+ if (!key)
+ return;
+ kfree_sensitive(key->key);
+ kfree(key);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_free_key);
+
+u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
+{
+ const char *hmac_name;
+ struct crypto_shash *key_tfm;
+ struct shash_desc *shash;
+ u8 *transformed_key;
+ int ret;
+
+ if (!key || !key->key) {
+ pr_warn("No key specified\n");
+ return ERR_PTR(-ENOKEY);
+ }
+ if (key->hash == 0) {
+ transformed_key = kmemdup(key->key, key->len, GFP_KERNEL);
+ return transformed_key ? transformed_key : ERR_PTR(-ENOMEM);
+ }
+ hmac_name = nvme_auth_hmac_name(key->hash);
+ if (!hmac_name) {
+ pr_warn("Invalid key hash id %d\n", key->hash);
+ return ERR_PTR(-EINVAL);
+ }
+
+ key_tfm = crypto_alloc_shash(hmac_name, 0, 0);
+ if (IS_ERR(key_tfm))
+ return (u8 *)key_tfm;
+
+ shash = kmalloc(sizeof(struct shash_desc) +
+ crypto_shash_descsize(key_tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto out_free_key;
+ }
+
+ transformed_key = kzalloc(crypto_shash_digestsize(key_tfm), GFP_KERNEL);
+ if (!transformed_key) {
+ ret = -ENOMEM;
+ goto out_free_shash;
+ }
+
+ shash->tfm = key_tfm;
+ ret = crypto_shash_setkey(key_tfm, key->key, key->len);
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_init(shash);
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_update(shash, nqn, strlen(nqn));
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_update(shash, "NVMe-over-Fabrics", 17);
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_final(shash, transformed_key);
+ if (ret < 0)
+ goto out_free_transformed_key;
+
+ kfree(shash);
+ crypto_free_shash(key_tfm);
+
+ return transformed_key;
+
+out_free_transformed_key:
+ kfree_sensitive(transformed_key);
+out_free_shash:
+ kfree(shash);
+out_free_key:
+ crypto_free_shash(key_tfm);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_transform_key);
+
+static int nvme_auth_hash_skey(int hmac_id, u8 *skey, size_t skey_len, u8 *hkey)
+{
+ const char *digest_name;
+ struct crypto_shash *tfm;
+ int ret;
+
+ digest_name = nvme_auth_digest_name(hmac_id);
+ if (!digest_name) {
+ pr_debug("%s: failed to get digest for %d\n", __func__,
+ hmac_id);
+ return -EINVAL;
+ }
+ tfm = crypto_alloc_shash(digest_name, 0, 0);
+ if (IS_ERR(tfm))
+ return -ENOMEM;
+
+ ret = crypto_shash_tfm_digest(tfm, skey, skey_len, hkey);
+ if (ret < 0)
+ pr_debug("%s: Failed to hash digest len %zu\n", __func__,
+ skey_len);
+
+ crypto_free_shash(tfm);
+ return ret;
+}
+
+int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
+ u8 *challenge, u8 *aug, size_t hlen)
+{
+ struct crypto_shash *tfm;
+ struct shash_desc *desc;
+ u8 *hashed_key;
+ const char *hmac_name;
+ int ret;
+
+ hashed_key = kmalloc(hlen, GFP_KERNEL);
+ if (!hashed_key)
+ return -ENOMEM;
+
+ ret = nvme_auth_hash_skey(hmac_id, skey,
+ skey_len, hashed_key);
+ if (ret < 0)
+ goto out_free_key;
+
+ hmac_name = nvme_auth_hmac_name(hmac_id);
+ if (!hmac_name) {
+ pr_warn("%s: invalid hash algorithm %d\n",
+ __func__, hmac_id);
+ ret = -EINVAL;
+ goto out_free_key;
+ }
+
+ tfm = crypto_alloc_shash(hmac_name, 0, 0);
+ if (IS_ERR(tfm)) {
+ ret = PTR_ERR(tfm);
+ goto out_free_key;
+ }
+
+ desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
+ GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto out_free_hash;
+ }
+ desc->tfm = tfm;
+
+ ret = crypto_shash_setkey(tfm, hashed_key, hlen);
+ if (ret)
+ goto out_free_desc;
+
+ ret = crypto_shash_init(desc);
+ if (ret)
+ goto out_free_desc;
+
+ ret = crypto_shash_update(desc, challenge, hlen);
+ if (ret)
+ goto out_free_desc;
+
+ ret = crypto_shash_final(desc, aug);
+out_free_desc:
+ kfree_sensitive(desc);
+out_free_hash:
+ crypto_free_shash(tfm);
+out_free_key:
+ kfree_sensitive(hashed_key);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_augmented_challenge);
+
+int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid)
+{
+ int ret;
+
+ ret = crypto_kpp_set_secret(dh_tfm, NULL, 0);
+ if (ret)
+ pr_debug("failed to set private key, error %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_gen_privkey);
+
+int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm,
+ u8 *host_key, size_t host_key_len)
+{
+ struct kpp_request *req;
+ struct crypto_wait wait;
+ struct scatterlist dst;
+ int ret;
+
+ req = kpp_request_alloc(dh_tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ crypto_init_wait(&wait);
+ kpp_request_set_input(req, NULL, 0);
+ sg_init_one(&dst, host_key, host_key_len);
+ kpp_request_set_output(req, &dst, host_key_len);
+ kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
+ ret = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);
+ kpp_request_free(req);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_gen_pubkey);
+
+int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm,
+ u8 *ctrl_key, size_t ctrl_key_len,
+ u8 *sess_key, size_t sess_key_len)
+{
+ struct kpp_request *req;
+ struct crypto_wait wait;
+ struct scatterlist src, dst;
+ int ret;
+
+ req = kpp_request_alloc(dh_tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ crypto_init_wait(&wait);
+ sg_init_one(&src, ctrl_key, ctrl_key_len);
+ kpp_request_set_input(req, &src, ctrl_key_len);
+ sg_init_one(&dst, sess_key, sess_key_len);
+ kpp_request_set_output(req, &dst, sess_key_len);
+ kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
+ ret = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait);
+
+ kpp_request_free(req);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_gen_shared_secret);
+
+int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key)
+{
+ struct nvme_dhchap_key *key;
+ u8 key_hash;
+
+ if (!secret) {
+ *ret_key = NULL;
+ return 0;
+ }
+
+ if (sscanf(secret, "DHHC-1:%hhd:%*s:", &key_hash) != 1)
+ return -EINVAL;
+
+ /* Pass in the secret without the 'DHHC-1:XX:' prefix */
+ key = nvme_auth_extract_key(secret + 10, key_hash);
+ if (IS_ERR(key)) {
+ *ret_key = NULL;
+ return PTR_ERR(key);
+ }
+
+ *ret_key = key;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
+
+MODULE_LICENSE("GPL v2");
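
For reference, the secret parsed by nvme_auth_extract_key() and nvme_auth_generate_key() above is a "DHHC-1:<hash id>:<base64>:" string whose decoded payload is the key (32, 48, or 64 bytes) followed by a 4-byte little-endian CRC-32 of those key bytes. Below is a minimal user-space sketch of that CRC check, assuming zlib's crc32(), which already applies the pre- and post-inversions the kernel code performs by hand; it is an illustration, not part of this patch.

#include <stdint.h>
#include <stddef.h>
#include <zlib.h>

/*
 * Hypothetical helper, not part of this patch: verify the trailing CRC of
 * an already base64-decoded DHHC-1 secret (key bytes plus 4-byte LE CRC-32).
 */
static int dhchap_decoded_key_crc_ok(const uint8_t *buf, size_t len)
{
	size_t key_len;
	uint32_t stored, calc;

	/* Same valid lengths as nvme_auth_extract_key(): 32/48/64 + 4 */
	if (len != 36 && len != 52 && len != 68)
		return 0;
	key_len = len - 4;
	stored = (uint32_t)buf[key_len] |
		 (uint32_t)buf[key_len + 1] << 8 |
		 (uint32_t)buf[key_len + 2] << 16 |
		 (uint32_t)buf[key_len + 3] << 24;
	calc = (uint32_t)crc32(0L, buf, key_len); /* equals ~crc32_le(~0, ...) */
	return stored == calc;
}
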
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index b9358db83e96..2f6a7f8c94e8 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -7,7 +7,7 @@ config BLK_DEV_NVME
tristate "NVM Express block device"
depends on PCI && BLOCK
select NVME_CORE
- ---help---
+ help
The NVM Express driver is for solid state drives directly
connected to the PCI or PCI Express bus. If you know you
don't have one of these, it is safe to answer N.
@@ -18,12 +18,20 @@ config BLK_DEV_NVME
config NVME_MULTIPATH
bool "NVMe multipath support"
depends on NVME_CORE
- ---help---
+ help
This option enables support for multipath access to NVMe
subsystems. If this option is enabled only a single
- /dev/nvmeXnY device will show up for each NVMe namespaces,
+ /dev/nvmeXnY device will show up for each NVMe namespace,
even if it is accessible through multiple controllers.
+config NVME_VERBOSE_ERRORS
+ bool "NVMe verbose error reporting"
+ depends on NVME_CORE
+ help
+ This option enables verbose reporting for NVMe errors. The
+ error translation table will grow the kernel image size by
+ about 4 KB.
+
config NVME_HWMON
bool "NVMe hardware monitoring"
depends on (NVME_CORE=y && HWMON=y) || (NVME_CORE=m && HWMON)
@@ -32,15 +40,13 @@ config NVME_HWMON
a hardware monitoring device will be created for each NVMe drive
in the system.
- If unsure, say N.
-
config NVME_FABRICS
+ select NVME_CORE
tristate
config NVME_RDMA
tristate "NVM Express over Fabrics RDMA host driver"
depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
- select NVME_CORE
select NVME_FABRICS
select SG_POOL
help
@@ -57,7 +63,6 @@ config NVME_FC
tristate "NVM Express over Fabrics FC host driver"
depends on BLOCK
depends on HAS_DMA
- select NVME_CORE
select NVME_FABRICS
select SG_POOL
help
@@ -73,8 +78,9 @@ config NVME_FC
config NVME_TCP
tristate "NVM Express over Fabrics TCP host driver"
depends on INET
- depends on BLK_DEV_NVME
+ depends on BLOCK
select NVME_FABRICS
+ select CRYPTO
select CRYPTO_CRC32C
help
This provides support for the NVMe over Fabrics protocol using
@@ -85,3 +91,31 @@ config NVME_TCP
from https://github.com/linux-nvme/nvme-cli.
If unsure, say N.
+
+config NVME_AUTH
+ bool "NVM Express over Fabrics In-Band Authentication"
+ depends on NVME_CORE
+ select NVME_COMMON
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_DH
+ select CRYPTO_DH_RFC7919_GROUPS
+ help
+ This provides support for NVMe over Fabrics In-Band Authentication.
+
+ If unsure, say N.
+
+config NVME_APPLE
+ tristate "Apple ANS2 NVM Express host driver"
+ depends on OF && BLOCK
+ depends on APPLE_RTKIT && APPLE_SART
+ depends on ARCH_APPLE || COMPILE_TEST
+ select NVME_CORE
+ help
+ This provides support for the NVMe controller embedded in Apple SoCs
+ such as the M1.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nvme-apple.
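
CONFIG_NVME_VERBOSE_ERRORS above builds the new constants.c (listed in the diffstat; its hunk is not shown here), which carries the error translation table mentioned in the help text. The sketch below only shows the general shape of such a status-code-to-string lookup; the names and table contents are hypothetical, not the actual constants.c.

/* Illustrative sketch only; names and table contents are hypothetical. */
#define EXAMPLE_ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static const char * const example_generic_status[] = {
	[0x00] = "Successful Completion",
	[0x01] = "Invalid Command Opcode",
	[0x02] = "Invalid Field in Command",
	[0x04] = "Data Transfer Error",
};

static const char *example_status_name(unsigned short sc)
{
	/* Entries left out of the table decode as "Unknown" */
	if (sc >= EXAMPLE_ARRAY_SIZE(example_generic_status) ||
	    !example_generic_status[sc])
		return "Unknown";
	return example_generic_status[sc];
}
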
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index fc7b26be692d..e27202d22c7d 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -8,13 +8,16 @@ obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
obj-$(CONFIG_NVME_FC) += nvme-fc.o
obj-$(CONFIG_NVME_TCP) += nvme-tcp.o
+obj-$(CONFIG_NVME_APPLE) += nvme-apple.o
-nvme-core-y := core.o
+nvme-core-y += core.o ioctl.o
+nvme-core-$(CONFIG_NVME_VERBOSE_ERRORS) += constants.o
nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
-nvme-core-$(CONFIG_NVM) += lightnvm.o
+nvme-core-$(CONFIG_BLK_DEV_ZONED) += zns.o
nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o
nvme-core-$(CONFIG_NVME_HWMON) += hwmon.o
+nvme-core-$(CONFIG_NVME_AUTH) += auth.o
nvme-y += pci.o
@@ -25,3 +28,5 @@ nvme-rdma-y += rdma.o
nvme-fc-y += fc.o
nvme-tcp-y += tcp.o
+
+nvme-apple-y += apple.o
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
new file mode 100644
index 000000000000..ff8b083dc5c6
--- /dev/null
+++ b/drivers/nvme/host/apple.c
@@ -0,0 +1,1600 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Apple ANS NVM Express device driver
+ * Copyright The Asahi Linux Contributors
+ *
+ * Based on the pci.c NVM Express device driver
+ * Copyright (c) 2011-2014, Intel Corporation.
+ * and on the rdma.c NVMe over Fabrics RDMA host code.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ */
+
+#include <linux/async.h>
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/once.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/soc/apple/rtkit.h>
+#include <linux/soc/apple/sart.h>
+#include <linux/reset.h>
+#include <linux/time64.h>
+
+#include "nvme.h"
+
+#define APPLE_ANS_BOOT_TIMEOUT USEC_PER_SEC
+#define APPLE_ANS_MAX_QUEUE_DEPTH 64
+
+#define APPLE_ANS_COPROC_CPU_CONTROL 0x44
+#define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4)
+
+#define APPLE_ANS_ACQ_DB 0x1004
+#define APPLE_ANS_IOCQ_DB 0x100c
+
+#define APPLE_ANS_MAX_PEND_CMDS_CTRL 0x1210
+
+#define APPLE_ANS_BOOT_STATUS 0x1300
+#define APPLE_ANS_BOOT_STATUS_OK 0xde71ce55
+
+#define APPLE_ANS_UNKNOWN_CTRL 0x24008
+#define APPLE_ANS_PRP_NULL_CHECK BIT(11)
+
+#define APPLE_ANS_LINEAR_SQ_CTRL 0x24908
+#define APPLE_ANS_LINEAR_SQ_EN BIT(0)
+
+#define APPLE_ANS_LINEAR_ASQ_DB 0x2490c
+#define APPLE_ANS_LINEAR_IOSQ_DB 0x24910
+
+#define APPLE_NVMMU_NUM_TCBS 0x28100
+#define APPLE_NVMMU_ASQ_TCB_BASE 0x28108
+#define APPLE_NVMMU_IOSQ_TCB_BASE 0x28110
+#define APPLE_NVMMU_TCB_INVAL 0x28118
+#define APPLE_NVMMU_TCB_STAT 0x28120
+
+/*
+ * This controller is a bit weird in the way command tags work: both the
+ * admin and the IO queue share the same tag space. Additionally, tags
+ * cannot be higher than 0x40, which effectively limits the combined
+ * queue depth to 0x40. Rather than waste half of that on the admin queue,
+ * which gets much less traffic, we reduce its size here.
+ * The controller also doesn't support async events, so no space needs to
+ * be reserved for NVME_NR_AEN_COMMANDS.
+ */
+#define APPLE_NVME_AQ_DEPTH 2
+#define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1)
+
+/*
+ * These can be higher, but we need to ensure that any command doesn't
+ * require an sg allocation that needs more than a page of data.
+ */
+#define NVME_MAX_KB_SZ 4096
+#define NVME_MAX_SEGS 127
+
+/*
+ * This controller comes with an embedded IOMMU known as NVMMU.
+ * The NVMMU is pointed to an array of TCBs indexed by the command tag.
+ * Each command must be configured inside this structure before it's allowed
+ * to execute, including commands that don't require DMA transfers.
+ *
+ * The exceptions to this are Apple's vendor-specific commands (opcode 0xD8 on the
+ * admin queue): Those commands must still be added to the NVMMU but the DMA
+ * buffers cannot be represented as PRPs and must instead be allowed using SART.
+ *
+ * Programming the PRPs to the same values as those in the submission queue
+ * looks rather silly at first. This hardware is however designed for a kernel
+ * that runs the NVMMU code in a higher exception level than the NVMe driver.
+ * In that setting the NVMe driver first programs the submission queue entry
+ * and then executes a hypercall to the code that is allowed to program the
+ * NVMMU. The NVMMU driver then creates a shadow copy of the PRPs while
+ * verifying that they don't point to kernel text, data, pagetables, or similar
+ * protected areas before programming the TCB to point to this shadow copy.
+ * Since Linux doesn't do any of that we may as well just point both the queue
+ * and the TCB PRP pointer to the same memory.
+ */
+struct apple_nvmmu_tcb {
+ u8 opcode;
+
+#define APPLE_ANS_TCB_DMA_FROM_DEVICE BIT(0)
+#define APPLE_ANS_TCB_DMA_TO_DEVICE BIT(1)
+ u8 dma_flags;
+
+ u8 command_id;
+ u8 _unk0;
+ __le16 length;
+ u8 _unk1[18];
+ __le64 prp1;
+ __le64 prp2;
+ u8 _unk2[16];
+ u8 aes_iv[8];
+ u8 _aes_unk[64];
+};
+
+/*
+ * The Apple NVMe controller only supports a single admin and a single IO queue
+ * which are both limited to 64 entries and share a single interrupt.
+ *
+ * The completion queue works as usual. The submission "queue" instead is
+ * an array indexed by the command tag on this hardware. Commands must also be
+ * present in the NVMMU's tcb array. They are triggered by writing their tag to
+ * an MMIO register.
+ */
+struct apple_nvme_queue {
+ struct nvme_command *sqes;
+ struct nvme_completion *cqes;
+ struct apple_nvmmu_tcb *tcbs;
+
+ dma_addr_t sq_dma_addr;
+ dma_addr_t cq_dma_addr;
+ dma_addr_t tcb_dma_addr;
+
+ u32 __iomem *sq_db;
+ u32 __iomem *cq_db;
+
+ u16 cq_head;
+ u8 cq_phase;
+
+ bool is_adminq;
+ bool enabled;
+};
+
+/*
+ * The apple_nvme_iod describes the data in an I/O.
+ *
+ * The sg pointer contains the list of PRP chunk allocations in addition
+ * to the actual struct scatterlist.
+ */
+struct apple_nvme_iod {
+ struct nvme_request req;
+ struct nvme_command cmd;
+ struct apple_nvme_queue *q;
+ int npages; /* In the PRP list. 0 means small pool in use */
+ int nents; /* Used in scatterlist */
+ dma_addr_t first_dma;
+ unsigned int dma_len; /* length of single DMA segment mapping */
+ struct scatterlist *sg;
+};
+
+struct apple_nvme {
+ struct device *dev;
+
+ void __iomem *mmio_coproc;
+ void __iomem *mmio_nvme;
+
+ struct device **pd_dev;
+ struct device_link **pd_link;
+ int pd_count;
+
+ struct apple_sart *sart;
+ struct apple_rtkit *rtk;
+ struct reset_control *reset;
+
+ struct dma_pool *prp_page_pool;
+ struct dma_pool *prp_small_pool;
+ mempool_t *iod_mempool;
+
+ struct nvme_ctrl ctrl;
+ struct work_struct remove_work;
+
+ struct apple_nvme_queue adminq;
+ struct apple_nvme_queue ioq;
+
+ struct blk_mq_tag_set admin_tagset;
+ struct blk_mq_tag_set tagset;
+
+ int irq;
+ spinlock_t lock;
+};
+
+static_assert(sizeof(struct nvme_command) == 64);
+static_assert(sizeof(struct apple_nvmmu_tcb) == 128);
+
+static inline struct apple_nvme *ctrl_to_apple_nvme(struct nvme_ctrl *ctrl)
+{
+ return container_of(ctrl, struct apple_nvme, ctrl);
+}
+
+static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
+{
+ if (q->is_adminq)
+ return container_of(q, struct apple_nvme, adminq);
+ else
+ return container_of(q, struct apple_nvme, ioq);
+}
+
+static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
+{
+ if (q->is_adminq)
+ return APPLE_NVME_AQ_DEPTH;
+ else
+ return APPLE_ANS_MAX_QUEUE_DEPTH;
+}
+
+static void apple_nvme_rtkit_crashed(void *cookie)
+{
+ struct apple_nvme *anv = cookie;
+
+ dev_warn(anv->dev, "RTKit crashed; unable to recover without a reboot");
+ nvme_reset_ctrl(&anv->ctrl);
+}
+
+static int apple_nvme_sart_dma_setup(void *cookie,
+ struct apple_rtkit_shmem *bfr)
+{
+ struct apple_nvme *anv = cookie;
+ int ret;
+
+ if (bfr->iova)
+ return -EINVAL;
+ if (!bfr->size)
+ return -EINVAL;
+
+ bfr->buffer =
+ dma_alloc_coherent(anv->dev, bfr->size, &bfr->iova, GFP_KERNEL);
+ if (!bfr->buffer)
+ return -ENOMEM;
+
+ ret = apple_sart_add_allowed_region(anv->sart, bfr->iova, bfr->size);
+ if (ret) {
+ dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
+ bfr->buffer = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void apple_nvme_sart_dma_destroy(void *cookie,
+ struct apple_rtkit_shmem *bfr)
+{
+ struct apple_nvme *anv = cookie;
+
+ apple_sart_remove_allowed_region(anv->sart, bfr->iova, bfr->size);
+ dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
+}
+
+static const struct apple_rtkit_ops apple_nvme_rtkit_ops = {
+ .crashed = apple_nvme_rtkit_crashed,
+ .shmem_setup = apple_nvme_sart_dma_setup,
+ .shmem_destroy = apple_nvme_sart_dma_destroy,
+};
+
+static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag)
+{
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+
+ writel(tag, anv->mmio_nvme + APPLE_NVMMU_TCB_INVAL);
+ if (readl(anv->mmio_nvme + APPLE_NVMMU_TCB_STAT))
+ dev_warn_ratelimited(anv->dev,
+ "NVMMU TCB invalidation failed\n");
+}
+
+static void apple_nvme_submit_cmd(struct apple_nvme_queue *q,
+ struct nvme_command *cmd)
+{
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+ u32 tag = nvme_tag_from_cid(cmd->common.command_id);
+ struct apple_nvmmu_tcb *tcb = &q->tcbs[tag];
+
+ tcb->opcode = cmd->common.opcode;
+ tcb->prp1 = cmd->common.dptr.prp1;
+ tcb->prp2 = cmd->common.dptr.prp2;
+ tcb->length = cmd->rw.length;
+ tcb->command_id = tag;
+
+ if (nvme_is_write(cmd))
+ tcb->dma_flags = APPLE_ANS_TCB_DMA_TO_DEVICE;
+ else
+ tcb->dma_flags = APPLE_ANS_TCB_DMA_FROM_DEVICE;
+
+ memcpy(&q->sqes[tag], cmd, sizeof(*cmd));
+
+ /*
+ * This lock here doesn't make much sense at first glance, but
+ * removing it will result in occasional missed completion
+ * interrupts even though the commands still appear on the CQ.
+ * It's unclear why this happens but our best guess is that
+ * there is a bug in the firmware triggered when a new command
+ * is issued while we're inside the irq handler between the
+ * NVMMU invalidation (and making the tag available again)
+ * and the final CQ update.
+ */
+ spin_lock_irq(&anv->lock);
+ writel(tag, q->sq_db);
+ spin_unlock_irq(&anv->lock);
+}
+
+/*
+ * From pci.c:
+ * Will slightly overestimate the number of pages needed. This is OK
+ * as it only leads to a small amount of wasted memory for the lifetime of
+ * the I/O.
+ */
+static inline size_t apple_nvme_iod_alloc_size(void)
+{
+ const unsigned int nprps = DIV_ROUND_UP(
+ NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE);
+ const int npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
+ const size_t alloc_size = sizeof(__le64 *) * npages +
+ sizeof(struct scatterlist) * NVME_MAX_SEGS;
+
+ return alloc_size;
+}
+
+static void **apple_nvme_iod_list(struct request *req)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
+}
+
+static void apple_nvme_free_prps(struct apple_nvme *anv, struct request *req)
+{
+ const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ dma_addr_t dma_addr = iod->first_dma;
+ int i;
+
+ for (i = 0; i < iod->npages; i++) {
+ __le64 *prp_list = apple_nvme_iod_list(req)[i];
+ dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+
+ dma_pool_free(anv->prp_page_pool, prp_list, dma_addr);
+ dma_addr = next_dma_addr;
+ }
+}
+
+static void apple_nvme_unmap_data(struct apple_nvme *anv, struct request *req)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ if (iod->dma_len) {
+ dma_unmap_page(anv->dev, iod->first_dma, iod->dma_len,
+ rq_dma_dir(req));
+ return;
+ }
+
+ WARN_ON_ONCE(!iod->nents);
+
+ dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
+ if (iod->npages == 0)
+ dma_pool_free(anv->prp_small_pool, apple_nvme_iod_list(req)[0],
+ iod->first_dma);
+ else
+ apple_nvme_free_prps(anv, req);
+ mempool_free(iod->sg, anv->iod_mempool);
+}
+
+static void apple_nvme_print_sgl(struct scatterlist *sgl, int nents)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sgl, sg, nents, i) {
+ dma_addr_t phys = sg_phys(sg);
+
+ pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d dma_address:%pad dma_length:%d\n",
+ i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
+ sg_dma_len(sg));
+ }
+}
+
+static blk_status_t apple_nvme_setup_prps(struct apple_nvme *anv,
+ struct request *req,
+ struct nvme_rw_command *cmnd)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct dma_pool *pool;
+ int length = blk_rq_payload_bytes(req);
+ struct scatterlist *sg = iod->sg;
+ int dma_len = sg_dma_len(sg);
+ u64 dma_addr = sg_dma_address(sg);
+ int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
+ __le64 *prp_list;
+ void **list = apple_nvme_iod_list(req);
+ dma_addr_t prp_dma;
+ int nprps, i;
+
+ length -= (NVME_CTRL_PAGE_SIZE - offset);
+ if (length <= 0) {
+ iod->first_dma = 0;
+ goto done;
+ }
+
+ dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
+ if (dma_len) {
+ dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
+ } else {
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ if (length <= NVME_CTRL_PAGE_SIZE) {
+ iod->first_dma = dma_addr;
+ goto done;
+ }
+
+ nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
+ if (nprps <= (256 / 8)) {
+ pool = anv->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = anv->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list) {
+ iod->first_dma = dma_addr;
+ iod->npages = -1;
+ return BLK_STS_RESOURCE;
+ }
+ list[0] = prp_list;
+ iod->first_dma = prp_dma;
+ i = 0;
+ for (;;) {
+ if (i == NVME_CTRL_PAGE_SIZE >> 3) {
+ __le64 *old_prp_list = prp_list;
+
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list)
+ goto free_prps;
+ list[iod->npages++] = prp_list;
+ prp_list[0] = old_prp_list[i - 1];
+ old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+ i = 1;
+ }
+ prp_list[i++] = cpu_to_le64(dma_addr);
+ dma_len -= NVME_CTRL_PAGE_SIZE;
+ dma_addr += NVME_CTRL_PAGE_SIZE;
+ length -= NVME_CTRL_PAGE_SIZE;
+ if (length <= 0)
+ break;
+ if (dma_len > 0)
+ continue;
+ if (unlikely(dma_len < 0))
+ goto bad_sgl;
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+done:
+ cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
+ return BLK_STS_OK;
+free_prps:
+ apple_nvme_free_prps(anv, req);
+ return BLK_STS_RESOURCE;
+bad_sgl:
+ WARN(DO_ONCE(apple_nvme_print_sgl, iod->sg, iod->nents),
+ "Invalid SGL for payload:%d nents:%d\n", blk_rq_payload_bytes(req),
+ iod->nents);
+ return BLK_STS_IOERR;
+}
+
+static blk_status_t apple_nvme_setup_prp_simple(struct apple_nvme *anv,
+ struct request *req,
+ struct nvme_rw_command *cmnd,
+ struct bio_vec *bv)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
+ unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;
+
+ iod->first_dma = dma_map_bvec(anv->dev, bv, rq_dma_dir(req), 0);
+ if (dma_mapping_error(anv->dev, iod->first_dma))
+ return BLK_STS_RESOURCE;
+ iod->dma_len = bv->bv_len;
+
+ cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
+ if (bv->bv_len > first_prp_len)
+ cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
+ return BLK_STS_OK;
+}
+
+static blk_status_t apple_nvme_map_data(struct apple_nvme *anv,
+ struct request *req,
+ struct nvme_command *cmnd)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ blk_status_t ret = BLK_STS_RESOURCE;
+ int nr_mapped;
+
+ if (blk_rq_nr_phys_segments(req) == 1) {
+ struct bio_vec bv = req_bvec(req);
+
+ if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
+ return apple_nvme_setup_prp_simple(anv, req, &cmnd->rw,
+ &bv);
+ }
+
+ iod->dma_len = 0;
+ iod->sg = mempool_alloc(anv->iod_mempool, GFP_ATOMIC);
+ if (!iod->sg)
+ return BLK_STS_RESOURCE;
+ sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
+ iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
+ if (!iod->nents)
+ goto out_free_sg;
+
+ nr_mapped = dma_map_sg_attrs(anv->dev, iod->sg, iod->nents,
+ rq_dma_dir(req), DMA_ATTR_NO_WARN);
+ if (!nr_mapped)
+ goto out_free_sg;
+
+ ret = apple_nvme_setup_prps(anv, req, &cmnd->rw);
+ if (ret != BLK_STS_OK)
+ goto out_unmap_sg;
+ return BLK_STS_OK;
+
+out_unmap_sg:
+ dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
+out_free_sg:
+ mempool_free(iod->sg, anv->iod_mempool);
+ return ret;
+}
+
+static __always_inline void apple_nvme_unmap_rq(struct request *req)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct apple_nvme *anv = queue_to_apple_nvme(iod->q);
+
+ if (blk_rq_nr_phys_segments(req))
+ apple_nvme_unmap_data(anv, req);
+}
+
+static void apple_nvme_complete_rq(struct request *req)
+{
+ apple_nvme_unmap_rq(req);
+ nvme_complete_rq(req);
+}
+
+static void apple_nvme_complete_batch(struct io_comp_batch *iob)
+{
+ nvme_complete_batch(iob, apple_nvme_unmap_rq);
+}
+
+static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue *q)
+{
+ struct nvme_completion *hcqe = &q->cqes[q->cq_head];
+
+ return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == q->cq_phase;
+}
+
+static inline struct blk_mq_tags *
+apple_nvme_queue_tagset(struct apple_nvme *anv, struct apple_nvme_queue *q)
+{
+ if (q->is_adminq)
+ return anv->admin_tagset.tags[0];
+ else
+ return anv->tagset.tags[0];
+}
+
+static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
+ struct io_comp_batch *iob, u16 idx)
+{
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+ struct nvme_completion *cqe = &q->cqes[idx];
+ __u16 command_id = READ_ONCE(cqe->command_id);
+ struct request *req;
+
+ apple_nvmmu_inval(q, command_id);
+
+ req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id);
+ if (unlikely(!req)) {
+ dev_warn(anv->dev, "invalid id %d completed", command_id);
+ return;
+ }
+
+ if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
+ !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
+ apple_nvme_complete_batch))
+ apple_nvme_complete_rq(req);
+}
+
+static inline void apple_nvme_update_cq_head(struct apple_nvme_queue *q)
+{
+ u32 tmp = q->cq_head + 1;
+
+ if (tmp == apple_nvme_queue_depth(q)) {
+ q->cq_head = 0;
+ q->cq_phase ^= 1;
+ } else {
+ q->cq_head = tmp;
+ }
+}
+
+static bool apple_nvme_poll_cq(struct apple_nvme_queue *q,
+ struct io_comp_batch *iob)
+{
+ bool found = false;
+
+ while (apple_nvme_cqe_pending(q)) {
+ found = true;
+
+ /*
+ * load-load control dependency between phase and the rest of
+ * the cqe requires a full read memory barrier
+ */
+ dma_rmb();
+ apple_nvme_handle_cqe(q, iob, q->cq_head);
+ apple_nvme_update_cq_head(q);
+ }
+
+ if (found)
+ writel(q->cq_head, q->cq_db);
+
+ return found;
+}
+
+static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
+{
+ bool found;
+ DEFINE_IO_COMP_BATCH(iob);
+
+ if (!READ_ONCE(q->enabled) && !force)
+ return false;
+
+ found = apple_nvme_poll_cq(q, &iob);
+
+ if (!rq_list_empty(iob.req_list))
+ apple_nvme_complete_batch(&iob);
+
+ return found;
+}
+
+static irqreturn_t apple_nvme_irq(int irq, void *data)
+{
+ struct apple_nvme *anv = data;
+ bool handled = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&anv->lock, flags);
+ if (apple_nvme_handle_cq(&anv->ioq, false))
+ handled = true;
+ if (apple_nvme_handle_cq(&anv->adminq, false))
+ handled = true;
+ spin_unlock_irqrestore(&anv->lock, flags);
+
+ if (handled)
+ return IRQ_HANDLED;
+ return IRQ_NONE;
+}
+
+static int apple_nvme_create_cq(struct apple_nvme *anv)
+{
+ struct nvme_command c = {};
+
+ /*
+ * Note: we (ab)use the fact that the prp fields survive if no data
+ * is attached to the request.
+ */
+ c.create_cq.opcode = nvme_admin_create_cq;
+ c.create_cq.prp1 = cpu_to_le64(anv->ioq.cq_dma_addr);
+ c.create_cq.cqid = cpu_to_le16(1);
+ c.create_cq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
+ c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED);
+ c.create_cq.irq_vector = cpu_to_le16(0);
+
+ return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
+}
+
+static int apple_nvme_remove_cq(struct apple_nvme *anv)
+{
+ struct nvme_command c = {};
+
+ c.delete_queue.opcode = nvme_admin_delete_cq;
+ c.delete_queue.qid = cpu_to_le16(1);
+
+ return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
+}
+
+static int apple_nvme_create_sq(struct apple_nvme *anv)
+{
+ struct nvme_command c = {};
+
+ /*
+ * Note: we (ab)use the fact that the prp fields survive if no data
+ * is attached to the request.
+ */
+ c.create_sq.opcode = nvme_admin_create_sq;
+ c.create_sq.prp1 = cpu_to_le64(anv->ioq.sq_dma_addr);
+ c.create_sq.sqid = cpu_to_le16(1);
+ c.create_sq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
+ c.create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG);
+ c.create_sq.cqid = cpu_to_le16(1);
+
+ return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
+}
+
+static int apple_nvme_remove_sq(struct apple_nvme *anv)
+{
+ struct nvme_command c = {};
+
+ c.delete_queue.opcode = nvme_admin_delete_sq;
+ c.delete_queue.qid = cpu_to_le16(1);
+
+ return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
+}
+
+static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct nvme_ns *ns = hctx->queue->queuedata;
+ struct apple_nvme_queue *q = hctx->driver_data;
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+ struct request *req = bd->rq;
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_command *cmnd = &iod->cmd;
+ blk_status_t ret;
+
+ iod->npages = -1;
+ iod->nents = 0;
+
+ /*
+ * We should not need to do this, but we're still using this to
+ * ensure we can drain requests on a dying queue.
+ */
+ if (unlikely(!READ_ONCE(q->enabled)))
+ return BLK_STS_IOERR;
+
+ if (!nvme_check_ready(&anv->ctrl, req, true))
+ return nvme_fail_nonready_command(&anv->ctrl, req);
+
+ ret = nvme_setup_cmd(ns, req);
+ if (ret)
+ return ret;
+
+ if (blk_rq_nr_phys_segments(req)) {
+ ret = apple_nvme_map_data(anv, req, cmnd);
+ if (ret)
+ goto out_free_cmd;
+ }
+
+ blk_mq_start_request(req);
+ apple_nvme_submit_cmd(q, cmnd);
+ return BLK_STS_OK;
+
+out_free_cmd:
+ nvme_cleanup_cmd(req);
+ return ret;
+}
+
+static int apple_nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ hctx->driver_data = data;
+ return 0;
+}
+
+static int apple_nvme_init_request(struct blk_mq_tag_set *set,
+ struct request *req, unsigned int hctx_idx,
+ unsigned int numa_node)
+{
+ struct apple_nvme_queue *q = set->driver_data;
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_request *nreq = nvme_req(req);
+
+ iod->q = q;
+ nreq->ctrl = &anv->ctrl;
+ nreq->cmd = &iod->cmd;
+
+ return 0;
+}
+
+static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
+{
+ u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
+ bool dead = false, freeze = false;
+ unsigned long flags;
+
+ if (apple_rtkit_is_crashed(anv->rtk))
+ dead = true;
+ if (!(csts & NVME_CSTS_RDY))
+ dead = true;
+ if (csts & NVME_CSTS_CFS)
+ dead = true;
+
+ if (anv->ctrl.state == NVME_CTRL_LIVE ||
+ anv->ctrl.state == NVME_CTRL_RESETTING) {
+ freeze = true;
+ nvme_start_freeze(&anv->ctrl);
+ }
+
+ /*
+ * Give the controller a chance to complete all entered requests if
+ * doing a safe shutdown.
+ */
+ if (!dead && shutdown && freeze)
+ nvme_wait_freeze_timeout(&anv->ctrl, NVME_IO_TIMEOUT);
+
+ nvme_stop_queues(&anv->ctrl);
+
+ if (!dead) {
+ if (READ_ONCE(anv->ioq.enabled)) {
+ apple_nvme_remove_sq(anv);
+ apple_nvme_remove_cq(anv);
+ }
+
+ if (shutdown)
+ nvme_shutdown_ctrl(&anv->ctrl);
+ nvme_disable_ctrl(&anv->ctrl);
+ }
+
+ WRITE_ONCE(anv->ioq.enabled, false);
+ WRITE_ONCE(anv->adminq.enabled, false);
+ mb(); /* ensure that nvme_queue_rq() sees that enabled is cleared */
+ nvme_stop_admin_queue(&anv->ctrl);
+
+ /* last chance to complete any requests before nvme_cancel_request */
+ spin_lock_irqsave(&anv->lock, flags);
+ apple_nvme_handle_cq(&anv->ioq, true);
+ apple_nvme_handle_cq(&anv->adminq, true);
+ spin_unlock_irqrestore(&anv->lock, flags);
+
+ nvme_cancel_tagset(&anv->ctrl);
+ nvme_cancel_admin_tagset(&anv->ctrl);
+
+ /*
+ * The driver will not be starting up queues again if shutting down so
+ * must flush all entered requests to their failed completion to avoid
+ * deadlocking blk-mq hot-cpu notifier.
+ */
+ if (shutdown) {
+ nvme_start_queues(&anv->ctrl);
+ nvme_start_admin_queue(&anv->ctrl);
+ }
+}
+
+static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
+{
+ struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct apple_nvme_queue *q = iod->q;
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+ unsigned long flags;
+ u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
+
+ if (anv->ctrl.state != NVME_CTRL_LIVE) {
+ /*
+ * From rdma.c:
+ * If we are resetting, connecting or deleting we should
+ * complete immediately because we may block controller
+ * teardown or setup sequence
+ * - ctrl disable/shutdown fabrics requests
+ * - connect requests
+ * - initialization admin requests
+ * - I/O requests that entered after unquiescing and
+ * the controller stopped responding
+ *
+ * All other requests should be cancelled by the error
+ * recovery work, so it's fine that we fail it here.
+ */
+ dev_warn(anv->dev,
+ "I/O %d(aq:%d) timeout while not in live state\n",
+ req->tag, q->is_adminq);
+ if (blk_mq_request_started(req) &&
+ !blk_mq_request_completed(req)) {
+ nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
+ nvme_req(req)->flags |= NVME_REQ_CANCELLED;
+ blk_mq_complete_request(req);
+ }
+ return BLK_EH_DONE;
+ }
+
+ /* check if we just missed an interrupt if we're still alive */
+ if (!apple_rtkit_is_crashed(anv->rtk) && !(csts & NVME_CSTS_CFS)) {
+ spin_lock_irqsave(&anv->lock, flags);
+ apple_nvme_handle_cq(q, false);
+ spin_unlock_irqrestore(&anv->lock, flags);
+ if (blk_mq_request_completed(req)) {
+ dev_warn(anv->dev,
+ "I/O %d(aq:%d) timeout: completion polled\n",
+ req->tag, q->is_adminq);
+ return BLK_EH_DONE;
+ }
+ }
+
+ /*
+ * aborting commands isn't supported which leaves a full reset as our
+ * only option here
+ */
+ dev_warn(anv->dev, "I/O %d(aq:%d) timeout: resetting controller\n",
+ req->tag, q->is_adminq);
+ nvme_req(req)->flags |= NVME_REQ_CANCELLED;
+ apple_nvme_disable(anv, false);
+ nvme_reset_ctrl(&anv->ctrl);
+ return BLK_EH_DONE;
+}
+
+static int apple_nvme_poll(struct blk_mq_hw_ctx *hctx,
+ struct io_comp_batch *iob)
+{
+ struct apple_nvme_queue *q = hctx->driver_data;
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+ bool found;
+ unsigned long flags;
+
+ spin_lock_irqsave(&anv->lock, flags);
+ found = apple_nvme_poll_cq(q, iob);
+ spin_unlock_irqrestore(&anv->lock, flags);
+
+ return found;
+}
+
+static const struct blk_mq_ops apple_nvme_mq_admin_ops = {
+ .queue_rq = apple_nvme_queue_rq,
+ .complete = apple_nvme_complete_rq,
+ .init_hctx = apple_nvme_init_hctx,
+ .init_request = apple_nvme_init_request,
+ .timeout = apple_nvme_timeout,
+};
+
+static const struct blk_mq_ops apple_nvme_mq_ops = {
+ .queue_rq = apple_nvme_queue_rq,
+ .complete = apple_nvme_complete_rq,
+ .init_hctx = apple_nvme_init_hctx,
+ .init_request = apple_nvme_init_request,
+ .timeout = apple_nvme_timeout,
+ .poll = apple_nvme_poll,
+};
+
+static void apple_nvme_init_queue(struct apple_nvme_queue *q)
+{
+ unsigned int depth = apple_nvme_queue_depth(q);
+
+ q->cq_head = 0;
+ q->cq_phase = 1;
+ memset(q->tcbs, 0,
+ APPLE_ANS_MAX_QUEUE_DEPTH * sizeof(struct apple_nvmmu_tcb));
+ memset(q->cqes, 0, depth * sizeof(struct nvme_completion));
+ WRITE_ONCE(q->enabled, true);
+ wmb(); /* ensure the first interrupt sees the initialization */
+}
+
+static void apple_nvme_reset_work(struct work_struct *work)
+{
+ unsigned int nr_io_queues = 1;
+ int ret;
+ u32 boot_status, aqa;
+ struct apple_nvme *anv =
+ container_of(work, struct apple_nvme, ctrl.reset_work);
+
+ if (anv->ctrl.state != NVME_CTRL_RESETTING) {
+ dev_warn(anv->dev, "ctrl state %d is not RESETTING\n",
+ anv->ctrl.state);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* there's unfortunately no known way to recover if RTKit crashed :( */
+ if (apple_rtkit_is_crashed(anv->rtk)) {
+ dev_err(anv->dev,
+ "RTKit has crashed without any way to recover.");
+ ret = -EIO;
+ goto out;
+ }
+
+ if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
+ apple_nvme_disable(anv, false);
+
+ /* RTKit must be shut down cleanly for the (soft)-reset to work */
+ if (apple_rtkit_is_running(anv->rtk)) {
+ dev_dbg(anv->dev, "Trying to shut down RTKit before reset.");
+ ret = apple_rtkit_shutdown(anv->rtk);
+ if (ret)
+ goto out;
+ }
+
+ writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+
+ ret = reset_control_assert(anv->reset);
+ if (ret)
+ goto out;
+
+ ret = apple_rtkit_reinit(anv->rtk);
+ if (ret)
+ goto out;
+
+ ret = reset_control_deassert(anv->reset);
+ if (ret)
+ goto out;
+
+ writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN,
+ anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+ ret = apple_rtkit_boot(anv->rtk);
+ if (ret) {
+ dev_err(anv->dev, "ANS did not boot");
+ goto out;
+ }
+
+ ret = readl_poll_timeout(anv->mmio_nvme + APPLE_ANS_BOOT_STATUS,
+ boot_status,
+ boot_status == APPLE_ANS_BOOT_STATUS_OK,
+ USEC_PER_MSEC, APPLE_ANS_BOOT_TIMEOUT);
+ if (ret) {
+ dev_err(anv->dev, "ANS did not initialize");
+ goto out;
+ }
+
+ dev_dbg(anv->dev, "ANS booted successfully.");
+
+ /*
+ * Limit the max command size to prevent iod->sg allocations going
+ * over a single page.
+ */
+ anv->ctrl.max_hw_sectors = min_t(u32, NVME_MAX_KB_SZ << 1,
+ dma_max_mapping_size(anv->dev) >> 9);
+ anv->ctrl.max_segments = NVME_MAX_SEGS;
+
+ dma_set_max_seg_size(anv->dev, 0xffffffff);
+
+ /*
+ * Enable NVMMU and linear submission queues.
+ * While we could keep those disabled and pretend this is a slightly
+ * more common NVMe controller, we'd still need some quirks (e.g.
+ * sq entries will be 128 bytes) and Apple might drop support for
+ * that mode in the future.
+ */
+ writel(APPLE_ANS_LINEAR_SQ_EN,
+ anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL);
+
+ /* Allow as many pending commands as possible for both queues */
+ writel(APPLE_ANS_MAX_QUEUE_DEPTH | (APPLE_ANS_MAX_QUEUE_DEPTH << 16),
+ anv->mmio_nvme + APPLE_ANS_MAX_PEND_CMDS_CTRL);
+
+ /* Setup the NVMMU for the maximum admin and IO queue depth */
+ writel(APPLE_ANS_MAX_QUEUE_DEPTH - 1,
+ anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS);
+
+ /*
+ * This is probably a chicken bit: without it all commands where any PRP
+ * is set to zero (including those that don't use that field) fail and
+ * the co-processor complains about "completed with err BAD_CMD-" or
+ * a "NULL_PRP_PTR_ERR" in the syslog
+ */
+ writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) &
+ ~APPLE_ANS_PRP_NULL_CHECK,
+ anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL);
+
+ /* Setup the admin queue */
+ aqa = APPLE_NVME_AQ_DEPTH - 1;
+ aqa |= aqa << 16;
+ writel(aqa, anv->mmio_nvme + NVME_REG_AQA);
+ writeq(anv->adminq.sq_dma_addr, anv->mmio_nvme + NVME_REG_ASQ);
+ writeq(anv->adminq.cq_dma_addr, anv->mmio_nvme + NVME_REG_ACQ);
+
+ /* Setup NVMMU for both queues */
+ writeq(anv->adminq.tcb_dma_addr,
+ anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE);
+ writeq(anv->ioq.tcb_dma_addr,
+ anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE);
+
+ anv->ctrl.sqsize =
+ APPLE_ANS_MAX_QUEUE_DEPTH - 1; /* 0's based queue depth */
+ anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP);
+
+ dev_dbg(anv->dev, "Enabling controller now");
+ ret = nvme_enable_ctrl(&anv->ctrl);
+ if (ret)
+ goto out;
+
+ dev_dbg(anv->dev, "Starting admin queue");
+ apple_nvme_init_queue(&anv->adminq);
+ nvme_start_admin_queue(&anv->ctrl);
+
+ if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_CONNECTING)) {
+ dev_warn(anv->ctrl.device,
+ "failed to mark controller CONNECTING\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = nvme_init_ctrl_finish(&anv->ctrl);
+ if (ret)
+ goto out;
+
+ dev_dbg(anv->dev, "Creating IOCQ");
+ ret = apple_nvme_create_cq(anv);
+ if (ret)
+ goto out;
+ dev_dbg(anv->dev, "Creating IOSQ");
+ ret = apple_nvme_create_sq(anv);
+ if (ret)
+ goto out_remove_cq;
+
+ apple_nvme_init_queue(&anv->ioq);
+ nr_io_queues = 1;
+ ret = nvme_set_queue_count(&anv->ctrl, &nr_io_queues);
+ if (ret)
+ goto out_remove_sq;
+ if (nr_io_queues != 1) {
+ ret = -ENXIO;
+ goto out_remove_sq;
+ }
+
+ anv->ctrl.queue_count = nr_io_queues + 1;
+
+ nvme_start_queues(&anv->ctrl);
+ nvme_wait_freeze(&anv->ctrl);
+ blk_mq_update_nr_hw_queues(&anv->tagset, 1);
+ nvme_unfreeze(&anv->ctrl);
+
+ if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_LIVE)) {
+ dev_warn(anv->ctrl.device,
+ "failed to mark controller live state\n");
+ ret = -ENODEV;
+ goto out_remove_sq;
+ }
+
+ nvme_start_ctrl(&anv->ctrl);
+
+ dev_dbg(anv->dev, "ANS boot and NVMe init completed.");
+ return;
+
+out_remove_sq:
+ apple_nvme_remove_sq(anv);
+out_remove_cq:
+ apple_nvme_remove_cq(anv);
+out:
+ dev_warn(anv->ctrl.device, "Reset failure status: %d\n", ret);
+ nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
+ nvme_get_ctrl(&anv->ctrl);
+ apple_nvme_disable(anv, false);
+ nvme_kill_queues(&anv->ctrl);
+ if (!queue_work(nvme_wq, &anv->remove_work))
+ nvme_put_ctrl(&anv->ctrl);
+}
+
+static void apple_nvme_remove_dead_ctrl_work(struct work_struct *work)
+{
+ struct apple_nvme *anv =
+ container_of(work, struct apple_nvme, remove_work);
+
+ nvme_put_ctrl(&anv->ctrl);
+ device_release_driver(anv->dev);
+}
+
+static int apple_nvme_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
+{
+ *val = readl(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
+ return 0;
+}
+
+static int apple_nvme_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
+{
+ writel(val, ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
+ return 0;
+}
+
+static int apple_nvme_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
+{
+ *val = readq(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
+ return 0;
+}
+
+static int apple_nvme_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
+{
+ struct device *dev = ctrl_to_apple_nvme(ctrl)->dev;
+
+ return snprintf(buf, size, "%s\n", dev_name(dev));
+}
+
+static void apple_nvme_free_ctrl(struct nvme_ctrl *ctrl)
+{
+ struct apple_nvme *anv = ctrl_to_apple_nvme(ctrl);
+
+ if (anv->ctrl.admin_q)
+ blk_put_queue(anv->ctrl.admin_q);
+ put_device(anv->dev);
+}
+
+static const struct nvme_ctrl_ops nvme_ctrl_ops = {
+ .name = "apple-nvme",
+ .module = THIS_MODULE,
+ .flags = 0,
+ .reg_read32 = apple_nvme_reg_read32,
+ .reg_write32 = apple_nvme_reg_write32,
+ .reg_read64 = apple_nvme_reg_read64,
+ .free_ctrl = apple_nvme_free_ctrl,
+ .get_address = apple_nvme_get_address,
+};
+
+static void apple_nvme_async_probe(void *data, async_cookie_t cookie)
+{
+ struct apple_nvme *anv = data;
+
+ flush_work(&anv->ctrl.reset_work);
+ flush_work(&anv->ctrl.scan_work);
+ nvme_put_ctrl(&anv->ctrl);
+}
+
+static void devm_apple_nvme_put_tag_set(void *data)
+{
+ blk_mq_free_tag_set(data);
+}
+
+static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
+{
+ int ret;
+
+ anv->admin_tagset.ops = &apple_nvme_mq_admin_ops;
+ anv->admin_tagset.nr_hw_queues = 1;
+ anv->admin_tagset.queue_depth = APPLE_NVME_AQ_MQ_TAG_DEPTH;
+ anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
+ anv->admin_tagset.numa_node = NUMA_NO_NODE;
+ anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod);
+ anv->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
+ anv->admin_tagset.driver_data = &anv->adminq;
+
+ ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
+ &anv->admin_tagset);
+ if (ret)
+ return ret;
+
+ anv->tagset.ops = &apple_nvme_mq_ops;
+ anv->tagset.nr_hw_queues = 1;
+ anv->tagset.nr_maps = 1;
+ /*
+ * Tags are used as an index to the NVMMU and must be unique across
+ * both queues. The admin queue gets the first APPLE_NVME_AQ_DEPTH tags,
+ * which must be marked as reserved in the IO queue.
+ */
+ anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH;
+ anv->tagset.queue_depth = APPLE_ANS_MAX_QUEUE_DEPTH - 1;
+ anv->tagset.timeout = NVME_IO_TIMEOUT;
+ anv->tagset.numa_node = NUMA_NO_NODE;
+ anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
+ anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
+ anv->tagset.driver_data = &anv->ioq;
+
+ ret = blk_mq_alloc_tag_set(&anv->tagset);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
+ &anv->tagset);
+ if (ret)
+ return ret;
+
+ anv->ctrl.admin_tagset = &anv->admin_tagset;
+ anv->ctrl.tagset = &anv->tagset;
+
+ return 0;
+}
+
+static int apple_nvme_queue_alloc(struct apple_nvme *anv,
+ struct apple_nvme_queue *q)
+{
+ unsigned int depth = apple_nvme_queue_depth(q);
+
+ q->cqes = dmam_alloc_coherent(anv->dev,
+ depth * sizeof(struct nvme_completion),
+ &q->cq_dma_addr, GFP_KERNEL);
+ if (!q->cqes)
+ return -ENOMEM;
+
+ q->sqes = dmam_alloc_coherent(anv->dev,
+ depth * sizeof(struct nvme_command),
+ &q->sq_dma_addr, GFP_KERNEL);
+ if (!q->sqes)
+ return -ENOMEM;
+
+ /*
+ * We need the maximum queue depth here because the NVMMU only has a
+ * single depth configuration shared between both queues.
+ */
+ q->tcbs = dmam_alloc_coherent(anv->dev,
+ APPLE_ANS_MAX_QUEUE_DEPTH *
+ sizeof(struct apple_nvmmu_tcb),
+ &q->tcb_dma_addr, GFP_KERNEL);
+ if (!q->tcbs)
+ return -ENOMEM;
+
+ /*
+ * initialize phase to make sure the allocated and empty memory
+ * doesn't look like a full cq already.
+ */
+ q->cq_phase = 1;
+ return 0;
+}
+
+static void apple_nvme_detach_genpd(struct apple_nvme *anv)
+{
+ int i;
+
+ if (anv->pd_count <= 1)
+ return;
+
+ for (i = anv->pd_count - 1; i >= 0; i--) {
+ if (anv->pd_link[i])
+ device_link_del(anv->pd_link[i]);
+ if (!IS_ERR_OR_NULL(anv->pd_dev[i]))
+ dev_pm_domain_detach(anv->pd_dev[i], true);
+ }
+}
+
+static int apple_nvme_attach_genpd(struct apple_nvme *anv)
+{
+ struct device *dev = anv->dev;
+ int i;
+
+ anv->pd_count = of_count_phandle_with_args(
+ dev->of_node, "power-domains", "#power-domain-cells");
+ if (anv->pd_count <= 1)
+ return 0;
+
+ anv->pd_dev = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_dev),
+ GFP_KERNEL);
+ if (!anv->pd_dev)
+ return -ENOMEM;
+
+ anv->pd_link = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_link),
+ GFP_KERNEL);
+ if (!anv->pd_link)
+ return -ENOMEM;
+
+ for (i = 0; i < anv->pd_count; i++) {
+ anv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
+ if (IS_ERR(anv->pd_dev[i])) {
+ apple_nvme_detach_genpd(anv);
+ return PTR_ERR(anv->pd_dev[i]);
+ }
+
+ anv->pd_link[i] = device_link_add(dev, anv->pd_dev[i],
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!anv->pd_link[i]) {
+ apple_nvme_detach_genpd(anv);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void devm_apple_nvme_mempool_destroy(void *data)
+{
+ mempool_destroy(data);
+}
+
+static int apple_nvme_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct apple_nvme *anv;
+ int ret;
+
+ anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL);
+ if (!anv)
+ return -ENOMEM;
+
+ anv->dev = get_device(dev);
+ anv->adminq.is_adminq = true;
+ platform_set_drvdata(pdev, anv);
+
+ ret = apple_nvme_attach_genpd(anv);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to attach power domains");
+ goto put_dev;
+ }
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
+ ret = -ENXIO;
+ goto put_dev;
+ }
+
+ anv->irq = platform_get_irq(pdev, 0);
+ if (anv->irq < 0) {
+ ret = anv->irq;
+ goto put_dev;
+ }
+ if (!anv->irq) {
+ ret = -ENXIO;
+ goto put_dev;
+ }
+
+ anv->mmio_coproc = devm_platform_ioremap_resource_byname(pdev, "ans");
+ if (IS_ERR(anv->mmio_coproc)) {
+ ret = PTR_ERR(anv->mmio_coproc);
+ goto put_dev;
+ }
+ anv->mmio_nvme = devm_platform_ioremap_resource_byname(pdev, "nvme");
+ if (IS_ERR(anv->mmio_nvme)) {
+ ret = PTR_ERR(anv->mmio_nvme);
+ goto put_dev;
+ }
+
+ anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB;
+ anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
+ anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB;
+ anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;
+
+ anv->sart = devm_apple_sart_get(dev);
+ if (IS_ERR(anv->sart)) {
+ ret = dev_err_probe(dev, PTR_ERR(anv->sart),
+ "Failed to initialize SART");
+ goto put_dev;
+ }
+
+ anv->reset = devm_reset_control_array_get_exclusive(anv->dev);
+ if (IS_ERR(anv->reset)) {
+ ret = dev_err_probe(dev, PTR_ERR(anv->reset),
+ "Failed to get reset control");
+ goto put_dev;
+ }
+
+ INIT_WORK(&anv->ctrl.reset_work, apple_nvme_reset_work);
+ INIT_WORK(&anv->remove_work, apple_nvme_remove_dead_ctrl_work);
+ spin_lock_init(&anv->lock);
+
+ ret = apple_nvme_queue_alloc(anv, &anv->adminq);
+ if (ret)
+ goto put_dev;
+ ret = apple_nvme_queue_alloc(anv, &anv->ioq);
+ if (ret)
+ goto put_dev;
+
+ anv->prp_page_pool = dmam_pool_create("prp list page", anv->dev,
+ NVME_CTRL_PAGE_SIZE,
+ NVME_CTRL_PAGE_SIZE, 0);
+ if (!anv->prp_page_pool) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+
+ anv->prp_small_pool =
+ dmam_pool_create("prp list 256", anv->dev, 256, 256, 0);
+ if (!anv->prp_small_pool) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+
+ WARN_ON_ONCE(apple_nvme_iod_alloc_size() > PAGE_SIZE);
+ anv->iod_mempool =
+ mempool_create_kmalloc_pool(1, apple_nvme_iod_alloc_size());
+ if (!anv->iod_mempool) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+ ret = devm_add_action_or_reset(anv->dev,
+ devm_apple_nvme_mempool_destroy, anv->iod_mempool);
+ if (ret)
+ goto put_dev;
+
+ ret = apple_nvme_alloc_tagsets(anv);
+ if (ret)
+ goto put_dev;
+
+ ret = devm_request_irq(anv->dev, anv->irq, apple_nvme_irq, 0,
+ "nvme-apple", anv);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to request IRQ");
+ goto put_dev;
+ }
+
+ anv->rtk =
+ devm_apple_rtkit_init(dev, anv, NULL, 0, &apple_nvme_rtkit_ops);
+ if (IS_ERR(anv->rtk)) {
+ ret = dev_err_probe(dev, PTR_ERR(anv->rtk),
+ "Failed to initialize RTKit");
+ goto put_dev;
+ }
+
+ ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops,
+ NVME_QUIRK_SKIP_CID_GEN);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl");
+ goto put_dev;
+ }
+
+ anv->ctrl.admin_q = blk_mq_init_queue(&anv->admin_tagset);
+ if (IS_ERR(anv->ctrl.admin_q)) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+
+ if (!blk_get_queue(anv->ctrl.admin_q)) {
+ nvme_start_admin_queue(&anv->ctrl);
+ blk_mq_destroy_queue(anv->ctrl.admin_q);
+ anv->ctrl.admin_q = NULL;
+ ret = -ENODEV;
+ goto put_dev;
+ }
+
+ nvme_reset_ctrl(&anv->ctrl);
+ async_schedule(apple_nvme_async_probe, anv);
+
+ return 0;
+
+put_dev:
+ put_device(anv->dev);
+ return ret;
+}
+
+static int apple_nvme_remove(struct platform_device *pdev)
+{
+ struct apple_nvme *anv = platform_get_drvdata(pdev);
+
+ nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
+ flush_work(&anv->ctrl.reset_work);
+ nvme_stop_ctrl(&anv->ctrl);
+ nvme_remove_namespaces(&anv->ctrl);
+ apple_nvme_disable(anv, true);
+ nvme_uninit_ctrl(&anv->ctrl);
+
+ if (apple_rtkit_is_running(anv->rtk))
+ apple_rtkit_shutdown(anv->rtk);
+
+ apple_nvme_detach_genpd(anv);
+
+ return 0;
+}
+
+static void apple_nvme_shutdown(struct platform_device *pdev)
+{
+ struct apple_nvme *anv = platform_get_drvdata(pdev);
+
+ apple_nvme_disable(anv, true);
+ if (apple_rtkit_is_running(anv->rtk))
+ apple_rtkit_shutdown(anv->rtk);
+}
+
+static int apple_nvme_resume(struct device *dev)
+{
+ struct apple_nvme *anv = dev_get_drvdata(dev);
+
+ return nvme_reset_ctrl(&anv->ctrl);
+}
+
+static int apple_nvme_suspend(struct device *dev)
+{
+ struct apple_nvme *anv = dev_get_drvdata(dev);
+ int ret = 0;
+
+ apple_nvme_disable(anv, true);
+
+ if (apple_rtkit_is_running(anv->rtk))
+ ret = apple_rtkit_shutdown(anv->rtk);
+
+ writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+
+ return ret;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend,
+ apple_nvme_resume);
+
+static const struct of_device_id apple_nvme_of_match[] = {
+ { .compatible = "apple,nvme-ans2" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, apple_nvme_of_match);
+
+static struct platform_driver apple_nvme_driver = {
+ .driver = {
+ .name = "nvme-apple",
+ .of_match_table = apple_nvme_of_match,
+ .pm = pm_sleep_ptr(&apple_nvme_pm_ops),
+ },
+ .probe = apple_nvme_probe,
+ .remove = apple_nvme_remove,
+ .shutdown = apple_nvme_shutdown,
+};
+module_platform_driver(apple_nvme_driver);
+
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
new file mode 100644
index 000000000000..c8a6db7c4498
--- /dev/null
+++ b/drivers/nvme/host/auth.c
@@ -0,0 +1,1017 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
+ */
+
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <linux/prandom.h>
+#include <asm/unaligned.h>
+#include <crypto/hash.h>
+#include <crypto/dh.h>
+#include "nvme.h"
+#include "fabrics.h"
+#include <linux/nvme-auth.h>
+
+struct nvme_dhchap_queue_context {
+ struct list_head entry;
+ struct work_struct auth_work;
+ struct nvme_ctrl *ctrl;
+ struct crypto_shash *shash_tfm;
+ struct crypto_kpp *dh_tfm;
+ void *buf;
+ size_t buf_size;
+ int qid;
+ int error;
+ u32 s1;
+ u32 s2;
+ u16 transaction;
+ u8 status;
+ u8 hash_id;
+ size_t hash_len;
+ u8 dhgroup_id;
+ u8 c1[64];
+ u8 c2[64];
+ u8 response[64];
+ u8 *host_response;
+ u8 *ctrl_key;
+ int ctrl_key_len;
+ u8 *host_key;
+ int host_key_len;
+ u8 *sess_key;
+ int sess_key_len;
+};
+
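+/*
+ * Authentication commands for the admin queue (qid 0) are sent over the
+ * fabrics queue and may block for a tag; commands for I/O queues go over
+ * the connect queue using reserved tags and must not wait for allocation.
+ */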
+#define nvme_auth_flags_from_qid(qid) \
+ (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
+#define nvme_auth_queue_from_qid(ctrl, qid) \
+ (qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q
+
+static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
+ void *data, size_t data_len, bool auth_send)
+{
+ struct nvme_command cmd = {};
+ blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid);
+ struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
+ int ret;
+
+ cmd.auth_common.opcode = nvme_fabrics_command;
+ cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
+ cmd.auth_common.spsp0 = 0x01;
+ cmd.auth_common.spsp1 = 0x01;
+ if (auth_send) {
+ cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
+ cmd.auth_send.tl = cpu_to_le32(data_len);
+ } else {
+ cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
+ cmd.auth_receive.al = cpu_to_le32(data_len);
+ }
+
+ ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
+ qid == 0 ? NVME_QID_ANY : qid,
+ 0, flags);
+ if (ret > 0)
+ dev_warn(ctrl->device,
+ "qid %d auth_send failed with status %d\n", qid, ret);
+ else if (ret < 0)
+ dev_err(ctrl->device,
+ "qid %d auth_send failed with error %d\n", qid, ret);
+ return ret;
+}
+
+static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
+ struct nvmf_auth_dhchap_failure_data *data,
+ u16 transaction, u8 expected_msg)
+{
+ dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
+ __func__, qid, data->auth_type, data->auth_id);
+
+ if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
+ data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
+ return data->rescode_exp;
+ }
+ if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
+ data->auth_id != expected_msg) {
+ dev_warn(ctrl->device,
+ "qid %d invalid message %02x/%02x\n",
+ qid, data->auth_type, data->auth_id);
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ }
+ if (le16_to_cpu(data->t_id) != transaction) {
+ dev_warn(ctrl->device,
+ "qid %d invalid transaction ID %d\n",
+ qid, le16_to_cpu(data->t_id));
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ }
+ return 0;
+}
+
+static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
+ size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);
+
+ if (chap->buf_size < size) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return -EINVAL;
+ }
+ memset((u8 *)chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_COMMON_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ data->t_id = cpu_to_le16(chap->transaction);
+ data->sc_c = 0; /* No secure channel concatenation */
+ data->napd = 1;
+ data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
+ data->auth_protocol[0].dhchap.halen = 3;
+ data->auth_protocol[0].dhchap.dhlen = 6;
+ data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256;
+ data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384;
+ data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512;
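+	/* DH group identifiers are placed at a fixed offset (30) in idlist */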
+ data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL;
+ data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048;
+ data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072;
+ data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096;
+ data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
+ data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;
+
+ return size;
+}
+
+static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
+ u16 dhvlen = le16_to_cpu(data->dhvlen);
+ size_t size = sizeof(*data) + data->hl + dhvlen;
+ const char *gid_name = nvme_auth_dhgroup_name(data->dhgid);
+ const char *hmac_name, *kpp_name;
+
+ if (chap->buf_size < size) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ hmac_name = nvme_auth_hmac_name(data->hashid);
+ if (!hmac_name) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid HASH ID %d\n",
+ chap->qid, data->hashid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ if (chap->hash_id == data->hashid && chap->shash_tfm &&
+ !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) &&
+ crypto_shash_digestsize(chap->shash_tfm) == data->hl) {
+ dev_dbg(ctrl->device,
+ "qid %d: reuse existing hash %s\n",
+ chap->qid, hmac_name);
+ goto select_kpp;
+ }
+
+ /* Reset if hash cannot be reused */
+ if (chap->shash_tfm) {
+ crypto_free_shash(chap->shash_tfm);
+ chap->hash_id = 0;
+ chap->hash_len = 0;
+ }
+ chap->shash_tfm = crypto_alloc_shash(hmac_name, 0,
+ CRYPTO_ALG_ALLOCATES_MEMORY);
+ if (IS_ERR(chap->shash_tfm)) {
+ dev_warn(ctrl->device,
+ "qid %d: failed to allocate hash %s, error %ld\n",
+ chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
+ chap->shash_tfm = NULL;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+
+ if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid hash length %d\n",
+ chap->qid, data->hl);
+ crypto_free_shash(chap->shash_tfm);
+ chap->shash_tfm = NULL;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+
+ /* Reset host response if the hash had been changed */
+ if (chap->hash_id != data->hashid) {
+ kfree(chap->host_response);
+ chap->host_response = NULL;
+ }
+
+ chap->hash_id = data->hashid;
+ chap->hash_len = data->hl;
+ dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
+ chap->qid, hmac_name);
+
+select_kpp:
+ kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
+ if (!kpp_name) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid DH group id %d\n",
+ chap->qid, data->dhgid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ /* Leave previous dh_tfm intact */
+ return NVME_SC_AUTH_REQUIRED;
+ }
+
+ /* Clear host and controller key to avoid accidental reuse */
+ kfree_sensitive(chap->host_key);
+ chap->host_key = NULL;
+ chap->host_key_len = 0;
+ kfree_sensitive(chap->ctrl_key);
+ chap->ctrl_key = NULL;
+ chap->ctrl_key_len = 0;
+
+ if (chap->dhgroup_id == data->dhgid &&
+ (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) {
+ dev_dbg(ctrl->device,
+ "qid %d: reuse existing DH group %s\n",
+ chap->qid, gid_name);
+ goto skip_kpp;
+ }
+
+ /* Reset dh_tfm if it can't be reused */
+ if (chap->dh_tfm) {
+ crypto_free_kpp(chap->dh_tfm);
+ chap->dh_tfm = NULL;
+ }
+
+ if (data->dhgid != NVME_AUTH_DHGROUP_NULL) {
+ if (dhvlen == 0) {
+ dev_warn(ctrl->device,
+ "qid %d: empty DH value\n",
+ chap->qid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
+ if (IS_ERR(chap->dh_tfm)) {
+ int ret = PTR_ERR(chap->dh_tfm);
+
+ dev_warn(ctrl->device,
+ "qid %d: error %d initializing DH group %s\n",
+ chap->qid, ret, gid_name);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ chap->dh_tfm = NULL;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+ dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
+ chap->qid, gid_name);
+ } else if (dhvlen != 0) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid DH value for NULL DH\n",
+ chap->qid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return NVME_SC_INVALID_FIELD;
+ }
+ chap->dhgroup_id = data->dhgid;
+
+skip_kpp:
+ chap->s1 = le32_to_cpu(data->seqnum);
+ memcpy(chap->c1, data->cval, chap->hash_len);
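+	/* the controller's DH public value follows the challenge in cval */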
+ if (dhvlen) {
+ chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
+ if (!chap->ctrl_key) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+ chap->ctrl_key_len = dhvlen;
+ memcpy(chap->ctrl_key, data->cval + chap->hash_len,
+ dhvlen);
+ dev_dbg(ctrl->device, "ctrl public key %*ph\n",
+ (int)chap->ctrl_key_len, chap->ctrl_key);
+ }
+
+ return 0;
+}
+
+static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_reply_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ size += 2 * chap->hash_len;
+
+ if (chap->host_key_len)
+ size += chap->host_key_len;
+
+ if (chap->buf_size < size) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return -EINVAL;
+ }
+
+ memset(chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
+ data->t_id = cpu_to_le16(chap->transaction);
+ data->hl = chap->hash_len;
+ data->dhvlen = cpu_to_le16(chap->host_key_len);
+ memcpy(data->rval, chap->response, chap->hash_len);
+ if (ctrl->ctrl_key) {
+ get_random_bytes(chap->c2, chap->hash_len);
+ data->cvalid = 1;
+ chap->s2 = nvme_auth_get_seqnum();
+ memcpy(data->rval + chap->hash_len, chap->c2,
+ chap->hash_len);
+ dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len, chap->c2);
+ } else {
+ memset(chap->c2, 0, chap->hash_len);
+ chap->s2 = 0;
+ }
+ data->seqnum = cpu_to_le32(chap->s2);
+ if (chap->host_key_len) {
+ dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
+ __func__, chap->qid,
+ chap->host_key_len, chap->host_key);
+ memcpy(data->rval + 2 * chap->hash_len, chap->host_key,
+ chap->host_key_len);
+ }
+
+ return size;
+}
+
+static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_success1_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ if (ctrl->ctrl_key)
+ size += chap->hash_len;
+
+ if (chap->buf_size < size) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ if (data->hl != chap->hash_len) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid hash length %u\n",
+ chap->qid, data->hl);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ /* Just print out information for the admin queue */
+ if (chap->qid == 0)
+ dev_info(ctrl->device,
+ "qid 0: authenticated with hash %s dhgroup %s\n",
+ nvme_auth_hmac_name(chap->hash_id),
+ nvme_auth_dhgroup_name(chap->dhgroup_id));
+
+ if (!data->rvalid)
+ return 0;
+
+ /* Validate controller response */
+ if (memcmp(chap->response, data->rval, data->hl)) {
+ dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len, data->rval);
+ dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len,
+ chap->response);
+ dev_warn(ctrl->device,
+ "qid %d: controller authentication failed\n",
+ chap->qid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+
+ /* Just print out information for the admin queue */
+ if (chap->qid == 0)
+ dev_info(ctrl->device,
+ "qid 0: controller authenticated\n");
+ return 0;
+}
+
+static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_success2_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ memset(chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
+ data->t_id = cpu_to_le16(chap->transaction);
+
+ return size;
+}
+
+static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_failure_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ memset(chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_COMMON_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+ data->t_id = cpu_to_le16(chap->transaction);
+ data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
+ data->rescode_exp = chap->status;
+
+ return size;
+}
+
+static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
+ u8 buf[4], *challenge = chap->c1;
+ int ret;
+
+ dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
+ __func__, chap->qid, chap->s1, chap->transaction);
+
+ if (!chap->host_response) {
+ chap->host_response = nvme_auth_transform_key(ctrl->host_key,
+ ctrl->opts->host->nqn);
+ if (IS_ERR(chap->host_response)) {
+ ret = PTR_ERR(chap->host_response);
+ chap->host_response = NULL;
+ return ret;
+ }
+ } else {
+ dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
+ __func__, chap->qid);
+ }
+
+ ret = crypto_shash_setkey(chap->shash_tfm,
+ chap->host_response, ctrl->host_key->len);
+ if (ret) {
+ dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
+ chap->qid, ret);
+ goto out;
+ }
+
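+	/*
+	 * For a non-NULL DH group the challenge is augmented with the
+	 * DH session key before the response is computed.
+	 */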
+ if (chap->dh_tfm) {
+ challenge = kmalloc(chap->hash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = nvme_auth_augmented_challenge(chap->hash_id,
+ chap->sess_key,
+ chap->sess_key_len,
+ chap->c1, challenge,
+ chap->hash_len);
+ if (ret)
+ goto out;
+ }
+
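+	/*
+	 * Host response = HMAC(transformed host key, challenge ||
+	 *	seqnum (le32) || transaction (le16) || 0x00 || "HostHost" ||
+	 *	host NQN || 0x00 || subsystem NQN)
+	 */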
+ shash->tfm = chap->shash_tfm;
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, chap->hash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(chap->s1, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(chap->transaction, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, sizeof(buf));
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "HostHost", 8);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
+ strlen(ctrl->opts->host->nqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
+ strlen(ctrl->opts->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, chap->response);
+out:
+ if (challenge != chap->c1)
+ kfree(challenge);
+ return ret;
+}
+
+static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
+ u8 *ctrl_response;
+ u8 buf[4], *challenge = chap->c2;
+ int ret;
+
+ ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key,
+ ctrl->opts->subsysnqn);
+ if (IS_ERR(ctrl_response)) {
+ ret = PTR_ERR(ctrl_response);
+ return ret;
+ }
+ ret = crypto_shash_setkey(chap->shash_tfm,
+ ctrl_response, ctrl->ctrl_key->len);
+ if (ret) {
+ dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
+ chap->qid, ret);
+ goto out;
+ }
+
+ if (chap->dh_tfm) {
+ challenge = kmalloc(chap->hash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = nvme_auth_augmented_challenge(chap->hash_id,
+ chap->sess_key,
+ chap->sess_key_len,
+ chap->c2, challenge,
+ chap->hash_len);
+ if (ret)
+ goto out;
+ }
+ dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n",
+ __func__, chap->qid, chap->s2, chap->transaction);
+ dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len, challenge);
+ dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
+ __func__, chap->qid, ctrl->opts->subsysnqn);
+ dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
+ __func__, chap->qid, ctrl->opts->host->nqn);
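+	/*
+	 * Expected controller response = HMAC(transformed ctrl key,
+	 *	challenge || seqnum (le32) || transaction (le16) || 0x00 ||
+	 *	"Controller" || subsystem NQN || 0x00 || host NQN)
+	 */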
+ shash->tfm = chap->shash_tfm;
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, chap->hash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(chap->s2, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(chap->transaction, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, 4);
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "Controller", 10);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
+ strlen(ctrl->opts->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
+ strlen(ctrl->opts->host->nqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, chap->response);
+out:
+ if (challenge != chap->c2)
+ kfree(challenge);
+ kfree(ctrl_response);
+ return ret;
+}
+
+static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ int ret;
+
+ if (chap->host_key && chap->host_key_len) {
+ dev_dbg(ctrl->device,
+ "qid %d: reusing host key\n", chap->qid);
+ goto gen_sesskey;
+ }
+ ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id);
+ if (ret < 0) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return ret;
+ }
+
+ chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm);
+
+ chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL);
+ if (!chap->host_key) {
+ chap->host_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return -ENOMEM;
+ }
+ ret = nvme_auth_gen_pubkey(chap->dh_tfm,
+ chap->host_key, chap->host_key_len);
+ if (ret) {
+ dev_dbg(ctrl->device,
+ "failed to generate public key, error %d\n", ret);
+ kfree(chap->host_key);
+ chap->host_key = NULL;
+ chap->host_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return ret;
+ }
+
+gen_sesskey:
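+	/* the DH shared secret is as large as the group's public value */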
+ chap->sess_key_len = chap->host_key_len;
+ chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL);
+ if (!chap->sess_key) {
+ chap->sess_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return -ENOMEM;
+ }
+
+ ret = nvme_auth_gen_shared_secret(chap->dh_tfm,
+ chap->ctrl_key, chap->ctrl_key_len,
+ chap->sess_key, chap->sess_key_len);
+ if (ret) {
+ dev_dbg(ctrl->device,
+ "failed to generate shared secret, error %d\n", ret);
+ kfree_sensitive(chap->sess_key);
+ chap->sess_key = NULL;
+ chap->sess_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return ret;
+ }
+ dev_dbg(ctrl->device, "shared secret %*ph\n",
+ (int)chap->sess_key_len, chap->sess_key);
+ return 0;
+}
+
+static void __nvme_auth_reset(struct nvme_dhchap_queue_context *chap)
+{
+ kfree_sensitive(chap->host_response);
+ chap->host_response = NULL;
+ kfree_sensitive(chap->host_key);
+ chap->host_key = NULL;
+ chap->host_key_len = 0;
+ kfree_sensitive(chap->ctrl_key);
+ chap->ctrl_key = NULL;
+ chap->ctrl_key_len = 0;
+ kfree_sensitive(chap->sess_key);
+ chap->sess_key = NULL;
+ chap->sess_key_len = 0;
+ chap->status = 0;
+ chap->error = 0;
+ chap->s1 = 0;
+ chap->s2 = 0;
+ chap->transaction = 0;
+ memset(chap->c1, 0, sizeof(chap->c1));
+ memset(chap->c2, 0, sizeof(chap->c2));
+}
+
+static void __nvme_auth_free(struct nvme_dhchap_queue_context *chap)
+{
+ __nvme_auth_reset(chap);
+ if (chap->shash_tfm)
+ crypto_free_shash(chap->shash_tfm);
+ if (chap->dh_tfm)
+ crypto_free_kpp(chap->dh_tfm);
+ kfree_sensitive(chap->ctrl_key);
+ kfree_sensitive(chap->host_key);
+ kfree_sensitive(chap->sess_key);
+ kfree_sensitive(chap->host_response);
+ kfree(chap->buf);
+ kfree(chap);
+}
+
+static void __nvme_auth_work(struct work_struct *work)
+{
+ struct nvme_dhchap_queue_context *chap =
+ container_of(work, struct nvme_dhchap_queue_context, auth_work);
+ struct nvme_ctrl *ctrl = chap->ctrl;
+ size_t tl;
+ int ret = 0;
+
+ chap->transaction = ctrl->transaction++;
+
+ /* DH-HMAC-CHAP Step 1: send negotiate */
+ dev_dbg(ctrl->device, "%s: qid %d send negotiate\n",
+ __func__, chap->qid);
+ ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap);
+ if (ret < 0) {
+ chap->error = ret;
+ return;
+ }
+ tl = ret;
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ if (ret) {
+ chap->error = ret;
+ return;
+ }
+
+ /* DH-HMAC-CHAP Step 2: receive challenge */
+ dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
+ __func__, chap->qid);
+
+ memset(chap->buf, 0, chap->buf_size);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, chap->buf_size, false);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d failed to receive challenge, %s %d\n",
+ chap->qid, ret < 0 ? "error" : "nvme status", ret);
+ chap->error = ret;
+ return;
+ }
+ ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
+ NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
+ if (ret) {
+ chap->status = ret;
+ chap->error = NVME_SC_AUTH_REQUIRED;
+ return;
+ }
+
+ ret = nvme_auth_process_dhchap_challenge(ctrl, chap);
+ if (ret) {
+ /* Invalid challenge parameters */
+ chap->error = ret;
+ goto fail2;
+ }
+
+ if (chap->ctrl_key_len) {
+ dev_dbg(ctrl->device,
+ "%s: qid %d DH exponential\n",
+ __func__, chap->qid);
+ ret = nvme_auth_dhchap_exponential(ctrl, chap);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+ }
+
+ dev_dbg(ctrl->device, "%s: qid %d host response\n",
+ __func__, chap->qid);
+ ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+
+ /* DH-HMAC-CHAP Step 3: send reply */
+ dev_dbg(ctrl->device, "%s: qid %d send reply\n",
+ __func__, chap->qid);
+ ret = nvme_auth_set_dhchap_reply_data(ctrl, chap);
+ if (ret < 0) {
+ chap->error = ret;
+ goto fail2;
+ }
+
+ tl = ret;
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+
+ /* DH-HMAC-CHAP Step 4: receive success1 */
+ dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
+ __func__, chap->qid);
+
+ memset(chap->buf, 0, chap->buf_size);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, chap->buf_size, false);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d failed to receive success1, %s %d\n",
+ chap->qid, ret < 0 ? "error" : "nvme status", ret);
+ chap->error = ret;
+ return;
+ }
+ ret = nvme_auth_receive_validate(ctrl, chap->qid,
+ chap->buf, chap->transaction,
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
+ if (ret) {
+ chap->status = ret;
+ chap->error = NVME_SC_AUTH_REQUIRED;
+ return;
+ }
+
+ if (ctrl->ctrl_key) {
+ dev_dbg(ctrl->device,
+ "%s: qid %d controller response\n",
+ __func__, chap->qid);
+ ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+ }
+
+ ret = nvme_auth_process_dhchap_success1(ctrl, chap);
+ if (ret) {
+ /* Controller authentication failed */
+ chap->error = NVME_SC_AUTH_REQUIRED;
+ goto fail2;
+ }
+
+ if (ctrl->ctrl_key) {
+ /* DH-HMAC-CHAP Step 5: send success2 */
+ dev_dbg(ctrl->device, "%s: qid %d send success2\n",
+ __func__, chap->qid);
+ tl = nvme_auth_set_dhchap_success2_data(ctrl, chap);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ if (ret)
+ chap->error = ret;
+ }
+ if (!ret) {
+ chap->error = 0;
+ return;
+ }
+
+fail2:
+ dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
+ __func__, chap->qid, chap->status);
+ tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ /*
+ * only update error if send failure2 failed and no other
+ * error had been set during authentication.
+ */
+ if (ret && !chap->error)
+ chap->error = ret;
+}
+
+int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
+{
+ struct nvme_dhchap_queue_context *chap;
+
+ if (!ctrl->host_key) {
+ dev_warn(ctrl->device, "qid %d: no key\n", qid);
+ return -ENOKEY;
+ }
+
+ if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) {
+ dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid);
+ return -ENOKEY;
+ }
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ /* Check if the context is already queued */
+ list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
+ WARN_ON(!chap->buf);
+ if (chap->qid == qid) {
+ dev_dbg(ctrl->device, "qid %d: re-using context\n", qid);
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ flush_work(&chap->auth_work);
+ __nvme_auth_reset(chap);
+ queue_work(nvme_wq, &chap->auth_work);
+ return 0;
+ }
+ }
+ chap = kzalloc(sizeof(*chap), GFP_KERNEL);
+ if (!chap) {
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ return -ENOMEM;
+ }
+ chap->qid = (qid == NVME_QID_ANY) ? 0 : qid;
+ chap->ctrl = ctrl;
+
+ /*
+ * Allocate a large enough buffer for the entire negotiation:
+	 * 4k should be enough for ffdhe8192.
+ */
+ chap->buf_size = 4096;
+ chap->buf = kzalloc(chap->buf_size, GFP_KERNEL);
+ if (!chap->buf) {
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ kfree(chap);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&chap->auth_work, __nvme_auth_work);
+ list_add(&chap->entry, &ctrl->dhchap_auth_list);
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ queue_work(nvme_wq, &chap->auth_work);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
+
+int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
+{
+ struct nvme_dhchap_queue_context *chap;
+ int ret;
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
+ if (chap->qid != qid)
+ continue;
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ flush_work(&chap->auth_work);
+ ret = chap->error;
+ return ret;
+ }
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ return -ENXIO;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_wait);
+
+void nvme_auth_reset(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dhchap_queue_context *chap;
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ flush_work(&chap->auth_work);
+ __nvme_auth_reset(chap);
+ }
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_reset);
+
+static void nvme_dhchap_auth_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(work, struct nvme_ctrl, dhchap_auth_work);
+ int ret, q;
+
+ /* Authenticate admin queue first */
+ ret = nvme_auth_negotiate(ctrl, 0);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid 0: error %d setting up authentication\n", ret);
+ return;
+ }
+ ret = nvme_auth_wait(ctrl, 0);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid 0: authentication failed\n");
+ return;
+ }
+
+ for (q = 1; q < ctrl->queue_count; q++) {
+ ret = nvme_auth_negotiate(ctrl, q);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d: error %d setting up authentication\n",
+ q, ret);
+ break;
+ }
+ }
+
+ /*
+ * Failure is a soft-state; credentials remain valid until
+ * the controller terminates the connection.
+ */
+}
+
+void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
+{
+ INIT_LIST_HEAD(&ctrl->dhchap_auth_list);
+ INIT_WORK(&ctrl->dhchap_auth_work, nvme_dhchap_auth_work);
+ mutex_init(&ctrl->dhchap_auth_mutex);
+ if (!ctrl->opts)
+ return;
+ nvme_auth_generate_key(ctrl->opts->dhchap_secret, &ctrl->host_key);
+ nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret, &ctrl->ctrl_key);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);
+
+void nvme_auth_stop(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+
+ cancel_work_sync(&ctrl->dhchap_auth_work);
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry)
+ cancel_work_sync(&chap->auth_work);
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_stop);
+
+void nvme_auth_free(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry) {
+ list_del_init(&chap->entry);
+ flush_work(&chap->auth_work);
+ __nvme_auth_free(chap);
+ }
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ if (ctrl->host_key) {
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = NULL;
+ }
+ if (ctrl->ctrl_key) {
+ nvme_auth_free_key(ctrl->ctrl_key);
+ ctrl->ctrl_key = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_auth_free);
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
new file mode 100644
index 000000000000..e958d5015585
--- /dev/null
+++ b/drivers/nvme/host/constants.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVM Express device driver verbose errors
+ * Copyright (c) 2022, Oracle and/or its affiliates
+ */
+
+#include "nvme.h"
+
+static const char * const nvme_ops[] = {
+ [nvme_cmd_flush] = "Flush",
+ [nvme_cmd_write] = "Write",
+ [nvme_cmd_read] = "Read",
+ [nvme_cmd_write_uncor] = "Write Uncorrectable",
+ [nvme_cmd_compare] = "Compare",
+ [nvme_cmd_write_zeroes] = "Write Zeros",
+ [nvme_cmd_dsm] = "Dataset Management",
+ [nvme_cmd_verify] = "Verify",
+ [nvme_cmd_resv_register] = "Reservation Register",
+ [nvme_cmd_resv_report] = "Reservation Report",
+ [nvme_cmd_resv_acquire] = "Reservation Acquire",
+ [nvme_cmd_resv_release] = "Reservation Release",
+ [nvme_cmd_zone_mgmt_send] = "Zone Management Send",
+ [nvme_cmd_zone_mgmt_recv] = "Zone Management Receive",
+ [nvme_cmd_zone_append] = "Zone Management Append",
+};
+
+static const char * const nvme_admin_ops[] = {
+ [nvme_admin_delete_sq] = "Delete SQ",
+ [nvme_admin_create_sq] = "Create SQ",
+ [nvme_admin_get_log_page] = "Get Log Page",
+ [nvme_admin_delete_cq] = "Delete CQ",
+ [nvme_admin_create_cq] = "Create CQ",
+ [nvme_admin_identify] = "Identify",
+ [nvme_admin_abort_cmd] = "Abort Command",
+ [nvme_admin_set_features] = "Set Features",
+ [nvme_admin_get_features] = "Get Features",
+ [nvme_admin_async_event] = "Async Event",
+ [nvme_admin_ns_mgmt] = "Namespace Management",
+ [nvme_admin_activate_fw] = "Activate Firmware",
+ [nvme_admin_download_fw] = "Download Firmware",
+ [nvme_admin_dev_self_test] = "Device Self Test",
+ [nvme_admin_ns_attach] = "Namespace Attach",
+ [nvme_admin_keep_alive] = "Keep Alive",
+ [nvme_admin_directive_send] = "Directive Send",
+ [nvme_admin_directive_recv] = "Directive Receive",
+ [nvme_admin_virtual_mgmt] = "Virtual Management",
+ [nvme_admin_nvme_mi_send] = "NVMe Send MI",
+ [nvme_admin_nvme_mi_recv] = "NVMe Receive MI",
+ [nvme_admin_dbbuf] = "Doorbell Buffer Config",
+ [nvme_admin_format_nvm] = "Format NVM",
+ [nvme_admin_security_send] = "Security Send",
+ [nvme_admin_security_recv] = "Security Receive",
+ [nvme_admin_sanitize_nvm] = "Sanitize NVM",
+ [nvme_admin_get_lba_status] = "Get LBA Status",
+};
+
+static const char * const nvme_statuses[] = {
+ [NVME_SC_SUCCESS] = "Success",
+ [NVME_SC_INVALID_OPCODE] = "Invalid Command Opcode",
+ [NVME_SC_INVALID_FIELD] = "Invalid Field in Command",
+ [NVME_SC_CMDID_CONFLICT] = "Command ID Conflict",
+ [NVME_SC_DATA_XFER_ERROR] = "Data Transfer Error",
+ [NVME_SC_POWER_LOSS] = "Commands Aborted due to Power Loss Notification",
+ [NVME_SC_INTERNAL] = "Internal Error",
+ [NVME_SC_ABORT_REQ] = "Command Abort Requested",
+ [NVME_SC_ABORT_QUEUE] = "Command Aborted due to SQ Deletion",
+ [NVME_SC_FUSED_FAIL] = "Command Aborted due to Failed Fused Command",
+ [NVME_SC_FUSED_MISSING] = "Command Aborted due to Missing Fused Command",
+ [NVME_SC_INVALID_NS] = "Invalid Namespace or Format",
+ [NVME_SC_CMD_SEQ_ERROR] = "Command Sequence Error",
+ [NVME_SC_SGL_INVALID_LAST] = "Invalid SGL Segment Descriptor",
+ [NVME_SC_SGL_INVALID_COUNT] = "Invalid Number of SGL Descriptors",
+ [NVME_SC_SGL_INVALID_DATA] = "Data SGL Length Invalid",
+ [NVME_SC_SGL_INVALID_METADATA] = "Metadata SGL Length Invalid",
+ [NVME_SC_SGL_INVALID_TYPE] = "SGL Descriptor Type Invalid",
+ [NVME_SC_CMB_INVALID_USE] = "Invalid Use of Controller Memory Buffer",
+ [NVME_SC_PRP_INVALID_OFFSET] = "PRP Offset Invalid",
+ [NVME_SC_ATOMIC_WU_EXCEEDED] = "Atomic Write Unit Exceeded",
+ [NVME_SC_OP_DENIED] = "Operation Denied",
+ [NVME_SC_SGL_INVALID_OFFSET] = "SGL Offset Invalid",
+ [NVME_SC_RESERVED] = "Reserved",
+ [NVME_SC_HOST_ID_INCONSIST] = "Host Identifier Inconsistent Format",
+ [NVME_SC_KA_TIMEOUT_EXPIRED] = "Keep Alive Timeout Expired",
+ [NVME_SC_KA_TIMEOUT_INVALID] = "Keep Alive Timeout Invalid",
+ [NVME_SC_ABORTED_PREEMPT_ABORT] = "Command Aborted due to Preempt and Abort",
+ [NVME_SC_SANITIZE_FAILED] = "Sanitize Failed",
+ [NVME_SC_SANITIZE_IN_PROGRESS] = "Sanitize In Progress",
+ [NVME_SC_SGL_INVALID_GRANULARITY] = "SGL Data Block Granularity Invalid",
+ [NVME_SC_CMD_NOT_SUP_CMB_QUEUE] = "Command Not Supported for Queue in CMB",
+ [NVME_SC_NS_WRITE_PROTECTED] = "Namespace is Write Protected",
+ [NVME_SC_CMD_INTERRUPTED] = "Command Interrupted",
+ [NVME_SC_TRANSIENT_TR_ERR] = "Transient Transport Error",
+ [NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY] = "Admin Command Media Not Ready",
+ [NVME_SC_INVALID_IO_CMD_SET] = "Invalid IO Command Set",
+ [NVME_SC_LBA_RANGE] = "LBA Out of Range",
+ [NVME_SC_CAP_EXCEEDED] = "Capacity Exceeded",
+ [NVME_SC_NS_NOT_READY] = "Namespace Not Ready",
+ [NVME_SC_RESERVATION_CONFLICT] = "Reservation Conflict",
+ [NVME_SC_FORMAT_IN_PROGRESS] = "Format In Progress",
+ [NVME_SC_CQ_INVALID] = "Completion Queue Invalid",
+ [NVME_SC_QID_INVALID] = "Invalid Queue Identifier",
+ [NVME_SC_QUEUE_SIZE] = "Invalid Queue Size",
+ [NVME_SC_ABORT_LIMIT] = "Abort Command Limit Exceeded",
+ [NVME_SC_ABORT_MISSING] = "Reserved", /* XXX */
+ [NVME_SC_ASYNC_LIMIT] = "Asynchronous Event Request Limit Exceeded",
+ [NVME_SC_FIRMWARE_SLOT] = "Invalid Firmware Slot",
+ [NVME_SC_FIRMWARE_IMAGE] = "Invalid Firmware Image",
+ [NVME_SC_INVALID_VECTOR] = "Invalid Interrupt Vector",
+ [NVME_SC_INVALID_LOG_PAGE] = "Invalid Log Page",
+ [NVME_SC_INVALID_FORMAT] = "Invalid Format",
+ [NVME_SC_FW_NEEDS_CONV_RESET] = "Firmware Activation Requires Conventional Reset",
+ [NVME_SC_INVALID_QUEUE] = "Invalid Queue Deletion",
+ [NVME_SC_FEATURE_NOT_SAVEABLE] = "Feature Identifier Not Saveable",
+ [NVME_SC_FEATURE_NOT_CHANGEABLE] = "Feature Not Changeable",
+ [NVME_SC_FEATURE_NOT_PER_NS] = "Feature Not Namespace Specific",
+ [NVME_SC_FW_NEEDS_SUBSYS_RESET] = "Firmware Activation Requires NVM Subsystem Reset",
+ [NVME_SC_FW_NEEDS_RESET] = "Firmware Activation Requires Reset",
+ [NVME_SC_FW_NEEDS_MAX_TIME] = "Firmware Activation Requires Maximum Time Violation",
+ [NVME_SC_FW_ACTIVATE_PROHIBITED] = "Firmware Activation Prohibited",
+ [NVME_SC_OVERLAPPING_RANGE] = "Overlapping Range",
+ [NVME_SC_NS_INSUFFICIENT_CAP] = "Namespace Insufficient Capacity",
+ [NVME_SC_NS_ID_UNAVAILABLE] = "Namespace Identifier Unavailable",
+ [NVME_SC_NS_ALREADY_ATTACHED] = "Namespace Already Attached",
+ [NVME_SC_NS_IS_PRIVATE] = "Namespace Is Private",
+ [NVME_SC_NS_NOT_ATTACHED] = "Namespace Not Attached",
+ [NVME_SC_THIN_PROV_NOT_SUPP] = "Thin Provisioning Not Supported",
+ [NVME_SC_CTRL_LIST_INVALID] = "Controller List Invalid",
+ [NVME_SC_SELT_TEST_IN_PROGRESS] = "Device Self-test In Progress",
+ [NVME_SC_BP_WRITE_PROHIBITED] = "Boot Partition Write Prohibited",
+ [NVME_SC_CTRL_ID_INVALID] = "Invalid Controller Identifier",
+ [NVME_SC_SEC_CTRL_STATE_INVALID] = "Invalid Secondary Controller State",
+ [NVME_SC_CTRL_RES_NUM_INVALID] = "Invalid Number of Controller Resources",
+ [NVME_SC_RES_ID_INVALID] = "Invalid Resource Identifier",
+ [NVME_SC_PMR_SAN_PROHIBITED] = "Sanitize Prohibited",
+ [NVME_SC_ANA_GROUP_ID_INVALID] = "ANA Group Identifier Invalid",
+ [NVME_SC_ANA_ATTACH_FAILED] = "ANA Attach Failed",
+ [NVME_SC_BAD_ATTRIBUTES] = "Conflicting Attributes",
+ [NVME_SC_INVALID_PI] = "Invalid Protection Information",
+ [NVME_SC_READ_ONLY] = "Attempted Write to Read Only Range",
+ [NVME_SC_ONCS_NOT_SUPPORTED] = "ONCS Not Supported",
+ [NVME_SC_ZONE_BOUNDARY_ERROR] = "Zoned Boundary Error",
+ [NVME_SC_ZONE_FULL] = "Zone Is Full",
+ [NVME_SC_ZONE_READ_ONLY] = "Zone Is Read Only",
+ [NVME_SC_ZONE_OFFLINE] = "Zone Is Offline",
+ [NVME_SC_ZONE_INVALID_WRITE] = "Zone Invalid Write",
+ [NVME_SC_ZONE_TOO_MANY_ACTIVE] = "Too Many Active Zones",
+ [NVME_SC_ZONE_TOO_MANY_OPEN] = "Too Many Open Zones",
+ [NVME_SC_ZONE_INVALID_TRANSITION] = "Invalid Zone State Transition",
+ [NVME_SC_WRITE_FAULT] = "Write Fault",
+ [NVME_SC_READ_ERROR] = "Unrecovered Read Error",
+ [NVME_SC_GUARD_CHECK] = "End-to-end Guard Check Error",
+ [NVME_SC_APPTAG_CHECK] = "End-to-end Application Tag Check Error",
+ [NVME_SC_REFTAG_CHECK] = "End-to-end Reference Tag Check Error",
+ [NVME_SC_COMPARE_FAILED] = "Compare Failure",
+ [NVME_SC_ACCESS_DENIED] = "Access Denied",
+ [NVME_SC_UNWRITTEN_BLOCK] = "Deallocated or Unwritten Logical Block",
+ [NVME_SC_INTERNAL_PATH_ERROR] = "Internal Pathing Error",
+ [NVME_SC_ANA_PERSISTENT_LOSS] = "Asymmetric Access Persistent Loss",
+ [NVME_SC_ANA_INACCESSIBLE] = "Asymmetric Access Inaccessible",
+ [NVME_SC_ANA_TRANSITION] = "Asymmetric Access Transition",
+ [NVME_SC_CTRL_PATH_ERROR] = "Controller Pathing Error",
+ [NVME_SC_HOST_PATH_ERROR] = "Host Pathing Error",
+ [NVME_SC_HOST_ABORTED_CMD] = "Host Aborted Command",
+};
+
+const unsigned char *nvme_get_error_status_str(u16 status)
+{
+ status &= 0x7ff;
+ if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status])
+		return nvme_statuses[status];
+ return "Unknown";
+}
+
+const unsigned char *nvme_get_opcode_str(u8 opcode)
+{
+ if (opcode < ARRAY_SIZE(nvme_ops) && nvme_ops[opcode])
+ return nvme_ops[opcode];
+ return "Unknown";
+}
+EXPORT_SYMBOL_GPL(nvme_get_opcode_str);
+
+const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+{
+ if (opcode < ARRAY_SIZE(nvme_admin_ops) && nvme_admin_ops[opcode])
+ return nvme_admin_ops[opcode];
+ return "Unknown";
+}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index a4d8c90ee7cc..da55ce45ac70 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -6,30 +6,40 @@
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
+#include <linux/blk-integrity.h>
+#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
-#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
-#include <linux/t10-pi.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>
#include "nvme.h"
#include "fabrics.h"
+#include <linux/nvme-auth.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
#define NVME_MINORS (1U << MINORBITS)
+struct nvme_ns_info {
+ struct nvme_ns_ids ids;
+ u32 nsid;
+ __le32 anagrpid;
+ bool is_shared;
+ bool is_readonly;
+ bool is_ready;
+};
+
unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
@@ -57,9 +67,25 @@ static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
-static bool streams;
-module_param(streams, bool, 0644);
-MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
+static unsigned long apst_primary_timeout_ms = 100;
+module_param(apst_primary_timeout_ms, ulong, 0644);
+MODULE_PARM_DESC(apst_primary_timeout_ms,
+ "primary APST timeout in ms");
+
+static unsigned long apst_secondary_timeout_ms = 2000;
+module_param(apst_secondary_timeout_ms, ulong, 0644);
+MODULE_PARM_DESC(apst_secondary_timeout_ms,
+ "secondary APST timeout in ms");
+
+static unsigned long apst_primary_latency_tol_us = 15000;
+module_param(apst_primary_latency_tol_us, ulong, 0644);
+MODULE_PARM_DESC(apst_primary_latency_tol_us,
+ "primary APST latency tolerance in us");
+
+static unsigned long apst_secondary_latency_tol_us = 100000;
+module_param(apst_secondary_latency_tol_us, ulong, 0644);
+MODULE_PARM_DESC(apst_secondary_latency_tol_us,
+ "secondary APST latency tolerance in us");
/*
* nvme_wq - hosts nvme related works that are not reset or delete
@@ -85,33 +111,21 @@ static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);
static DEFINE_IDA(nvme_instance_ida);
-static dev_t nvme_chr_devt;
+static dev_t nvme_ctrl_base_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;
-static int nvme_revalidate_disk(struct gendisk *disk);
+static DEFINE_IDA(nvme_ns_chr_minor_ida);
+static dev_t nvme_ns_chr_devt;
+static struct class *nvme_ns_chr_class;
+
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
unsigned nsid);
+static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
+ struct nvme_command *cmd);
-static void nvme_set_queue_dying(struct nvme_ns *ns)
-{
- /*
- * Revalidating a dead namespace sets capacity to 0. This will end
- * buffered writers dirtying pages that can't be synced.
- */
- if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
- return;
- blk_set_queue_dying(ns->queue);
- /* Forcibly unquiesce queues to avoid blocking dispatch */
- blk_mq_unquiesce_queue(ns->queue);
- /*
- * Revalidate after unblocking dispatchers that may be holding bd_butex
- */
- revalidate_disk(ns->disk);
-}
-
-static void nvme_queue_scan(struct nvme_ctrl *ctrl)
+void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
/*
* Only new queue scan work when admin and IO queues are both alive
@@ -136,6 +150,38 @@ int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
+static void nvme_failfast_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
+ struct nvme_ctrl, failfast_work);
+
+ if (ctrl->state != NVME_CTRL_CONNECTING)
+ return;
+
+ set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
+ dev_info(ctrl->device, "failfast expired\n");
+ nvme_kick_requeue_lists(ctrl);
+}
+
+static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
+{
+ if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
+ return;
+
+ schedule_delayed_work(&ctrl->failfast_work,
+ ctrl->opts->fast_io_fail_tmo * HZ);
+}
+
+static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
+{
+ if (!ctrl->opts)
+ return;
+
+ cancel_delayed_work_sync(&ctrl->failfast_work);
+ clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
+}
+
+
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
@@ -159,19 +205,17 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
return ret;
}
-EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
dev_info(ctrl->device,
- "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);
+ "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));
flush_work(&ctrl->reset_work);
nvme_stop_ctrl(ctrl);
nvme_remove_namespaces(ctrl);
ctrl->ops->delete_ctrl(ctrl);
nvme_uninit_ctrl(ctrl);
- nvme_put_ctrl(ctrl);
}
static void nvme_delete_ctrl_work(struct work_struct *work)
@@ -192,26 +236,16 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
-static int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
+static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
- int ret = 0;
-
/*
* Keep a reference until nvme_do_delete_ctrl() complete,
* since ->delete_ctrl can free the controller.
*/
nvme_get_ctrl(ctrl);
- if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
- ret = -EBUSY;
- if (!ret)
+ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
nvme_do_delete_ctrl(ctrl);
nvme_put_ctrl(ctrl);
- return ret;
-}
-
-static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
-{
- return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}
static blk_status_t nvme_error_status(u16 status)
@@ -247,68 +281,168 @@ static blk_status_t nvme_error_status(u16 status)
return BLK_STS_NEXUS;
case NVME_SC_HOST_PATH_ERROR:
return BLK_STS_TRANSPORT;
+ case NVME_SC_ZONE_TOO_MANY_ACTIVE:
+ return BLK_STS_ZONE_ACTIVE_RESOURCE;
+ case NVME_SC_ZONE_TOO_MANY_OPEN:
+ return BLK_STS_ZONE_OPEN_RESOURCE;
default:
return BLK_STS_IOERR;
}
}
-static inline bool nvme_req_needs_retry(struct request *req)
-{
- if (blk_noretry_request(req))
- return false;
- if (nvme_req(req)->status & NVME_SC_DNR)
- return false;
- if (nvme_req(req)->retries >= nvme_max_retries)
- return false;
- return true;
-}
-
static void nvme_retry_req(struct request *req)
{
- struct nvme_ns *ns = req->q->queuedata;
unsigned long delay = 0;
u16 crd;
/* The mask and shift result must be <= 3 */
crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
- if (ns && crd)
- delay = ns->ctrl->crdt[crd - 1] * 100;
+ if (crd)
+ delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
nvme_req(req)->retries++;
blk_mq_requeue_request(req, false);
blk_mq_delay_kick_requeue_list(req->q, delay);
}
-void nvme_complete_rq(struct request *req)
+static void nvme_log_error(struct request *req)
{
- blk_status_t status = nvme_error_status(nvme_req(req)->status);
+ struct nvme_ns *ns = req->q->queuedata;
+ struct nvme_request *nr = nvme_req(req);
- trace_nvme_complete_rq(req);
+ if (ns) {
+ pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
+ ns->disk ? ns->disk->disk_name : "?",
+ nvme_get_opcode_str(nr->cmd->common.opcode),
+ nr->cmd->common.opcode,
+ (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)),
+ (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift,
+ nvme_get_error_status_str(nr->status),
+ nr->status >> 8 & 7, /* Status Code Type */
+ nr->status & 0xff, /* Status Code */
+ nr->status & NVME_SC_MORE ? "MORE " : "",
+ nr->status & NVME_SC_DNR ? "DNR " : "");
+ return;
+ }
- nvme_cleanup_cmd(req);
+ pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n",
+ dev_name(nr->ctrl->device),
+ nvme_get_admin_opcode_str(nr->cmd->common.opcode),
+ nr->cmd->common.opcode,
+ nvme_get_error_status_str(nr->status),
+ nr->status >> 8 & 7, /* Status Code Type */
+ nr->status & 0xff, /* Status Code */
+ nr->status & NVME_SC_MORE ? "MORE " : "",
+ nr->status & NVME_SC_DNR ? "DNR " : "");
+}
+
+enum nvme_disposition {
+ COMPLETE,
+ RETRY,
+ FAILOVER,
+ AUTHENTICATE,
+};
- if (nvme_req(req)->ctrl->kas)
- nvme_req(req)->ctrl->comp_seen = true;
+static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
+{
+ if (likely(nvme_req(req)->status == 0))
+ return COMPLETE;
- if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
- if ((req->cmd_flags & REQ_NVME_MPATH) &&
- blk_path_error(status)) {
- nvme_failover_req(req);
- return;
- }
+ if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
+ return AUTHENTICATE;
- if (!blk_queue_dying(req->q)) {
- nvme_retry_req(req);
- return;
- }
+ if (blk_noretry_request(req) ||
+ (nvme_req(req)->status & NVME_SC_DNR) ||
+ nvme_req(req)->retries >= nvme_max_retries)
+ return COMPLETE;
+
+ if (req->cmd_flags & REQ_NVME_MPATH) {
+ if (nvme_is_path_error(nvme_req(req)->status) ||
+ blk_queue_dying(req->q))
+ return FAILOVER;
+ } else {
+ if (blk_queue_dying(req->q))
+ return COMPLETE;
}
- nvme_trace_bio_complete(req, status);
+ return RETRY;
+}
+
+static inline void nvme_end_req_zoned(struct request *req)
+{
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ req_op(req) == REQ_OP_ZONE_APPEND)
+ req->__sector = nvme_lba_to_sect(req->q->queuedata,
+ le64_to_cpu(nvme_req(req)->result.u64));
+}
+
+static inline void nvme_end_req(struct request *req)
+{
+ blk_status_t status = nvme_error_status(nvme_req(req)->status);
+
+ if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
+ nvme_log_error(req);
+ nvme_end_req_zoned(req);
+ nvme_trace_bio_complete(req);
blk_mq_end_request(req, status);
}
+
+void nvme_complete_rq(struct request *req)
+{
+ struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
+
+ trace_nvme_complete_rq(req);
+ nvme_cleanup_cmd(req);
+
+ if (ctrl->kas)
+ ctrl->comp_seen = true;
+
+ switch (nvme_decide_disposition(req)) {
+ case COMPLETE:
+ nvme_end_req(req);
+ return;
+ case RETRY:
+ nvme_retry_req(req);
+ return;
+ case FAILOVER:
+ nvme_failover_req(req);
+ return;
+ case AUTHENTICATE:
+#ifdef CONFIG_NVME_AUTH
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+ nvme_retry_req(req);
+#else
+ nvme_end_req(req);
+#endif
+ return;
+ }
+}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
-bool nvme_cancel_request(struct request *req, void *data, bool reserved)
+void nvme_complete_batch_req(struct request *req)
+{
+ trace_nvme_complete_rq(req);
+ nvme_cleanup_cmd(req);
+ nvme_end_req_zoned(req);
+}
+EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
+
+/*
+ * Called to unwind from ->queue_rq on a failed command submission so that the
+ * multipathing code gets called to potentially failover to another path.
+ * The caller needs to unwind all transport specific resource allocations and
+ * must return propagate the return value.
+ */
+blk_status_t nvme_host_path_error(struct request *req)
+{
+ nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
+ blk_mq_set_request_complete(req);
+ nvme_complete_rq(req);
+ return BLK_STS_OK;
+}
+EXPORT_SYMBOL_GPL(nvme_host_path_error);
+
+bool nvme_cancel_request(struct request *req, void *data)
{
dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
"Cancelling I/O %d", req->tag);
@@ -318,11 +452,32 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
return true;
nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
+ nvme_req(req)->flags |= NVME_REQ_CANCELLED;
blk_mq_complete_request(req);
return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
+void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->tagset) {
+ blk_mq_tagset_busy_iter(ctrl->tagset,
+ nvme_cancel_request, ctrl);
+ blk_mq_tagset_wait_completed_request(ctrl->tagset);
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
+
+void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->admin_tagset) {
+ blk_mq_tagset_busy_iter(ctrl->admin_tagset,
+ nvme_cancel_request, ctrl);
+ blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
+
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state)
{
@@ -340,7 +495,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
changed = true;
- /* FALLTHRU */
+ fallthrough;
default:
break;
}
@@ -350,7 +505,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
changed = true;
- /* FALLTHRU */
+ fallthrough;
default:
break;
}
@@ -360,7 +515,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
case NVME_CTRL_NEW:
case NVME_CTRL_RESETTING:
changed = true;
- /* FALLTHRU */
+ fallthrough;
default:
break;
}
@@ -371,7 +526,17 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
changed = true;
- /* FALLTHRU */
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case NVME_CTRL_DELETING_NOIO:
+ switch (old_state) {
+ case NVME_CTRL_DELETING:
+ case NVME_CTRL_DEAD:
+ changed = true;
+ fallthrough;
default:
break;
}
@@ -380,7 +545,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (old_state) {
case NVME_CTRL_DELETING:
changed = true;
- /* FALLTHRU */
+ fallthrough;
default:
break;
}
@@ -395,8 +560,17 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
}
spin_unlock_irqrestore(&ctrl->lock, flags);
- if (changed && ctrl->state == NVME_CTRL_LIVE)
+ if (!changed)
+ return false;
+
+ if (ctrl->state == NVME_CTRL_LIVE) {
+ if (old_state == NVME_CTRL_CONNECTING)
+ nvme_stop_failfast_work(ctrl);
nvme_kick_requeue_lists(ctrl);
+ } else if (ctrl->state == NVME_CTRL_CONNECTING &&
+ old_state == NVME_CTRL_RESETTING) {
+ nvme_start_failfast_work(ctrl);
+ }
return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
@@ -413,6 +587,7 @@ static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
case NVME_CTRL_CONNECTING:
return false;
case NVME_CTRL_DELETING:
+ case NVME_CTRL_DELETING_NOIO:
case NVME_CTRL_DEAD:
return true;
default:
@@ -440,14 +615,18 @@ static void nvme_free_ns_head(struct kref *ref)
container_of(ref, struct nvme_ns_head, ref);
nvme_mpath_remove_disk(head);
- ida_simple_remove(&head->subsys->ns_ida, head->instance);
- list_del_init(&head->entry);
+ ida_free(&head->subsys->ns_ida, head->instance);
cleanup_srcu_struct(&head->srcu);
nvme_put_subsystem(head->subsys);
kfree(head);
}
-static void nvme_put_ns_head(struct nvme_ns_head *head)
+bool nvme_tryget_ns_head(struct nvme_ns_head *head)
+{
+ return kref_get_unless_zero(&head->ref);
+}
+
+void nvme_put_ns_head(struct nvme_ns_head *head)
{
kref_put(&head->ref, nvme_free_ns_head);
}
@@ -456,154 +635,118 @@ static void nvme_free_ns(struct kref *kref)
{
struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
- if (ns->ndev)
- nvme_nvm_unregister(ns);
-
put_disk(ns->disk);
nvme_put_ns_head(ns->head);
nvme_put_ctrl(ns->ctrl);
kfree(ns);
}
-static void nvme_put_ns(struct nvme_ns *ns)
+static inline bool nvme_get_ns(struct nvme_ns *ns)
+{
+ return kref_get_unless_zero(&ns->kref);
+}
+
+void nvme_put_ns(struct nvme_ns *ns)
{
kref_put(&ns->kref, nvme_free_ns);
}
+EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
static inline void nvme_clear_nvme_request(struct request *req)
{
- if (!(req->rq_flags & RQF_DONTPREP)) {
- nvme_req(req)->retries = 0;
- nvme_req(req)->flags = 0;
- req->rq_flags |= RQF_DONTPREP;
- }
+ nvme_req(req)->status = 0;
+ nvme_req(req)->retries = 0;
+ nvme_req(req)->flags = 0;
+ req->rq_flags |= RQF_DONTPREP;
}
-struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
+/* initialize a passthrough request */
+void nvme_init_request(struct request *req, struct nvme_command *cmd)
{
- unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
- struct request *req;
+ if (req->q->queuedata)
+ req->timeout = NVME_IO_TIMEOUT;
+ else /* no queuedata implies admin queue */
+ req->timeout = NVME_ADMIN_TIMEOUT;
- if (qid == NVME_QID_ANY) {
- req = blk_mq_alloc_request(q, op, flags);
- } else {
- req = blk_mq_alloc_request_hctx(q, op, flags,
- qid ? qid - 1 : 0);
- }
- if (IS_ERR(req))
- return req;
+ /* passthru commands should let the driver set the SGL flags */
+ cmd->common.flags &= ~NVME_CMD_SGL_ALL;
req->cmd_flags |= REQ_FAILFAST_DRIVER;
+ if (req->mq_hctx->type == HCTX_TYPE_POLL)
+ req->cmd_flags |= REQ_POLLED;
nvme_clear_nvme_request(req);
- nvme_req(req)->cmd = cmd;
-
- return req;
-}
-EXPORT_SYMBOL_GPL(nvme_alloc_request);
-
-static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
-{
- struct nvme_command c;
-
- memset(&c, 0, sizeof(c));
-
- c.directive.opcode = nvme_admin_directive_send;
- c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
- c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
- c.directive.dtype = NVME_DIR_IDENTIFY;
- c.directive.tdtype = NVME_DIR_STREAMS;
- c.directive.endir = enable ? NVME_DIR_ENDIR : 0;
-
- return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
-}
-
-static int nvme_disable_streams(struct nvme_ctrl *ctrl)
-{
- return nvme_toggle_streams(ctrl, false);
-}
-
-static int nvme_enable_streams(struct nvme_ctrl *ctrl)
-{
- return nvme_toggle_streams(ctrl, true);
+ req->rq_flags |= RQF_QUIET;
+ memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
}
+EXPORT_SYMBOL_GPL(nvme_init_request);
-static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
- struct streams_directive_params *s, u32 nsid)
+/*
+ * For a command we are not in a state to send to the device, the default
+ * action is to busy it and retry it after the controller state is recovered.
+ * However, if the controller is deleting, or if anything is marked for
+ * failfast or nvme multipath, it is failed immediately.
+ *
+ * Note: commands used to initialize the controller will be marked for failfast.
+ * Note: nvme cli/ioctl commands are marked for failfast.
+ */
+blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
+ struct request *rq)
{
- struct nvme_command c;
-
- memset(&c, 0, sizeof(c));
- memset(s, 0, sizeof(*s));
-
- c.directive.opcode = nvme_admin_directive_recv;
- c.directive.nsid = cpu_to_le32(nsid);
- c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
- c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
- c.directive.dtype = NVME_DIR_STREAMS;
-
- return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
+ if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
+ ctrl->state != NVME_CTRL_DELETING &&
+ ctrl->state != NVME_CTRL_DEAD &&
+ !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
+ !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+ return BLK_STS_RESOURCE;
+ return nvme_host_path_error(rq);
}
+EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
-static int nvme_configure_directives(struct nvme_ctrl *ctrl)
+bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+ bool queue_live)
{
- struct streams_directive_params s;
- int ret;
-
- if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
- return 0;
- if (!streams)
- return 0;
-
- ret = nvme_enable_streams(ctrl);
- if (ret)
- return ret;
-
- ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
- if (ret)
- return ret;
-
- ctrl->nssa = le16_to_cpu(s.nssa);
- if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
- dev_info(ctrl->device, "too few streams (%u) available\n",
- ctrl->nssa);
- nvme_disable_streams(ctrl);
- return 0;
- }
+ struct nvme_request *req = nvme_req(rq);
- ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
- dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
- return 0;
-}
-
-/*
- * Check if 'req' has a write hint associated with it. If it does, assign
- * a valid namespace stream to the write.
- */
-static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
- struct request *req, u16 *control,
- u32 *dsmgmt)
-{
- enum rw_hint streamid = req->write_hint;
-
- if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
- streamid = 0;
- else {
- streamid--;
- if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
- return;
+ /*
+ * Currently we have a problem sending passthru commands
+ * on the admin_q if the controller is not LIVE, because we can't
+ * make sure that they are going out after the admin connect,
+ * controller enable and/or other commands in the initialization
+ * sequence. Until the controller is LIVE, fail with
+ * BLK_STS_RESOURCE so that they will be rescheduled.
+ */
+ if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
+ return false;
- *control |= NVME_RW_DTYPE_STREAMS;
- *dsmgmt |= streamid << 16;
+ if (ctrl->ops->flags & NVME_F_FABRICS) {
+ /*
+ * Only allow commands on a live queue, except for the connect
+ * command, which is required to set the queue live in the
+ * appropriate states.
+ */
+ switch (ctrl->state) {
+ case NVME_CTRL_CONNECTING:
+ if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
+ (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
+ req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send ||
+ req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive))
+ return true;
+ break;
+ default:
+ break;
+ case NVME_CTRL_DEAD:
+ return false;
+ }
}
- if (streamid < ARRAY_SIZE(req->q->write_hints))
- req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
+ return queue_live;
}
+EXPORT_SYMBOL_GPL(__nvme_check_ready);
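/*
 * Illustrative sketch (editor's addition, not part of this patch): the gate
 * a fabrics transport typically places at the top of ->queue_rq, combining
 * __nvme_check_ready() with nvme_fail_nonready_command(). "queue_ready" is a
 * hypothetical per-queue flag tracking whether the queue has been connected.
 */
static blk_status_t example_queue_rq_gate(struct nvme_ctrl *ctrl,
		struct request *rq, bool queue_ready)
{
	if (!__nvme_check_ready(ctrl, rq, queue_ready))
		return nvme_fail_nonready_command(ctrl, rq);
	/* ... normal command setup and submission would follow here ... */
	return BLK_STS_OK;
}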
static inline void nvme_setup_flush(struct nvme_ns *ns,
struct nvme_command *cmnd)
{
+ memset(cmnd, 0, sizeof(*cmnd));
cmnd->common.opcode = nvme_cmd_flush;
cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}
@@ -655,6 +798,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
return BLK_STS_IOERR;
}
+ memset(cmnd, 0, sizeof(*cmnd));
cmnd->dsm.opcode = nvme_cmd_dsm;
cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
cmnd->dsm.nr = cpu_to_le32(segments - 1);
@@ -668,9 +812,35 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
return BLK_STS_OK;
}
+static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
+ struct request *req)
+{
+ u32 upper, lower;
+ u64 ref48;
+
+ /* both rw and write zeroes share the same reftag format */
+ switch (ns->guard_type) {
+ case NVME_NVM_NS_16B_GUARD:
+ cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
+ break;
+ case NVME_NVM_NS_64B_GUARD:
+ ref48 = ext_pi_ref_tag(req);
+ lower = lower_32_bits(ref48);
+ upper = upper_32_bits(ref48);
+
+ cmnd->rw.reftag = cpu_to_le32(lower);
+ cmnd->rw.cdw3 = cpu_to_le32(upper);
+ break;
+ default:
+ break;
+ }
+}
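/*
 * Standalone illustration (editor's addition): how a 48-bit reference tag is
 * split across the 32-bit reftag field and CDW3, mirroring the 64B-guard case
 * above. Pure arithmetic, compilable in userspace.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ref48 = 0x123456789abcULL;	/* example 48-bit tag */
	uint32_t lower = (uint32_t)ref48;	/* goes into cmnd->rw.reftag */
	uint32_t upper = (uint32_t)(ref48 >> 32);	/* goes into cmnd->rw.cdw3 */

	printf("reftag=0x%08x cdw3=0x%08x\n", lower, upper);	/* 0x56789abc 0x00001234 */
	return 0;
}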
+
static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
struct request *req, struct nvme_command *cmnd)
{
+ memset(cmnd, 0, sizeof(*cmnd));
+
if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
return nvme_setup_discard(ns, req, cmnd);
@@ -680,14 +850,25 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
cmnd->write_zeroes.length =
cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
- cmnd->write_zeroes.control = 0;
+
+ if (nvme_ns_has_pi(ns)) {
+ cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);
+
+ switch (ns->pi_type) {
+ case NVME_NS_DPS_PI_TYPE1:
+ case NVME_NS_DPS_PI_TYPE2:
+ nvme_set_ref_tag(ns, cmnd, req);
+ break;
+ }
+ }
+
return BLK_STS_OK;
}
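/*
 * Worked example (editor's addition): the length field above is zero-based.
 * For a 4 KiB request on a 512-byte-LBA namespace (lba_shift == 9), the
 * command covers 8 blocks and the on-the-wire length is 7.
 */
#include <stdio.h>

int main(void)
{
	unsigned int rq_bytes = 4096;
	unsigned int lba_shift = 9;
	unsigned int nlb = (rq_bytes >> lba_shift) - 1;	/* 0's based */

	printf("length field = %u\n", nlb);	/* prints 7 */
	return 0;
}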
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
- struct request *req, struct nvme_command *cmnd)
+ struct request *req, struct nvme_command *cmnd,
+ enum nvme_opcode op)
{
- struct nvme_ctrl *ctrl = ns->ctrl;
u16 control = 0;
u32 dsmgmt = 0;
@@ -699,13 +880,17 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
if (req->cmd_flags & REQ_RAHEAD)
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
- cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
+ cmnd->rw.opcode = op;
+ cmnd->rw.flags = 0;
cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
+ cmnd->rw.cdw2 = 0;
+ cmnd->rw.cdw3 = 0;
+ cmnd->rw.metadata = 0;
cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
-
- if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
- nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
+ cmnd->rw.reftag = 0;
+ cmnd->rw.apptag = 0;
+ cmnd->rw.appmask = 0;
if (ns->ms) {
/*
@@ -728,7 +913,9 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
case NVME_NS_DPS_PI_TYPE2:
control |= NVME_RW_PRINFO_PRCHK_GUARD |
NVME_RW_PRINFO_PRCHK_REF;
- cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
+ if (op == nvme_cmd_zone_append)
+ control |= NVME_RW_APPEND_PIREMAP;
+ nvme_set_ref_tag(ns, cmnd, req);
break;
}
}
@@ -741,33 +928,45 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
void nvme_cleanup_cmd(struct request *req)
{
if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
- struct nvme_ns *ns = req->rq_disk->private_data;
- struct page *page = req->special_vec.bv_page;
+ struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
- if (page == ns->ctrl->discard_page)
- clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
+ if (req->special_vec.bv_page == ctrl->discard_page)
+ clear_bit_unlock(0, &ctrl->discard_page_busy);
else
- kfree(page_address(page) + req->special_vec.bv_offset);
+ kfree(bvec_virt(&req->special_vec));
}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
-blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
- struct nvme_command *cmd)
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
{
+ struct nvme_command *cmd = nvme_req(req)->cmd;
blk_status_t ret = BLK_STS_OK;
- nvme_clear_nvme_request(req);
+ if (!(req->rq_flags & RQF_DONTPREP))
+ nvme_clear_nvme_request(req);
- memset(cmd, 0, sizeof(*cmd));
switch (req_op(req)) {
case REQ_OP_DRV_IN:
case REQ_OP_DRV_OUT:
- memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
+ /* these are set up prior to execution in nvme_init_request() */
break;
case REQ_OP_FLUSH:
nvme_setup_flush(ns, cmd);
break;
+ case REQ_OP_ZONE_RESET_ALL:
+ case REQ_OP_ZONE_RESET:
+ ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
+ break;
+ case REQ_OP_ZONE_OPEN:
+ ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
+ break;
+ case REQ_OP_ZONE_CLOSE:
+ ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
+ break;
+ case REQ_OP_ZONE_FINISH:
+ ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
+ break;
case REQ_OP_WRITE_ZEROES:
ret = nvme_setup_write_zeroes(ns, req, cmd);
break;
@@ -775,43 +974,41 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
ret = nvme_setup_discard(ns, req, cmd);
break;
case REQ_OP_READ:
+ ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
+ break;
case REQ_OP_WRITE:
- ret = nvme_setup_rw(ns, req, cmd);
+ ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
+ break;
+ case REQ_OP_ZONE_APPEND:
+ ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
break;
default:
WARN_ON_ONCE(1);
return BLK_STS_IOERR;
}
- cmd->common.command_id = req->tag;
+ cmd->common.command_id = nvme_cid(req);
trace_nvme_setup_cmd(req, cmd);
return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
-static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
-{
- struct completion *waiting = rq->end_io_data;
-
- rq->end_io_data = NULL;
- complete(waiting);
-}
-
-static void nvme_execute_rq_polled(struct request_queue *q,
- struct gendisk *bd_disk, struct request *rq, int at_head)
+/*
+ * Return values:
+ * 0: success
+ * >0: nvme controller's cqe status response
+ * <0: kernel error in lieu of controller response
+ */
+static int nvme_execute_rq(struct request *rq, bool at_head)
{
- DECLARE_COMPLETION_ONSTACK(wait);
+ blk_status_t status;
- WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));
-
- rq->cmd_flags |= REQ_HIPRI;
- rq->end_io_data = &wait;
- blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);
-
- while (!completion_done(&wait)) {
- blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
- cond_resched();
- }
+ status = blk_execute_rq(rq, at_head);
+ if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
+ return -EINTR;
+ if (nvme_req(rq)->status)
+ return nvme_req(rq)->status;
+ return blk_status_to_errno(status);
}
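/*
 * Illustrative sketch (editor's addition, not part of this patch): how a
 * caller might interpret the three-way return convention documented above.
 * The helper name is hypothetical.
 */
static const char *example_classify_execute_rq(int ret)
{
	if (ret < 0)
		return "kernel error (e.g. -EINTR for a cancelled request)";
	if (ret > 0)
		return "NVMe CQE status returned by the controller";
	return "success";
}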
/*
@@ -820,17 +1017,20 @@ static void nvme_execute_rq_polled(struct request_queue *q,
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- unsigned timeout, int qid, int at_head,
- blk_mq_req_flags_t flags, bool poll)
+ int qid, int at_head, blk_mq_req_flags_t flags)
{
struct request *req;
int ret;
- req = nvme_alloc_request(q, cmd, flags, qid);
+ if (qid == NVME_QID_ANY)
+ req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
+ else
+ req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
+ qid - 1);
+
if (IS_ERR(req))
return PTR_ERR(req);
-
- req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+ nvme_init_request(req, cmd);
if (buffer && bufflen) {
ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -838,16 +1038,9 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out;
}
- if (poll)
- nvme_execute_rq_polled(req->q, NULL, req, at_head);
- else
- blk_execute_rq(req->q, NULL, req, at_head);
- if (result)
+ ret = nvme_execute_rq(req, at_head);
+ if (result && ret >= 0)
*result = nvme_req(req)->result;
- if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
- ret = -EINTR;
- else
- ret = nvme_req(req)->status;
out:
blk_mq_free_request(req);
return ret;
@@ -857,104 +1050,130 @@ EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
- return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
- NVME_QID_ANY, 0, 0, false);
+ return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
+ NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
-static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
- unsigned len, u32 seed, bool write)
+static u32 nvme_known_admin_effects(u8 opcode)
{
- struct bio_integrity_payload *bip;
- int ret = -ENOMEM;
- void *buf;
+ switch (opcode) {
+ case nvme_admin_format_nvm:
+ return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
+ NVME_CMD_EFFECTS_CSE_MASK;
+ case nvme_admin_sanitize_nvm:
+ return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK;
+ default:
+ break;
+ }
+ return 0;
+}
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- goto out;
+u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
+{
+ u32 effects = 0;
- ret = -EFAULT;
- if (write && copy_from_user(buf, ubuf, len))
- goto out_free_meta;
-
- bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
- if (IS_ERR(bip)) {
- ret = PTR_ERR(bip);
- goto out_free_meta;
- }
-
- bip->bip_iter.bi_size = len;
- bip->bip_iter.bi_sector = seed;
- ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
- offset_in_page(buf));
- if (ret == len)
- return buf;
- ret = -ENOMEM;
-out_free_meta:
- kfree(buf);
-out:
- return ERR_PTR(ret);
+ if (ns) {
+ if (ns->head->effects)
+ effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
+ if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
+ dev_warn_once(ctrl->device,
+ "IO command:%02x has unhandled effects:%08x\n",
+ opcode, effects);
+ return 0;
+ }
+
+ if (ctrl->effects)
+ effects = le32_to_cpu(ctrl->effects->acs[opcode]);
+ effects |= nvme_known_admin_effects(opcode);
+
+ return effects;
}
+EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);
-static int nvme_submit_user_cmd(struct request_queue *q,
- struct nvme_command *cmd, void __user *ubuffer,
- unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- u32 meta_seed, u64 *result, unsigned timeout)
+static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ u8 opcode)
{
- bool write = nvme_is_write(cmd);
- struct nvme_ns *ns = q->queuedata;
- struct gendisk *disk = ns ? ns->disk : NULL;
- struct request *req;
- struct bio *bio = NULL;
- void *meta = NULL;
- int ret;
+ u32 effects = nvme_command_effects(ctrl, ns, opcode);
- req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ /*
+ * For simplicity, IO to all namespaces is quiesced even if the command
+ * effects say only one namespace is affected.
+ */
+ if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
+ mutex_lock(&ctrl->scan_lock);
+ mutex_lock(&ctrl->subsys->lock);
+ nvme_mpath_start_freeze(ctrl->subsys);
+ nvme_mpath_wait_freeze(ctrl->subsys);
+ nvme_start_freeze(ctrl);
+ nvme_wait_freeze(ctrl);
+ }
+ return effects;
+}
- req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
- nvme_req(req)->flags |= NVME_REQ_USERCMD;
+void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+ struct nvme_command *cmd, int status)
+{
+ if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
+ nvme_unfreeze(ctrl);
+ nvme_mpath_unfreeze(ctrl->subsys);
+ mutex_unlock(&ctrl->subsys->lock);
+ nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
+ mutex_unlock(&ctrl->scan_lock);
+ }
+ if (effects & NVME_CMD_EFFECTS_CCC)
+ nvme_init_ctrl_finish(ctrl);
+ if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
+ nvme_queue_scan(ctrl);
+ flush_work(&ctrl->scan_work);
+ }
- if (ubuffer && bufflen) {
- ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
- GFP_KERNEL);
- if (ret)
- goto out;
- bio = req->bio;
- bio->bi_disk = disk;
- if (disk && meta_buffer && meta_len) {
- meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
- meta_seed, write);
- if (IS_ERR(meta)) {
- ret = PTR_ERR(meta);
- goto out_unmap;
- }
- req->cmd_flags |= REQ_INTEGRITY;
+ switch (cmd->common.opcode) {
+ case nvme_admin_set_features:
+ switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
+ case NVME_FEAT_KATO:
+ /*
+ * The keep-alive command interval on the host should be
+ * updated when KATO is modified by a Set Features
+ * command.
+ */
+ if (!status)
+ nvme_update_keep_alive(ctrl, cmd);
+ break;
+ default:
+ break;
}
+ break;
+ default:
+ break;
}
+}
+EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
- blk_execute_rq(req->q, disk, req, 0);
- if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
- ret = -EINTR;
- else
- ret = nvme_req(req)->status;
- if (result)
- *result = le64_to_cpu(nvme_req(req)->result.u64);
- if (meta && !ret && !write) {
- if (copy_to_user(meta_buffer, meta, meta_len))
- ret = -EFAULT;
- }
- kfree(meta);
- out_unmap:
- if (bio)
- blk_rq_unmap_user(bio);
- out:
- blk_mq_free_request(req);
- return ret;
+int nvme_execute_passthru_rq(struct request *rq, u32 *effects)
+{
+ struct nvme_command *cmd = nvme_req(rq)->cmd;
+ struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
+ struct nvme_ns *ns = rq->q->queuedata;
+
+ *effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
+ return nvme_execute_rq(rq, false);
+}
+EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
+
+/*
+ * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
+ *
+ * The host should send Keep Alive commands at half of the Keep Alive Timeout
+ * accounting for transport roundtrip times [..].
+ */
+static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
+{
+ queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
}
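/*
 * Worked example (editor's addition): with kato == 15 (seconds) the delayed
 * work above is queued at kato * HZ / 2 jiffies, i.e. 7.5 seconds, matching
 * the "half of the Keep Alive Timeout" recommendation.
 */
#include <stdio.h>

int main(void)
{
	unsigned int kato = 15;		/* seconds */
	unsigned int hz = 250;		/* example CONFIG_HZ */
	unsigned long delay = kato * hz / 2;

	printf("keep-alive delay = %lu jiffies (%.1f s)\n",
	       delay, (double)delay / hz);	/* 1875 jiffies, 7.5 s */
	return 0;
}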
-static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
+static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
+ blk_status_t status)
{
struct nvme_ctrl *ctrl = rq->end_io_data;
unsigned long flags;
@@ -966,7 +1185,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
dev_err(ctrl->device,
"failed nvme_keep_alive_end_io error=%d\n",
status);
- return;
+ return RQ_END_IO_NONE;
}
ctrl->comp_seen = false;
@@ -976,24 +1195,8 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
startka = true;
spin_unlock_irqrestore(&ctrl->lock, flags);
if (startka)
- queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
-}
-
-static int nvme_keep_alive(struct nvme_ctrl *ctrl)
-{
- struct request *rq;
-
- rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
- NVME_QID_ANY);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- rq->timeout = ctrl->kato * HZ;
- rq->end_io_data = ctrl;
-
- blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
-
- return 0;
+ nvme_queue_keep_alive_work(ctrl);
+ return RQ_END_IO_NONE;
}
static void nvme_keep_alive_work(struct work_struct *work)
@@ -1001,21 +1204,30 @@ static void nvme_keep_alive_work(struct work_struct *work)
struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
struct nvme_ctrl, ka_work);
bool comp_seen = ctrl->comp_seen;
+ struct request *rq;
if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
dev_dbg(ctrl->device,
"reschedule traffic based keep-alive timer\n");
ctrl->comp_seen = false;
- queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ nvme_queue_keep_alive_work(ctrl);
return;
}
- if (nvme_keep_alive(ctrl)) {
+ rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ if (IS_ERR(rq)) {
/* allocation failure, reset the controller */
- dev_err(ctrl->device, "keep-alive failed\n");
+ dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
nvme_reset_ctrl(ctrl);
return;
}
+ nvme_init_request(rq, &ctrl->ka_cmd);
+
+ rq->timeout = ctrl->kato * HZ;
+ rq->end_io = nvme_keep_alive_end_io;
+ rq->end_io_data = ctrl;
+ blk_execute_rq_nowait(rq, false);
}
static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
@@ -1023,7 +1235,7 @@ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
if (unlikely(ctrl->kato == 0))
return;
- queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ nvme_queue_keep_alive_work(ctrl);
}
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
@@ -1035,6 +1247,34 @@ void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
+static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
+ struct nvme_command *cmd)
+{
+ unsigned int new_kato =
+ DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);
+
+ dev_info(ctrl->device,
+ "keep alive interval updated from %u ms to %u ms\n",
+ ctrl->kato * 1000 / 2, new_kato * 1000 / 2);
+
+ nvme_stop_keep_alive(ctrl);
+ ctrl->kato = new_kato;
+ nvme_start_keep_alive(ctrl);
+}
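/*
 * Worked example (editor's addition): CDW11 of a Set Features / KATO command
 * carries the timeout in milliseconds; the rounding above turns 30500 ms into
 * a 31 second kato, and the log line reports the halved send interval.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int cdw11_ms = 30500;
	unsigned int new_kato = DIV_ROUND_UP(cdw11_ms, 1000);

	printf("new kato = %u s, send interval = %u ms\n",
	       new_kato, new_kato * 1000 / 2);	/* 31 s, 15500 ms */
	return 0;
}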
+
+/*
+ * In NVMe 1.0 the CNS field was just a binary controller or namespace
+ * flag, thus sending any new CNS opcodes has a big chance of not working.
+ * Qemu unfortunately had that bug after reporting a 1.1 version compliance
+ * (but not for any later version).
+ */
+static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
+ return ctrl->vs < NVME_VS(1, 2, 0);
+ return ctrl->vs < NVME_VS(1, 1, 0);
+}
+
static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
struct nvme_command c = { };
@@ -1055,17 +1295,73 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
return error;
}
-static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_ns_ids *ids)
+static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
+ struct nvme_ns_id_desc *cur, bool *csi_seen)
+{
+ const char *warn_str = "ctrl returned bogus length:";
+ void *data = cur;
+
+ switch (cur->nidt) {
+ case NVME_NIDT_EUI64:
+ if (cur->nidl != NVME_NIDT_EUI64_LEN) {
+ dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
+ warn_str, cur->nidl);
+ return -1;
+ }
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+ return NVME_NIDT_EUI64_LEN;
+ memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
+ return NVME_NIDT_EUI64_LEN;
+ case NVME_NIDT_NGUID:
+ if (cur->nidl != NVME_NIDT_NGUID_LEN) {
+ dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
+ warn_str, cur->nidl);
+ return -1;
+ }
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+ return NVME_NIDT_NGUID_LEN;
+ memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
+ return NVME_NIDT_NGUID_LEN;
+ case NVME_NIDT_UUID:
+ if (cur->nidl != NVME_NIDT_UUID_LEN) {
+ dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
+ warn_str, cur->nidl);
+ return -1;
+ }
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+ return NVME_NIDT_UUID_LEN;
+ uuid_copy(&ids->uuid, data + sizeof(*cur));
+ return NVME_NIDT_UUID_LEN;
+ case NVME_NIDT_CSI:
+ if (cur->nidl != NVME_NIDT_CSI_LEN) {
+ dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
+ warn_str, cur->nidl);
+ return -1;
+ }
+ memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
+ *csi_seen = true;
+ return NVME_NIDT_CSI_LEN;
+ default:
+ /* Skip unknown types */
+ return cur->nidl;
+ }
+}
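/*
 * Standalone sketch (editor's addition): the Identify Namespace Descriptor
 * list parsed above is a simple type/length/value walk. The struct below
 * mirrors the 4-byte header implied by the code (nidt, nidl, two reserved
 * bytes) and is an illustration, not the kernel definition.
 */
#include <stddef.h>
#include <stdio.h>

struct example_nid_desc {
	unsigned char nidt;		/* descriptor type */
	unsigned char nidl;		/* payload length in bytes */
	unsigned char reserved[2];
	/* nidl bytes of identifier payload follow */
};

static void example_walk(const unsigned char *data, size_t size)
{
	size_t pos;

	for (pos = 0; pos + sizeof(struct example_nid_desc) <= size; ) {
		const struct example_nid_desc *cur =
			(const void *)(data + pos);

		if (cur->nidl == 0)	/* end of list */
			break;
		printf("type %u, length %u\n", cur->nidt, cur->nidl);
		pos += sizeof(*cur) + cur->nidl;
	}
}

int main(void)
{
	/* one 8-byte EUI-64 descriptor (type 1), then a terminating zero */
	unsigned char buf[16] = { 1, 8, 0, 0, 0xde, 0xad, 0xbe, 0xef };

	example_walk(buf, sizeof(buf));	/* prints "type 1, length 8" */
	return 0;
}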
+
+static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
{
struct nvme_command c = { };
- int status;
+ bool csi_seen = false;
+ int status, pos, len;
void *data;
- int pos;
- int len;
+
+ if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
+ return 0;
+ if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
+ return 0;
c.identify.opcode = nvme_admin_identify;
- c.identify.nsid = cpu_to_le32(nsid);
+ c.identify.nsid = cpu_to_le32(info->nsid);
c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
@@ -1074,8 +1370,12 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
NVME_IDENTIFY_DATA_SIZE);
- if (status)
+ if (status) {
+ dev_warn(ctrl->device,
+ "Identify Descriptors failed (nsid=%u, status=0x%x)\n",
+ info->nsid, status);
goto free_data;
+ }
for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
struct nvme_ns_id_desc *cur = data + pos;
@@ -1083,63 +1383,26 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (cur->nidl == 0)
break;
- switch (cur->nidt) {
- case NVME_NIDT_EUI64:
- if (cur->nidl != NVME_NIDT_EUI64_LEN) {
- dev_warn(ctrl->device,
- "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
- cur->nidl);
- goto free_data;
- }
- len = NVME_NIDT_EUI64_LEN;
- memcpy(ids->eui64, data + pos + sizeof(*cur), len);
+ len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen);
+ if (len < 0)
break;
- case NVME_NIDT_NGUID:
- if (cur->nidl != NVME_NIDT_NGUID_LEN) {
- dev_warn(ctrl->device,
- "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
- cur->nidl);
- goto free_data;
- }
- len = NVME_NIDT_NGUID_LEN;
- memcpy(ids->nguid, data + pos + sizeof(*cur), len);
- break;
- case NVME_NIDT_UUID:
- if (cur->nidl != NVME_NIDT_UUID_LEN) {
- dev_warn(ctrl->device,
- "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
- cur->nidl);
- goto free_data;
- }
- len = NVME_NIDT_UUID_LEN;
- uuid_copy(&ids->uuid, data + pos + sizeof(*cur));
- break;
- default:
- /* Skip unknown types */
- len = cur->nidl;
- break;
- }
len += sizeof(*cur);
}
+
+ if (nvme_multi_css(ctrl) && !csi_seen) {
+ dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
+ info->nsid);
+ status = -EINVAL;
+ }
+
free_data:
kfree(data);
return status;
}
-static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
-{
- struct nvme_command c = { };
-
- c.identify.opcode = nvme_admin_identify;
- c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
- c.identify.nsid = cpu_to_le32(nsid);
- return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
- NVME_IDENTIFY_DATA_SIZE);
-}
-
-static int nvme_identify_ns(struct nvme_ctrl *ctrl,
- unsigned nsid, struct nvme_id_ns **id)
+static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+ struct nvme_id_ns **id)
{
struct nvme_command c = { };
int error;
@@ -1156,26 +1419,87 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl,
error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
if (error) {
dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
- kfree(*id);
+ goto out_free_id;
}
+ error = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ if ((*id)->ncap == 0) /* namespace not allocated or attached */
+ goto out_free_id;
+ return 0;
+
+out_free_id:
+ kfree(*id);
return error;
}
+static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
+{
+ struct nvme_ns_ids *ids = &info->ids;
+ struct nvme_id_ns *id;
+ int ret;
+
+ ret = nvme_identify_ns(ctrl, info->nsid, &id);
+ if (ret)
+ return ret;
+ info->anagrpid = id->anagrpid;
+ info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
+ info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
+ info->is_ready = true;
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
+ dev_info(ctrl->device,
+ "Ignoring bogus Namespace Identifiers\n");
+ } else {
+ if (ctrl->vs >= NVME_VS(1, 1, 0) &&
+ !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+ memcpy(ids->eui64, id->eui64, sizeof(ids->eui64));
+ if (ctrl->vs >= NVME_VS(1, 2, 0) &&
+ !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+ memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
+ }
+ kfree(id);
+ return 0;
+}
+
+static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
+{
+ struct nvme_id_ns_cs_indep *id;
+ struct nvme_command c = {
+ .identify.opcode = nvme_admin_identify,
+ .identify.nsid = cpu_to_le32(info->nsid),
+ .identify.cns = NVME_ID_CNS_NS_CS_INDEP,
+ };
+ int ret;
+
+ id = kmalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return -ENOMEM;
+
+ ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
+ if (!ret) {
+ info->anagrpid = id->anagrpid;
+ info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
+ info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
+ info->is_ready = id->nstat & NVME_NSTAT_NRDY;
+ }
+ kfree(id);
+ return ret;
+}
+
static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen, u32 *result)
{
union nvme_result res = { 0 };
- struct nvme_command c;
+ struct nvme_command c = { };
int ret;
- memset(&c, 0, sizeof(c));
c.features.opcode = op;
c.features.fid = cpu_to_le32(fid);
c.features.dword11 = cpu_to_le32(dword11);
ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
- buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
+ buffer, buflen, NVME_QID_ANY, 0, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(res.u32);
return ret;
@@ -1248,352 +1572,13 @@ static void nvme_enable_aen(struct nvme_ctrl *ctrl)
queue_work(nvme_wq, &ctrl->async_event_work);
}
-static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
-{
- struct nvme_user_io io;
- struct nvme_command c;
- unsigned length, meta_len;
- void __user *metadata;
-
- if (copy_from_user(&io, uio, sizeof(io)))
- return -EFAULT;
- if (io.flags)
- return -EINVAL;
-
- switch (io.opcode) {
- case nvme_cmd_write:
- case nvme_cmd_read:
- case nvme_cmd_compare:
- break;
- default:
- return -EINVAL;
- }
-
- length = (io.nblocks + 1) << ns->lba_shift;
- meta_len = (io.nblocks + 1) * ns->ms;
- metadata = (void __user *)(uintptr_t)io.metadata;
-
- if (ns->ext) {
- length += meta_len;
- meta_len = 0;
- } else if (meta_len) {
- if ((io.metadata & 3) || !io.metadata)
- return -EINVAL;
- }
-
- memset(&c, 0, sizeof(c));
- c.rw.opcode = io.opcode;
- c.rw.flags = io.flags;
- c.rw.nsid = cpu_to_le32(ns->head->ns_id);
- c.rw.slba = cpu_to_le64(io.slba);
- c.rw.length = cpu_to_le16(io.nblocks);
- c.rw.control = cpu_to_le16(io.control);
- c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
- c.rw.reftag = cpu_to_le32(io.reftag);
- c.rw.apptag = cpu_to_le16(io.apptag);
- c.rw.appmask = cpu_to_le16(io.appmask);
-
- return nvme_submit_user_cmd(ns->queue, &c,
- (void __user *)(uintptr_t)io.addr, length,
- metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
-}
-
-static u32 nvme_known_admin_effects(u8 opcode)
-{
- switch (opcode) {
- case nvme_admin_format_nvm:
- return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
- NVME_CMD_EFFECTS_CSE_MASK;
- case nvme_admin_sanitize_nvm:
- return NVME_CMD_EFFECTS_CSE_MASK;
- default:
- break;
- }
- return 0;
-}
-
-static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
- u8 opcode)
+static int nvme_ns_open(struct nvme_ns *ns)
{
- u32 effects = 0;
-
- if (ns) {
- if (ctrl->effects)
- effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
- if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
- dev_warn(ctrl->device,
- "IO command:%02x has unhandled effects:%08x\n",
- opcode, effects);
- return 0;
- }
- if (ctrl->effects)
- effects = le32_to_cpu(ctrl->effects->acs[opcode]);
- effects |= nvme_known_admin_effects(opcode);
-
- /*
- * For simplicity, IO to all namespaces is quiesced even if the command
- * effects say only one namespace is affected.
- */
- if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
- mutex_lock(&ctrl->scan_lock);
- mutex_lock(&ctrl->subsys->lock);
- nvme_mpath_start_freeze(ctrl->subsys);
- nvme_mpath_wait_freeze(ctrl->subsys);
- nvme_start_freeze(ctrl);
- nvme_wait_freeze(ctrl);
- }
- return effects;
-}
-
-static void nvme_update_formats(struct nvme_ctrl *ctrl)
-{
- struct nvme_ns *ns;
-
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
- if (ns->disk && nvme_revalidate_disk(ns->disk))
- nvme_set_queue_dying(ns);
- up_read(&ctrl->namespaces_rwsem);
-}
-
-static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
-{
- /*
- * Revalidate LBA changes prior to unfreezing. This is necessary to
- * prevent memory corruption if a logical block size was changed by
- * this command.
- */
- if (effects & NVME_CMD_EFFECTS_LBCC)
- nvme_update_formats(ctrl);
- if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
- nvme_unfreeze(ctrl);
- nvme_mpath_unfreeze(ctrl->subsys);
- mutex_unlock(&ctrl->subsys->lock);
- nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
- mutex_unlock(&ctrl->scan_lock);
- }
- if (effects & NVME_CMD_EFFECTS_CCC)
- nvme_init_identify(ctrl);
- if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
- nvme_queue_scan(ctrl);
-}
-
-static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
- struct nvme_passthru_cmd __user *ucmd)
-{
- struct nvme_passthru_cmd cmd;
- struct nvme_command c;
- unsigned timeout = 0;
- u32 effects;
- u64 result;
- int status;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
- return -EFAULT;
- if (cmd.flags)
- return -EINVAL;
-
- memset(&c, 0, sizeof(c));
- c.common.opcode = cmd.opcode;
- c.common.flags = cmd.flags;
- c.common.nsid = cpu_to_le32(cmd.nsid);
- c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
- c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
- c.common.cdw10 = cpu_to_le32(cmd.cdw10);
- c.common.cdw11 = cpu_to_le32(cmd.cdw11);
- c.common.cdw12 = cpu_to_le32(cmd.cdw12);
- c.common.cdw13 = cpu_to_le32(cmd.cdw13);
- c.common.cdw14 = cpu_to_le32(cmd.cdw14);
- c.common.cdw15 = cpu_to_le32(cmd.cdw15);
-
- if (cmd.timeout_ms)
- timeout = msecs_to_jiffies(cmd.timeout_ms);
-
- effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
- status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
- (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
- (void __user *)(uintptr_t)cmd.metadata,
- cmd.metadata_len, 0, &result, timeout);
- nvme_passthru_end(ctrl, effects);
-
- if (status >= 0) {
- if (put_user(result, &ucmd->result))
- return -EFAULT;
- }
-
- return status;
-}
-
-static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
- struct nvme_passthru_cmd64 __user *ucmd)
-{
- struct nvme_passthru_cmd64 cmd;
- struct nvme_command c;
- unsigned timeout = 0;
- u32 effects;
- int status;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
- return -EFAULT;
- if (cmd.flags)
- return -EINVAL;
-
- memset(&c, 0, sizeof(c));
- c.common.opcode = cmd.opcode;
- c.common.flags = cmd.flags;
- c.common.nsid = cpu_to_le32(cmd.nsid);
- c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
- c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
- c.common.cdw10 = cpu_to_le32(cmd.cdw10);
- c.common.cdw11 = cpu_to_le32(cmd.cdw11);
- c.common.cdw12 = cpu_to_le32(cmd.cdw12);
- c.common.cdw13 = cpu_to_le32(cmd.cdw13);
- c.common.cdw14 = cpu_to_le32(cmd.cdw14);
- c.common.cdw15 = cpu_to_le32(cmd.cdw15);
-
- if (cmd.timeout_ms)
- timeout = msecs_to_jiffies(cmd.timeout_ms);
-
- effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
- status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
- (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
- (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
- 0, &cmd.result, timeout);
- nvme_passthru_end(ctrl, effects);
-
- if (status >= 0) {
- if (put_user(cmd.result, &ucmd->result))
- return -EFAULT;
- }
-
- return status;
-}
-
-/*
- * Issue ioctl requests on the first available path. Note that unlike normal
- * block layer requests we will not retry failed request on another controller.
- */
-static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
- struct nvme_ns_head **head, int *srcu_idx)
-{
-#ifdef CONFIG_NVME_MULTIPATH
- if (disk->fops == &nvme_ns_head_ops) {
- struct nvme_ns *ns;
-
- *head = disk->private_data;
- *srcu_idx = srcu_read_lock(&(*head)->srcu);
- ns = nvme_find_path(*head);
- if (!ns)
- srcu_read_unlock(&(*head)->srcu, *srcu_idx);
- return ns;
- }
-#endif
- *head = NULL;
- *srcu_idx = -1;
- return disk->private_data;
-}
-
-static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
-{
- if (head)
- srcu_read_unlock(&head->srcu, idx);
-}
-
-static bool is_ctrl_ioctl(unsigned int cmd)
-{
- if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
- return true;
- if (is_sed_ioctl(cmd))
- return true;
- return false;
-}
-
-static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
- void __user *argp,
- struct nvme_ns_head *head,
- int srcu_idx)
-{
- struct nvme_ctrl *ctrl = ns->ctrl;
- int ret;
-
- nvme_get_ctrl(ns->ctrl);
- nvme_put_ns_from_disk(head, srcu_idx);
-
- switch (cmd) {
- case NVME_IOCTL_ADMIN_CMD:
- ret = nvme_user_cmd(ctrl, NULL, argp);
- break;
- case NVME_IOCTL_ADMIN64_CMD:
- ret = nvme_user_cmd64(ctrl, NULL, argp);
- break;
- default:
- ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
- break;
- }
- nvme_put_ctrl(ctrl);
- return ret;
-}
-
-static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct nvme_ns_head *head = NULL;
- void __user *argp = (void __user *)arg;
- struct nvme_ns *ns;
- int srcu_idx, ret;
-
- ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
- if (unlikely(!ns))
- return -EWOULDBLOCK;
-
- /*
- * Handle ioctls that apply to the controller instead of the namespace
- * seperately and drop the ns SRCU reference early. This avoids a
- * deadlock when deleting namespaces using the passthrough interface.
- */
- if (is_ctrl_ioctl(cmd))
- return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
-
- switch (cmd) {
- case NVME_IOCTL_ID:
- force_successful_syscall_return();
- ret = ns->head->ns_id;
- break;
- case NVME_IOCTL_IO_CMD:
- ret = nvme_user_cmd(ns->ctrl, ns, argp);
- break;
- case NVME_IOCTL_SUBMIT_IO:
- ret = nvme_submit_io(ns, argp);
- break;
- case NVME_IOCTL_IO64_CMD:
- ret = nvme_user_cmd64(ns->ctrl, ns, argp);
- break;
- default:
- if (ns->ndev)
- ret = nvme_nvm_ioctl(ns, cmd, arg);
- else
- ret = -ENOTTY;
- }
-
- nvme_put_ns_from_disk(head, srcu_idx);
- return ret;
-}
-
-static int nvme_open(struct block_device *bdev, fmode_t mode)
-{
- struct nvme_ns *ns = bdev->bd_disk->private_data;
-
-#ifdef CONFIG_NVME_MULTIPATH
/* should never be called due to GENHD_FL_HIDDEN */
- if (WARN_ON_ONCE(ns->head->disk))
+ if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
goto fail;
-#endif
- if (!kref_get_unless_zero(&ns->kref))
+ if (!nvme_get_ns(ns))
goto fail;
if (!try_module_get(ns->ctrl->ops->module))
goto fail_put_ns;
@@ -1606,15 +1591,24 @@ fail:
return -ENXIO;
}
-static void nvme_release(struct gendisk *disk, fmode_t mode)
+static void nvme_ns_release(struct nvme_ns *ns)
{
- struct nvme_ns *ns = disk->private_data;
module_put(ns->ctrl->ops->module);
nvme_put_ns(ns);
}
-static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int nvme_open(struct block_device *bdev, fmode_t mode)
+{
+ return nvme_ns_open(bdev->bd_disk->private_data);
+}
+
+static void nvme_release(struct gendisk *disk, fmode_t mode)
+{
+ nvme_ns_release(disk->private_data);
+}
+
+int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
/* some standard values */
geo->heads = 1 << 6;
@@ -1624,137 +1618,224 @@ static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
-static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
+static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
+ u32 max_integrity_segments)
{
- struct blk_integrity integrity;
+ struct blk_integrity integrity = { };
- memset(&integrity, 0, sizeof(integrity));
- switch (pi_type) {
+ switch (ns->pi_type) {
case NVME_NS_DPS_PI_TYPE3:
- integrity.profile = &t10_pi_type3_crc;
- integrity.tag_size = sizeof(u16) + sizeof(u32);
- integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+ switch (ns->guard_type) {
+ case NVME_NVM_NS_16B_GUARD:
+ integrity.profile = &t10_pi_type3_crc;
+ integrity.tag_size = sizeof(u16) + sizeof(u32);
+ integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+ break;
+ case NVME_NVM_NS_64B_GUARD:
+ integrity.profile = &ext_pi_type3_crc64;
+ integrity.tag_size = sizeof(u16) + 6;
+ integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+ break;
+ default:
+ integrity.profile = NULL;
+ break;
+ }
break;
case NVME_NS_DPS_PI_TYPE1:
case NVME_NS_DPS_PI_TYPE2:
- integrity.profile = &t10_pi_type1_crc;
- integrity.tag_size = sizeof(u16);
- integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+ switch (ns->guard_type) {
+ case NVME_NVM_NS_16B_GUARD:
+ integrity.profile = &t10_pi_type1_crc;
+ integrity.tag_size = sizeof(u16);
+ integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+ break;
+ case NVME_NVM_NS_64B_GUARD:
+ integrity.profile = &ext_pi_type1_crc64;
+ integrity.tag_size = sizeof(u16);
+ integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+ break;
+ default:
+ integrity.profile = NULL;
+ break;
+ }
break;
default:
integrity.profile = NULL;
break;
}
- integrity.tuple_size = ms;
+
+ integrity.tuple_size = ns->ms;
blk_integrity_register(disk, &integrity);
- blk_queue_max_integrity_segments(disk->queue, 1);
+ blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
}
#else
-static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
+static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
+ u32 max_integrity_segments)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
-static void nvme_set_chunk_size(struct nvme_ns *ns)
-{
- u32 chunk_size = nvme_lba_to_sect(ns, ns->noiob);
- blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
-}
-
static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
{
struct nvme_ctrl *ctrl = ns->ctrl;
struct request_queue *queue = disk->queue;
u32 size = queue_logical_block_size(queue);
- if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
+ if (ctrl->max_discard_sectors == 0) {
+ blk_queue_max_discard_sectors(queue, 0);
return;
}
- if (ctrl->nr_streams && ns->sws && ns->sgs)
- size *= ns->sws * ns->sgs;
-
BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
NVME_DSM_MAX_RANGES);
- queue->limits.discard_alignment = 0;
queue->limits.discard_granularity = size;
/* If discard is already enabled, don't reset queue limits */
- if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
+ if (queue->limits.max_discard_sectors)
return;
- blk_queue_max_discard_sectors(queue, UINT_MAX);
- blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
+ if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
+ ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
+
+ blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
+ blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
}
-static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
+static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
- u64 max_blocks;
-
- if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
- (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
- return;
- /*
- * Even though NVMe spec explicitly states that MDTS is not
- * applicable to the write-zeroes:- "The restriction does not apply to
- * commands that do not transfer data between the host and the
- * controller (e.g., Write Uncorrectable ro Write Zeroes command).".
- * In order to be more cautious use controller's max_hw_sectors value
- * to configure the maximum sectors for the write-zeroes which is
- * configured based on the controller's MDTS field in the
- * nvme_init_identify() if available.
- */
- if (ns->ctrl->max_hw_sectors == UINT_MAX)
- max_blocks = (u64)USHRT_MAX + 1;
- else
- max_blocks = ns->ctrl->max_hw_sectors + 1;
-
- blk_queue_max_write_zeroes_sectors(disk->queue,
- nvme_lba_to_sect(ns, max_blocks));
+ return uuid_equal(&a->uuid, &b->uuid) &&
+ memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
+ memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
+ a->csi == b->csi;
}
-static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
- struct nvme_id_ns *id, struct nvme_ns_ids *ids)
+static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
{
+ bool first = id->dps & NVME_NS_DPS_PI_FIRST;
+ unsigned lbaf = nvme_lbaf_index(id->flbas);
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ struct nvme_command c = { };
+ struct nvme_id_ns_nvm *nvm;
int ret = 0;
+ u32 elbaf;
+
+ ns->pi_size = 0;
+ ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+ if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
+ ns->pi_size = sizeof(struct t10_pi_tuple);
+ ns->guard_type = NVME_NVM_NS_16B_GUARD;
+ goto set_pi;
+ }
- memset(ids, 0, sizeof(*ids));
+ nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
+ if (!nvm)
+ return -ENOMEM;
- if (ctrl->vs >= NVME_VS(1, 1, 0))
- memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
- if (ctrl->vs >= NVME_VS(1, 2, 0))
- memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
- if (ctrl->vs >= NVME_VS(1, 3, 0)) {
- /* Don't treat error as fatal we potentially
- * already have a NGUID or EUI-64
- */
- ret = nvme_identify_ns_descs(ctrl, nsid, ids);
- if (ret)
- dev_warn(ctrl->device,
- "Identify Descriptors failed (%d)\n", ret);
- if (ret > 0)
- ret = 0;
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.nsid = cpu_to_le32(ns->head->ns_id);
+ c.identify.cns = NVME_ID_CNS_CS_NS;
+ c.identify.csi = NVME_CSI_NVM;
+
+ ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm));
+ if (ret)
+ goto free_data;
+
+ elbaf = le32_to_cpu(nvm->elbaf[lbaf]);
+
+ /* no support for storage tag formats right now */
+ if (nvme_elbaf_sts(elbaf))
+ goto free_data;
+
+ ns->guard_type = nvme_elbaf_guard_type(elbaf);
+ switch (ns->guard_type) {
+ case NVME_NVM_NS_64B_GUARD:
+ ns->pi_size = sizeof(struct crc64_pi_tuple);
+ break;
+ case NVME_NVM_NS_16B_GUARD:
+ ns->pi_size = sizeof(struct t10_pi_tuple);
+ break;
+ default:
+ break;
}
+
+free_data:
+ kfree(nvm);
+set_pi:
+ if (ns->pi_size && (first || ns->ms == ns->pi_size))
+ ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+ else
+ ns->pi_type = 0;
+
return ret;
}
-static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
+static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
{
- return !uuid_is_null(&ids->uuid) ||
- memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
- memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
+ struct nvme_ctrl *ctrl = ns->ctrl;
+
+ if (nvme_init_ms(ns, id))
+ return;
+
+ ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+ if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
+ return;
+
+ if (ctrl->ops->flags & NVME_F_FABRICS) {
+ /*
+ * The NVMe over Fabrics specification only supports metadata as
+ * part of the extended data LBA. We rely on HCA/HBA support to
+ * remap the separate metadata buffer from the block layer.
+ */
+ if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
+ return;
+
+ ns->features |= NVME_NS_EXT_LBAS;
+
+ /*
+ * The current fabrics transport drivers support namespace
+ * metadata formats only if nvme_ns_has_pi() returns true.
+ * Suppress support for all other formats so the namespace will
+ * have a 0 capacity and not be usable through the block stack.
+ *
+ * Note, this check will need to be modified if any drivers
+ * gain the ability to use other metadata formats.
+ */
+ if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
+ ns->features |= NVME_NS_METADATA_SUPPORTED;
+ } else {
+ /*
+ * For PCIe controllers, we can't easily remap the separate
+ * metadata buffer from the block layer and thus require a
+ * separate metadata buffer for block layer metadata/PI support.
+ * We allow extended LBAs for the passthrough interface, though.
+ */
+ if (id->flbas & NVME_NS_FLBAS_META_EXT)
+ ns->features |= NVME_NS_EXT_LBAS;
+ else
+ ns->features |= NVME_NS_METADATA_SUPPORTED;
+ }
}
-static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
+static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
+ struct request_queue *q)
{
- return uuid_equal(&a->uuid, &b->uuid) &&
- memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
- memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0;
+ bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;
+
+ if (ctrl->max_hw_sectors) {
+ u32 max_segments =
+ (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
+
+ max_segments = min_not_zero(max_segments, ctrl->max_segments);
+ blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
+ blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
+ }
+ blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
+ blk_queue_dma_alignment(q, 3);
+ blk_queue_write_cache(q, vwc, vwc);
}
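/*
 * Worked example (editor's addition): the segment limit above is derived from
 * MDTS. With NVME_CTRL_PAGE_SIZE == 4096 (8 sectors of 512 bytes) and
 * max_hw_sectors == 1024 (512 KiB), the controller needs at most
 * 1024 / 8 + 1 == 129 segments.
 */
#include <stdio.h>

int main(void)
{
	unsigned int ctrl_page_size = 4096;
	unsigned int max_hw_sectors = 1024;
	unsigned int max_segments =
		max_hw_sectors / (ctrl_page_size >> 9) + 1;

	printf("max_segments = %u\n", max_segments);	/* prints 129 */
	return 0;
}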
static void nvme_update_disk_info(struct gendisk *disk,
@@ -1762,35 +1843,37 @@ static void nvme_update_disk_info(struct gendisk *disk,
{
sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
unsigned short bs = 1 << ns->lba_shift;
- u32 atomic_bs, phys_bs, io_opt;
+ u32 atomic_bs, phys_bs, io_opt = 0;
+ /*
+ * The block layer can't support LBA sizes larger than the page size
+ * yet, so catch this early and don't allow block I/O.
+ */
if (ns->lba_shift > PAGE_SHIFT) {
- /* unsupported block size, set capacity to 0 later */
+ capacity = 0;
bs = (1 << 9);
}
- blk_mq_freeze_queue(disk->queue);
+
blk_integrity_unregister(disk);
+ atomic_bs = phys_bs = bs;
if (id->nabo == 0) {
/*
* Bit 1 indicates whether NAWUPF is defined for this namespace
* and whether it should be used instead of AWUPF. If NAWUPF ==
* 0 then AWUPF must be used instead.
*/
- if (id->nsfeat & (1 << 1) && id->nawupf)
+ if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
else
atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
- } else {
- atomic_bs = bs;
}
- phys_bs = bs;
- io_opt = bs;
- if (id->nsfeat & (1 << 4)) {
+
+ if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
/* NPWG = Namespace Preferred Write Granularity */
- phys_bs *= 1 + le16_to_cpu(id->npwg);
+ phys_bs = bs * (1 + le16_to_cpu(id->npwg));
/* NOWS = Namespace Optimal Write Size */
- io_opt *= 1 + le16_to_cpu(id->nows);
+ io_opt = bs * (1 + le16_to_cpu(id->nows));
}
blk_queue_logical_block_size(disk->queue, bs);
@@ -1803,105 +1886,180 @@ static void nvme_update_disk_info(struct gendisk *disk,
blk_queue_io_min(disk->queue, phys_bs);
blk_queue_io_opt(disk->queue, io_opt);
- if (ns->ms && !ns->ext &&
- (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
- nvme_init_integrity(disk, ns->ms, ns->pi_type);
- if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) ||
- ns->lba_shift > PAGE_SHIFT)
- capacity = 0;
+ /*
+ * Register a metadata profile for PI, or the plain non-integrity NVMe
+ * metadata masquerading as Type 0 if supported, otherwise reject block
+ * I/O to namespaces with metadata except when the namespace supports
+ * PI, as it can strip/insert in that case.
+ */
+ if (ns->ms) {
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
+ (ns->features & NVME_NS_METADATA_SUPPORTED))
+ nvme_init_integrity(disk, ns,
+ ns->ctrl->max_integrity_segments);
+ else if (!nvme_ns_has_pi(ns))
+ capacity = 0;
+ }
- set_capacity(disk, capacity);
+ set_capacity_and_notify(disk, capacity);
nvme_config_discard(disk, ns);
- nvme_config_write_zeroes(disk, ns);
+ blk_queue_max_write_zeroes_sectors(disk->queue,
+ ns->ctrl->max_zeroes_sectors);
+}
- if (id->nsattr & (1 << 0))
- set_disk_ro(disk, true);
- else
- set_disk_ro(disk, false);
+static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
+{
+ return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags);
+}
- blk_mq_unfreeze_queue(disk->queue);
+static inline bool nvme_first_scan(struct gendisk *disk)
+{
+ /* nvme_alloc_ns() scans the disk prior to adding it */
+ return !disk_live(disk);
}
-static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
{
- struct nvme_ns *ns = disk->private_data;
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ u32 iob;
- /*
- * If identify namespace failed, use default 512 byte block size so
- * block layer can use before failing read/write for 0 capacity.
- */
- ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
- if (ns->lba_shift == 0)
- ns->lba_shift = 9;
- ns->noiob = le16_to_cpu(id->noiob);
- ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
- ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
- /* the PI implementation requires metadata equal t10 pi tuple size */
- if (ns->ms == sizeof(struct t10_pi_tuple))
- ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+ if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
+ is_power_of_2(ctrl->max_hw_sectors))
+ iob = ctrl->max_hw_sectors;
else
- ns->pi_type = 0;
+ iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
- if (ns->noiob)
- nvme_set_chunk_size(ns);
- nvme_update_disk_info(disk, ns, id);
-#ifdef CONFIG_NVME_MULTIPATH
- if (ns->head->disk) {
- nvme_update_disk_info(ns->head->disk, ns, id);
- blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
- revalidate_disk(ns->head->disk);
+ if (!iob)
+ return;
+
+ if (!is_power_of_2(iob)) {
+ if (nvme_first_scan(ns->disk))
+ pr_warn("%s: ignoring unaligned IO boundary:%u\n",
+ ns->disk->disk_name, iob);
+ return;
}
-#endif
+
+ if (blk_queue_is_zoned(ns->disk->queue)) {
+ if (nvme_first_scan(ns->disk))
+ pr_warn("%s: ignoring zoned namespace IO boundary\n",
+ ns->disk->disk_name);
+ return;
+ }
+
+ blk_queue_chunk_sectors(ns->queue, iob);
}
-static int nvme_revalidate_disk(struct gendisk *disk)
+static int nvme_update_ns_info_generic(struct nvme_ns *ns,
+ struct nvme_ns_info *info)
{
- struct nvme_ns *ns = disk->private_data;
- struct nvme_ctrl *ctrl = ns->ctrl;
- struct nvme_id_ns *id;
- struct nvme_ns_ids ids;
- int ret = 0;
+ blk_mq_freeze_queue(ns->disk->queue);
+ nvme_set_queue_limits(ns->ctrl, ns->queue);
+ set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
+ blk_mq_unfreeze_queue(ns->disk->queue);
- if (test_bit(NVME_NS_DEAD, &ns->flags)) {
- set_capacity(disk, 0);
- return -ENODEV;
+ if (nvme_ns_head_multipath(ns->head)) {
+ blk_mq_freeze_queue(ns->head->disk->queue);
+ set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
+ nvme_mpath_revalidate_paths(ns);
+ blk_stack_limits(&ns->head->disk->queue->limits,
+ &ns->queue->limits, 0);
+ ns->head->disk->flags |= GENHD_FL_HIDDEN;
+ blk_mq_unfreeze_queue(ns->head->disk->queue);
}
- ret = nvme_identify_ns(ctrl, ns->head->ns_id, &id);
+ /* Hide the block-interface for these devices */
+ ns->disk->flags |= GENHD_FL_HIDDEN;
+ set_bit(NVME_NS_READY, &ns->flags);
+
+ return 0;
+}
+
+static int nvme_update_ns_info_block(struct nvme_ns *ns,
+ struct nvme_ns_info *info)
+{
+ struct nvme_id_ns *id;
+ unsigned lbaf;
+ int ret;
+
+ ret = nvme_identify_ns(ns->ctrl, info->nsid, &id);
if (ret)
- goto out;
+ return ret;
+
+ blk_mq_freeze_queue(ns->disk->queue);
+ lbaf = nvme_lbaf_index(id->flbas);
+ ns->lba_shift = id->lbaf[lbaf].ds;
+ nvme_set_queue_limits(ns->ctrl, ns->queue);
+
+ nvme_configure_metadata(ns, id);
+ nvme_set_chunk_sectors(ns, id);
+ nvme_update_disk_info(ns->disk, ns, id);
- if (id->ncap == 0) {
- ret = -ENODEV;
- goto free_id;
+ if (ns->head->ids.csi == NVME_CSI_ZNS) {
+ ret = nvme_update_zone_info(ns, lbaf);
+ if (ret) {
+ blk_mq_unfreeze_queue(ns->disk->queue);
+ goto out;
+ }
}
- __nvme_revalidate_disk(disk, id);
- ret = nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
- if (ret)
- goto free_id;
+ set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
+ set_bit(NVME_NS_READY, &ns->flags);
+ blk_mq_unfreeze_queue(ns->disk->queue);
- if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
- dev_err(ctrl->device,
- "identifiers changed for nsid %d\n", ns->head->ns_id);
- ret = -ENODEV;
+ if (blk_queue_is_zoned(ns->queue)) {
+ ret = nvme_revalidate_zones(ns);
+ if (ret && !nvme_first_scan(ns->disk))
+ goto out;
}
-free_id:
- kfree(id);
+ if (nvme_ns_head_multipath(ns->head)) {
+ blk_mq_freeze_queue(ns->head->disk->queue);
+ nvme_update_disk_info(ns->head->disk, ns, id);
+ set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
+ nvme_mpath_revalidate_paths(ns);
+ blk_stack_limits(&ns->head->disk->queue->limits,
+ &ns->queue->limits, 0);
+ disk_update_readahead(ns->head->disk);
+ blk_mq_unfreeze_queue(ns->head->disk->queue);
+ }
+
+ ret = 0;
out:
/*
- * Only fail the function if we got a fatal error back from the
- * device, otherwise ignore the error and just move on.
+	 * If probing fails due to an unsupported feature, hide the block device,
+ * but still allow other access.
*/
- if (ret == -ENOMEM || (ret > 0 && !(ret & NVME_SC_DNR)))
+ if (ret == -ENODEV) {
+ ns->disk->flags |= GENHD_FL_HIDDEN;
+ set_bit(NVME_NS_READY, &ns->flags);
ret = 0;
- else if (ret > 0)
- ret = blk_status_to_errno(nvme_error_status(ret));
+ }
+ kfree(id);
return ret;
}
+static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
+{
+ switch (info->ids.csi) {
+ case NVME_CSI_ZNS:
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ dev_info(ns->ctrl->device,
+ "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
+ info->nsid);
+ return nvme_update_ns_info_generic(ns, info);
+ }
+ return nvme_update_ns_info_block(ns, info);
+ case NVME_CSI_NVM:
+ return nvme_update_ns_info_block(ns, info);
+ default:
+ dev_info(ns->ctrl->device,
+ "block device for nsid %u not supported (csi %u)\n",
+ info->nsid, info->ids.csi);
+ return nvme_update_ns_info_generic(ns, info);
+ }
+}
+
static char nvme_pr_type(enum pr_type type)
{
switch (type) {
@@ -1920,32 +2078,47 @@ static char nvme_pr_type(enum pr_type type)
default:
return 0;
}
-};
+}
+
+static int nvme_send_ns_head_pr_command(struct block_device *bdev,
+ struct nvme_command *c, u8 data[16])
+{
+ struct nvme_ns_head *head = bdev->bd_disk->private_data;
+ int srcu_idx = srcu_read_lock(&head->srcu);
+ struct nvme_ns *ns = nvme_find_path(head);
+ int ret = -EWOULDBLOCK;
+
+ if (ns) {
+ c->common.nsid = cpu_to_le32(ns->head->ns_id);
+ ret = nvme_submit_sync_cmd(ns->queue, c, data, 16);
+ }
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+
+static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
+ u8 data[16])
+{
+ c->common.nsid = cpu_to_le32(ns->head->ns_id);
+ return nvme_submit_sync_cmd(ns->queue, c, data, 16);
+}
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
u64 key, u64 sa_key, u8 op)
{
- struct nvme_ns_head *head = NULL;
- struct nvme_ns *ns;
- struct nvme_command c;
- int srcu_idx, ret;
+ struct nvme_command c = { };
u8 data[16] = { 0, };
- ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
- if (unlikely(!ns))
- return -EWOULDBLOCK;
-
put_unaligned_le64(key, &data[0]);
put_unaligned_le64(sa_key, &data[8]);
- memset(&c, 0, sizeof(c));
c.common.opcode = op;
- c.common.nsid = cpu_to_le32(ns->head->ns_id);
c.common.cdw10 = cpu_to_le32(cdw10);
- ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
- nvme_put_ns_from_disk(head, srcu_idx);
- return ret;
+ if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
+ bdev->bd_disk->fops == &nvme_ns_head_ops)
+ return nvme_send_ns_head_pr_command(bdev, &c, data);
+ return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, data);
}
static int nvme_pr_register(struct block_device *bdev, u64 old,
@@ -1979,22 +2152,25 @@ static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
enum pr_type type, bool abort)
{
u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
+
return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}
static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
- u32 cdw10 = 1 | (key ? 1 << 3 : 0);
- return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
+ u32 cdw10 = 1 | (key ? 0 : 1 << 3);
+
+ return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
- u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
+ u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 0 : 1 << 3);
+
return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
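/*
 * Illustrative sketch, not part of the patch: how the CDW10 values built in
 * nvme_pr_clear() and nvme_pr_release() above decompose. Per the NVMe Base
 * spec, bits 02:00 carry the release action (0 = Release, 1 = Clear), bit 03
 * is the Ignore Existing Key flag (set only when no key is supplied), and
 * bits 15:08 carry the reservation type. The helper name is hypothetical.
 */
static inline u32 example_resv_release_cdw10(u8 rrela, bool iekey, u8 rtype)
{
	return rrela | (iekey ? 1 << 3 : 0) | ((u32)rtype << 8);
}
/* nvme_pr_clear():   example_resv_release_cdw10(1, !key, 0)                  */
/* nvme_pr_release(): example_resv_release_cdw10(0, !key, nvme_pr_type(type)) */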
-static const struct pr_ops nvme_pr_ops = {
+const struct pr_ops nvme_pr_ops = {
.pr_register = nvme_pr_register,
.pr_reserve = nvme_pr_reserve,
.pr_release = nvme_pr_release,
@@ -2007,9 +2183,8 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
bool send)
{
struct nvme_ctrl *ctrl = data;
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
- memset(&cmd, 0, sizeof(cmd));
if (send)
cmd.common.opcode = nvme_admin_security_send;
else
@@ -2019,52 +2194,36 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
cmd.common.cdw11 = cpu_to_le32(len);
return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
- ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false);
+ NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */
-static const struct block_device_operations nvme_fops = {
- .owner = THIS_MODULE,
- .ioctl = nvme_ioctl,
- .compat_ioctl = nvme_ioctl,
- .open = nvme_open,
- .release = nvme_release,
- .getgeo = nvme_getgeo,
- .revalidate_disk= nvme_revalidate_disk,
- .pr_ops = &nvme_pr_ops,
-};
-
-#ifdef CONFIG_NVME_MULTIPATH
-static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
+#ifdef CONFIG_BLK_DEV_ZONED
+static int nvme_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
{
- struct nvme_ns_head *head = bdev->bd_disk->private_data;
-
- if (!kref_get_unless_zero(&head->ref))
- return -ENXIO;
- return 0;
-}
-
-static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
-{
- nvme_put_ns_head(disk->private_data);
+ return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb,
+ data);
}
+#else
+#define nvme_report_zones NULL
+#endif /* CONFIG_BLK_DEV_ZONED */
-const struct block_device_operations nvme_ns_head_ops = {
+static const struct block_device_operations nvme_bdev_ops = {
.owner = THIS_MODULE,
- .open = nvme_ns_head_open,
- .release = nvme_ns_head_release,
.ioctl = nvme_ioctl,
- .compat_ioctl = nvme_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
+ .open = nvme_open,
+ .release = nvme_release,
.getgeo = nvme_getgeo,
+ .report_zones = nvme_report_zones,
.pr_ops = &nvme_pr_ops,
};
-#endif /* CONFIG_NVME_MULTIPATH */
-static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
+static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 timeout, bool enabled)
{
- unsigned long timeout =
- ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
+ unsigned long timeout_jiffies = ((timeout + 1) * HZ / 2) + jiffies;
u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
int ret;
@@ -2074,13 +2233,13 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
if ((csts & NVME_CSTS_RDY) == bit)
break;
- msleep(100);
+ usleep_range(1000, 2000);
if (fatal_signal_pending(current))
return -EINTR;
- if (time_after(jiffies, timeout)) {
+ if (time_after(jiffies, timeout_jiffies)) {
dev_err(ctrl->device,
- "Device not ready; aborting %s\n", enabled ?
- "initialisation" : "reset");
+ "Device not ready; aborting %s, CSTS=0x%x\n",
+ enabled ? "initialisation" : "reset", csts);
return -ENODEV;
}
}
@@ -2108,18 +2267,14 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
msleep(NVME_QUIRK_DELAY_AMOUNT);
- return nvme_wait_ready(ctrl, ctrl->cap, false);
+ return nvme_wait_ready(ctrl, NVME_CAP_TIMEOUT(ctrl->cap), false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
{
- /*
- * Default to a 4K page size, with the intention to update this
- * path in the future to accomodate architectures with differing
- * kernel and IO page sizes.
- */
- unsigned dev_page_min, page_shift = 12;
+ unsigned dev_page_min;
+ u32 timeout;
int ret;
ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
@@ -2129,25 +2284,55 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
}
dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
- if (page_shift < dev_page_min) {
+ if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
dev_err(ctrl->device,
"Minimum device page size %u too large for host (%u)\n",
- 1 << dev_page_min, 1 << page_shift);
+ 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
return -ENODEV;
}
- ctrl->page_size = 1 << page_shift;
+ if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
+ ctrl->ctrl_config = NVME_CC_CSS_CSI;
+ else
+ ctrl->ctrl_config = NVME_CC_CSS_NVM;
+
+ if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
+ u32 crto;
+
+ ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
+ if (ret) {
+ dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
+ ret);
+ return ret;
+ }
- ctrl->ctrl_config = NVME_CC_CSS_NVM;
- ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
+ if (ctrl->cap & NVME_CAP_CRMS_CRIMS) {
+ ctrl->ctrl_config |= NVME_CC_CRIME;
+ timeout = NVME_CRTO_CRIMT(crto);
+ } else {
+ timeout = NVME_CRTO_CRWMT(crto);
+ }
+ } else {
+ timeout = NVME_CAP_TIMEOUT(ctrl->cap);
+ }
+
+ ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
- ctrl->ctrl_config |= NVME_CC_ENABLE;
+ ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
+ if (ret)
+ return ret;
+
+ /* Flush write to device (required if transport is PCI) */
+ ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config);
+ if (ret)
+ return ret;
+ ctrl->ctrl_config |= NVME_CC_ENABLE;
ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
if (ret)
return ret;
- return nvme_wait_ready(ctrl, ctrl->cap, true);
+ return nvme_wait_ready(ctrl, timeout, true);
}
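/*
 * Note, illustration only (not part of the patch): with NVME_CTRL_PAGE_SHIFT
 * fixed at 12, the CC.MPS field programmed above is 0, i.e. a 2^(12 + 0) =
 * 4KiB controller memory page size. When CAP.CRMS is advertised, the
 * readiness timeout comes from CRTO (CRIMT with CRIME set, CRWMT otherwise)
 * rather than CAP.TO; nvme_wait_ready() treats all of these as 500ms units,
 * hence the ((timeout + 1) * HZ / 2) conversion to jiffies.
 */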
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
@@ -2182,28 +2367,6 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
-static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
- struct request_queue *q)
-{
- bool vwc = false;
-
- if (ctrl->max_hw_sectors) {
- u32 max_segments =
- (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
-
- max_segments = min_not_zero(max_segments, ctrl->max_segments);
- blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
- blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
- }
- if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
- is_power_of_2(ctrl->max_hw_sectors))
- blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
- blk_queue_virt_boundary(q, ctrl->page_size - 1);
- if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
- vwc = true;
- blk_queue_write_cache(q, vwc, vwc);
-}
-
static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
{
__le64 ts;
@@ -2221,49 +2384,97 @@ static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
return ret;
}
-static int nvme_configure_acre(struct nvme_ctrl *ctrl)
+static int nvme_configure_host_options(struct nvme_ctrl *ctrl)
{
struct nvme_feat_host_behavior *host;
+ u8 acre = 0, lbafee = 0;
int ret;
/* Don't bother enabling the feature if retry delay is not reported */
- if (!ctrl->crdt[0])
+ if (ctrl->crdt[0])
+ acre = NVME_ENABLE_ACRE;
+ if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)
+ lbafee = NVME_ENABLE_LBAFEE;
+
+ if (!acre && !lbafee)
return 0;
host = kzalloc(sizeof(*host), GFP_KERNEL);
if (!host)
return 0;
- host->acre = NVME_ENABLE_ACRE;
+ host->acre = acre;
+ host->lbafee = lbafee;
ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
host, sizeof(*host), NULL);
kfree(host);
return ret;
}
-static int nvme_configure_apst(struct nvme_ctrl *ctrl)
+/*
+ * The function checks whether the given total (exlat + enlat) latency of
+ * a power state allows the latter to be used as an APST transition target.
+ * It does so by comparing the latency to the primary and secondary latency
+ * tolerances defined by module params. If there's a match, the corresponding
+ * timeout value is returned and the matching tolerance index (1 or 2) is
+ * reported.
+ */
+static bool nvme_apst_get_transition_time(u64 total_latency,
+ u64 *transition_time, unsigned *last_index)
{
- /*
- * APST (Autonomous Power State Transition) lets us program a
- * table of power state transitions that the controller will
- * perform automatically. We configure it with a simple
- * heuristic: we are willing to spend at most 2% of the time
- * transitioning between power states. Therefore, when running
- * in any given state, we will enter the next lower-power
- * non-operational state after waiting 50 * (enlat + exlat)
- * microseconds, as long as that state's exit latency is under
- * the requested maximum latency.
- *
- * We will not autonomously enter any non-operational state for
- * which the total latency exceeds ps_max_latency_us. Users
- * can set ps_max_latency_us to zero to turn off APST.
- */
+ if (total_latency <= apst_primary_latency_tol_us) {
+ if (*last_index == 1)
+ return false;
+ *last_index = 1;
+ *transition_time = apst_primary_timeout_ms;
+ return true;
+ }
+ if (apst_secondary_timeout_ms &&
+ total_latency <= apst_secondary_latency_tol_us) {
+ if (*last_index <= 2)
+ return false;
+ *last_index = 2;
+ *transition_time = apst_secondary_timeout_ms;
+ return true;
+ }
+ return false;
+}
- unsigned apste;
+/*
+ * APST (Autonomous Power State Transition) lets us program a table of power
+ * state transitions that the controller will perform automatically.
+ *
+ * Depending on module params, one of the two supported techniques will be used:
+ *
+ * - If the parameters provide explicit timeouts and tolerances, they will be
+ * used to build a table with up to 2 non-operational states to transition to.
+ * The default parameter values were selected based on the values used by
+ * Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic
+ * regeneration of the APST table in the event of switching between external
+ * and battery power, the timeouts and tolerances reflect a compromise
+ * between values used by Microsoft for AC and battery scenarios.
+ * - If not, we'll configure the table with a simple heuristic: we are willing
+ * to spend at most 2% of the time transitioning between power states.
+ * Therefore, when running in any given state, we will enter the next
+ * lower-power non-operational state after waiting 50 * (enlat + exlat)
+ * microseconds, as long as that state's exit latency is under the requested
+ * maximum latency.
+ *
+ * We will not autonomously enter any non-operational state for which the total
+ * latency exceeds ps_max_latency_us.
+ *
+ * Users can set ps_max_latency_us to zero to turn off APST.
+ */
+static int nvme_configure_apst(struct nvme_ctrl *ctrl)
+{
struct nvme_feat_auto_pst *table;
+ unsigned apste = 0;
u64 max_lat_us = 0;
+ __le64 target = 0;
int max_ps = -1;
+ int state;
int ret;
+ unsigned last_lt_index = UINT_MAX;
/*
* If APST isn't supported or if we haven't been initialized yet,
@@ -2283,83 +2494,78 @@ static int nvme_configure_apst(struct nvme_ctrl *ctrl)
if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
/* Turn off APST. */
- apste = 0;
dev_dbg(ctrl->device, "APST disabled\n");
- } else {
- __le64 target = cpu_to_le64(0);
- int state;
+ goto done;
+ }
+
+ /*
+ * Walk through all states from lowest- to highest-power.
+ * According to the spec, lower-numbered states use more power. NPSS,
+ * despite the name, is the index of the lowest-power state, not the
+ * number of states.
+ */
+ for (state = (int)ctrl->npss; state >= 0; state--) {
+ u64 total_latency_us, exit_latency_us, transition_ms;
+
+ if (target)
+ table->entries[state] = target;
/*
- * Walk through all states from lowest- to highest-power.
- * According to the spec, lower-numbered states use more
- * power. NPSS, despite the name, is the index of the
- * lowest-power state, not the number of states.
+ * Don't allow transitions to the deepest state if it's quirked
+ * off.
*/
- for (state = (int)ctrl->npss; state >= 0; state--) {
- u64 total_latency_us, exit_latency_us, transition_ms;
+ if (state == ctrl->npss &&
+ (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
+ continue;
- if (target)
- table->entries[state] = target;
+ /*
+ * Is this state a useful non-operational state for higher-power
+ * states to autonomously transition to?
+ */
+ if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE))
+ continue;
- /*
- * Don't allow transitions to the deepest state
- * if it's quirked off.
- */
- if (state == ctrl->npss &&
- (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
- continue;
+ exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
+ if (exit_latency_us > ctrl->ps_max_latency_us)
+ continue;
- /*
- * Is this state a useful non-operational state for
- * higher-power states to autonomously transition to?
- */
- if (!(ctrl->psd[state].flags &
- NVME_PS_FLAGS_NON_OP_STATE))
- continue;
+ total_latency_us = exit_latency_us +
+ le32_to_cpu(ctrl->psd[state].entry_lat);
- exit_latency_us =
- (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
- if (exit_latency_us > ctrl->ps_max_latency_us)
+ /*
+ * This state is good. It can be used as the APST idle target
+ * for higher power states.
+ */
+ if (apst_primary_timeout_ms && apst_primary_latency_tol_us) {
+ if (!nvme_apst_get_transition_time(total_latency_us,
+ &transition_ms, &last_lt_index))
continue;
-
- total_latency_us =
- exit_latency_us +
- le32_to_cpu(ctrl->psd[state].entry_lat);
-
- /*
- * This state is good. Use it as the APST idle
- * target for higher power states.
- */
+ } else {
transition_ms = total_latency_us + 19;
do_div(transition_ms, 20);
if (transition_ms > (1 << 24) - 1)
transition_ms = (1 << 24) - 1;
-
- target = cpu_to_le64((state << 3) |
- (transition_ms << 8));
-
- if (max_ps == -1)
- max_ps = state;
-
- if (total_latency_us > max_lat_us)
- max_lat_us = total_latency_us;
}
- apste = 1;
-
- if (max_ps == -1) {
- dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
- } else {
- dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
- max_ps, max_lat_us, (int)sizeof(*table), table);
- }
+ target = cpu_to_le64((state << 3) | (transition_ms << 8));
+ if (max_ps == -1)
+ max_ps = state;
+ if (total_latency_us > max_lat_us)
+ max_lat_us = total_latency_us;
}
+ if (max_ps == -1)
+ dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
+ else
+ dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
+ max_ps, max_lat_us, (int)sizeof(*table), table);
+ apste = 1;
+
+done:
ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
table, sizeof(*table), NULL);
if (ret)
dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
-
kfree(table);
return ret;
}
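/*
 * Illustrative sketch, not part of the patch: the heuristic branch above
 * packs one APST table entry per usable non-operational state. For a state
 * with enlat + exlat = 5000us the idle timeout becomes
 * DIV_ROUND_UP(5000, 20) = 250ms, i.e. roughly 50 * (enlat + exlat) spent
 * idle for every transition. The hypothetical helper below mirrors the
 * packing used above: ITPS in bits 07:03, ITPT (in ms) in bits 31:08.
 */
static __le64 example_apst_heuristic_entry(int state, u64 total_latency_us)
{
	u64 transition_ms = DIV_ROUND_UP(total_latency_us, 20);

	if (transition_ms > (1 << 24) - 1)
		transition_ms = (1 << 24) - 1;
	return cpu_to_le64((state << 3) | (transition_ms << 8));
}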
@@ -2381,7 +2587,8 @@ static void nvme_set_latency_tolerance(struct device *dev, s32 val)
if (ctrl->ps_max_latency_us != latency) {
ctrl->ps_max_latency_us = latency;
- nvme_configure_apst(ctrl);
+ if (ctrl->state == NVME_CTRL_LIVE)
+ nvme_configure_apst(ctrl);
}
}
@@ -2416,6 +2623,34 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
.vid = 0x14a4,
.fr = "22301111",
.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
+ },
+ {
+ /*
+ * This Kioxia CD6-V Series / HPE PE8030 device times out and
+		 * aborts I/O during any load, but the issue is most easily
+		 * reproduced with discards (fstrim).
+ *
+ * The device is left in a state where it is also not possible
+ * to use "nvme set-feature" to disable APST, but booting with
+ * nvme_core.default_ps_max_latency=0 works.
+ */
+ .vid = 0x1e0f,
+ .mn = "KCD6XVUL6T40",
+ .quirks = NVME_QUIRK_NO_APST,
+ },
+ {
+ /*
+ * The external Samsung X5 SSD fails initialization without a
+ * delay before checking if it is ready and has a whole set of
+ * other problems. To make this even more interesting, it
+ * shares the PCI ID with internal Samsung 970 Evo Plus that
+ * does not need or want these quirks.
+ */
+ .vid = 0x144d,
+ .mn = "Samsung Portable SSD X5",
+ .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+ NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN,
}
};
@@ -2457,7 +2692,7 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
- strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
+ strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
return;
}
@@ -2465,7 +2700,11 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
}
- /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
+ /*
+ * Generate a "fake" NQN similar to the one in Section 4.5 of the NVMe
+ * Base Specification 2.0. It is slightly different from the format
+ * specified there due to historic reasons, and we can't change it now.
+ */
off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
"nqn.2014.08.org.nvmexpress:%04x%04x",
le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
@@ -2482,7 +2721,7 @@ static void nvme_release_subsystem(struct device *dev)
container_of(dev, struct nvme_subsystem, dev);
if (subsys->instance >= 0)
- ida_simple_remove(&nvme_instance_ida, subsys->instance);
+ ida_free(&nvme_instance_ida, subsys->instance);
kfree(subsys);
}
@@ -2544,18 +2783,36 @@ static ssize_t nvme_subsys_show_nqn(struct device *dev,
struct nvme_subsystem *subsys =
container_of(dev, struct nvme_subsystem, dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
+ return sysfs_emit(buf, "%s\n", subsys->subnqn);
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
+static ssize_t nvme_subsys_show_type(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_subsystem *subsys =
+ container_of(dev, struct nvme_subsystem, dev);
+
+ switch (subsys->subtype) {
+ case NVME_NQN_DISC:
+ return sysfs_emit(buf, "discovery\n");
+ case NVME_NQN_NVME:
+ return sysfs_emit(buf, "nvm\n");
+ default:
+ return sysfs_emit(buf, "reserved\n");
+ }
+}
+static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);
+
#define nvme_subsys_show_str_function(field) \
static ssize_t subsys_##field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct nvme_subsystem *subsys = \
container_of(dev, struct nvme_subsystem, dev); \
- return sprintf(buf, "%.*s\n", \
- (int)sizeof(subsys->field), subsys->field); \
+ return sysfs_emit(buf, "%.*s\n", \
+ (int)sizeof(subsys->field), subsys->field); \
} \
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
@@ -2568,13 +2825,14 @@ static struct attribute *nvme_subsys_attrs[] = {
&subsys_attr_serial.attr,
&subsys_attr_firmware_rev.attr,
&subsys_attr_subsysnqn.attr,
+ &subsys_attr_subsystype.attr,
#ifdef CONFIG_NVME_MULTIPATH
&subsys_attr_iopolicy.attr,
#endif
NULL,
};
-static struct attribute_group nvme_subsys_attrs_group = {
+static const struct attribute_group nvme_subsys_attrs_group = {
.attrs = nvme_subsys_attrs,
};
@@ -2583,6 +2841,11 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
NULL,
};
+static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
+{
+ return ctrl->opts && ctrl->opts->discovery_nqn;
+}
+
static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
@@ -2591,19 +2854,19 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
lockdep_assert_held(&nvme_subsystems_lock);
list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
- if (tmp->state == NVME_CTRL_DELETING ||
- tmp->state == NVME_CTRL_DEAD)
+ if (nvme_state_terminal(tmp))
continue;
if (tmp->cntlid == ctrl->cntlid) {
dev_err(ctrl->device,
- "Duplicate cntlid %u with %s, rejecting\n",
- ctrl->cntlid, dev_name(tmp->device));
+ "Duplicate cntlid %u with %s, subsys %s, rejecting\n",
+ ctrl->cntlid, dev_name(tmp->device),
+ subsys->subnqn);
return false;
}
- if ((id->cmic & (1 << 1)) ||
- (ctrl->opts && ctrl->opts->discovery_nqn))
+ if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
+ nvme_discovery_ctrl(ctrl))
continue;
dev_err(ctrl->device,
@@ -2631,13 +2894,25 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
nvme_init_subnqn(subsys, ctrl, id);
memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
memcpy(subsys->model, id->mn, sizeof(subsys->model));
- memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
subsys->vendor_id = le16_to_cpu(id->vid);
subsys->cmic = id->cmic;
+
+ /* Versions prior to 1.4 don't necessarily report a valid type */
+ if (id->cntrltype == NVME_CTRL_DISC ||
+ !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME))
+ subsys->subtype = NVME_NQN_DISC;
+ else
+ subsys->subtype = NVME_NQN_NVME;
+
+ if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
+ dev_err(ctrl->device,
+ "Subsystem %s is not a discovery controller",
+ subsys->subnqn);
+ kfree(subsys);
+ return -EINVAL;
+ }
subsys->awupf = le16_to_cpu(id->awupf);
-#ifdef CONFIG_NVME_MULTIPATH
- subsys->iopolicy = NVME_IOPOLICY_NUMA;
-#endif
+ nvme_mpath_default_iopolicy(subsys);
subsys->dev.class = nvme_subsys_class;
subsys->dev.release = nvme_release_subsystem;
@@ -2689,11 +2964,11 @@ out_unlock:
return ret;
}
-int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
+int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
void *log, size_t size, u64 offset)
{
struct nvme_command c = { };
- unsigned long dwlen = size / 4 - 1;
+ u32 dwlen = nvme_bytes_to_numd(size);
c.get_log_page.opcode = nvme_admin_get_log_page;
c.get_log_page.nsid = cpu_to_le32(nsid);
@@ -2703,51 +2978,104 @@ int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
+ c.get_log_page.csi = csi;
return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}
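/*
 * Worked example, illustration only: nvme_bytes_to_numd() converts the
 * buffer size into the zero-based dword count the Get Log Page command
 * expects, i.e. size / 4 - 1. For a 4096-byte log page that is 1023
 * (0x3ff), which is then split across the command's NUMDL (low 16 bits)
 * and NUMDU (high 16 bits) fields as done above.
 */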
-static int nvme_get_effects_log(struct nvme_ctrl *ctrl)
+static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
+ struct nvme_effects_log **log)
{
+ struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi);
int ret;
- if (!ctrl->effects)
- ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
+ if (cel)
+ goto out;
- if (!ctrl->effects)
- return 0;
+ cel = kzalloc(sizeof(*cel), GFP_KERNEL);
+ if (!cel)
+ return -ENOMEM;
- ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0,
- ctrl->effects, sizeof(*ctrl->effects), 0);
+ ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
+ cel, sizeof(*cel), 0);
if (ret) {
- kfree(ctrl->effects);
- ctrl->effects = NULL;
+ kfree(cel);
+ return ret;
}
+
+ xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
+out:
+ *log = cel;
+ return 0;
+}
+
+static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units)
+{
+ u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val;
+
+ if (check_shl_overflow(1U, units + page_shift - 9, &val))
+ return UINT_MAX;
+ return val;
+}
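/*
 * Worked example, illustration only: MDTS/WZSL-style values are powers of
 * two in units of the minimum memory page size. With CAP.MPSMIN = 0 (4KiB
 * pages, page_shift = 12), a field value of 5 maps to
 * 1 << (5 + 12 - 9) = 256 sectors of 512 bytes, i.e. 128KiB.
 */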
+
+static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
+{
+ struct nvme_command c = { };
+ struct nvme_id_ctrl_nvm *id;
+ int ret;
+
+ if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
+ ctrl->max_discard_sectors = UINT_MAX;
+ ctrl->max_discard_segments = NVME_DSM_MAX_RANGES;
+ } else {
+ ctrl->max_discard_sectors = 0;
+ ctrl->max_discard_segments = 0;
+ }
+
+ /*
+	 * Even though the NVMe spec explicitly states that MDTS is not
+	 * applicable to Write Zeroes, we are cautious and limit the size to
+	 * the controller's max_hw_sectors value, which is based on the MDTS
+	 * field and possibly other limiting factors.
+ */
+ if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
+ !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
+ ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;
+ else
+ ctrl->max_zeroes_sectors = 0;
+
+ if (nvme_ctrl_limited_cns(ctrl))
+ return 0;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return 0;
+
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.cns = NVME_ID_CNS_CS_CTRL;
+ c.identify.csi = NVME_CSI_NVM;
+
+ ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
+ if (ret)
+ goto free_data;
+
+ if (id->dmrl)
+ ctrl->max_discard_segments = id->dmrl;
+ ctrl->dmrsl = le32_to_cpu(id->dmrsl);
+ if (id->wzsl)
+ ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
+
+free_data:
+ kfree(id);
return ret;
}
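/*
 * Note, illustration only (not part of the patch): in the NVM command set
 * Identify Controller data (CNS 06h) fetched above, DMRL bounds the number
 * of ranges in a single Dataset Management command, DMRSL the size of each
 * range in logical blocks, and WZSL is a power-of-two Write Zeroes limit in
 * minimum-memory-page units, hence the nvme_mps_to_sectors() conversion.
 */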
-/*
- * Initialize the cached copies of the Identify data and various controller
- * register in our nvme_ctrl structure. This should be called as soon as
- * the admin queue is fully up and running.
- */
-int nvme_init_identify(struct nvme_ctrl *ctrl)
+static int nvme_init_identify(struct nvme_ctrl *ctrl)
{
struct nvme_id_ctrl *id;
- int ret, page_shift;
u32 max_hw_sectors;
bool prev_apst_enabled;
-
- ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
- if (ret) {
- dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
- return ret;
- }
- page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
- ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
-
- if (ctrl->vs >= NVME_VS(1, 1, 0))
- ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
+ int ret;
ret = nvme_identify_ctrl(ctrl, &id);
if (ret) {
@@ -2756,7 +3084,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
}
if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
- ret = nvme_get_effects_log(ctrl);
+ ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
if (ret < 0)
goto out_free;
}
@@ -2765,7 +3093,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->cntlid = le16_to_cpu(id->cntlid);
if (!ctrl->identified) {
- int i;
+ unsigned int i;
ret = nvme_init_subsystem(ctrl, id);
if (ret)
@@ -2784,6 +3112,8 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->quirks |= core_quirks[i].quirks;
}
}
+ memcpy(ctrl->subsys->firmware_rev, id->fr,
+ sizeof(ctrl->subsys->firmware_rev));
if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
@@ -2804,7 +3134,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
atomic_set(&ctrl->abort_limit, id->acl + 1);
ctrl->vwc = id->vwc;
if (id->mdts)
- max_hw_sectors = 1 << (id->mdts + page_shift - 9);
+ max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts);
else
max_hw_sectors = UINT_MAX;
ctrl->max_hw_sectors =
@@ -2816,9 +3146,12 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->max_namespaces = le32_to_cpu(id->mnan);
ctrl->ctratt = le32_to_cpu(id->ctratt);
+ ctrl->cntrltype = id->cntrltype;
+ ctrl->dctype = id->dctype;
+
if (id->rtd3e) {
/* us -> s */
- u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000;
+ u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;
ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
shutdown_timeout, 60);
@@ -2864,7 +3197,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
goto out_free;
}
- if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
+ if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
dev_err(ctrl->device,
"keep-alive support is mandatory for fabrics\n");
ret = -EINVAL;
@@ -2877,45 +3210,71 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
}
- ret = nvme_mpath_init(ctrl, id);
- kfree(id);
-
+ ret = nvme_mpath_init_identify(ctrl, id);
if (ret < 0)
- return ret;
+ goto out_free;
if (ctrl->apst_enabled && !prev_apst_enabled)
dev_pm_qos_expose_latency_tolerance(ctrl->device);
else if (!ctrl->apst_enabled && prev_apst_enabled)
dev_pm_qos_hide_latency_tolerance(ctrl->device);
- ret = nvme_configure_apst(ctrl);
- if (ret < 0)
+out_free:
+ kfree(id);
+ return ret;
+}
+
+/*
+ * Initialize the cached copies of the Identify data and various controller
+ * registers in our nvme_ctrl structure. This should be called as soon as
+ * the admin queue is fully up and running.
+ */
+int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl)
+{
+ int ret;
+
+ ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
+ if (ret) {
+ dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
return ret;
-
- ret = nvme_configure_timestamp(ctrl);
+ }
+
+ ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
+
+ if (ctrl->vs >= NVME_VS(1, 1, 0))
+ ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
+
+ ret = nvme_init_identify(ctrl);
+ if (ret)
+ return ret;
+
+ ret = nvme_configure_apst(ctrl);
if (ret < 0)
return ret;
- ret = nvme_configure_directives(ctrl);
+ ret = nvme_configure_timestamp(ctrl);
if (ret < 0)
return ret;
- ret = nvme_configure_acre(ctrl);
+ ret = nvme_configure_host_options(ctrl);
if (ret < 0)
return ret;
- if (!ctrl->identified)
- nvme_hwmon_init(ctrl);
+ if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
+ /*
+ * Do not return errors unless we are in a controller reset,
+		 * Do not return errors unless we are in a controller reset;
+		 * the controller works perfectly fine without hwmon.
+ ret = nvme_hwmon_init(ctrl);
+ if (ret == -EINTR)
+ return ret;
+ }
ctrl->identified = true;
return 0;
-
-out_free:
- kfree(id);
- return ret;
}
-EXPORT_SYMBOL_GPL(nvme_init_identify);
+EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish);
static int nvme_dev_open(struct inode *inode, struct file *file)
{
@@ -2929,74 +3288,33 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
return -EWOULDBLOCK;
}
+ nvme_get_ctrl(ctrl);
+ if (!try_module_get(ctrl->ops->module)) {
+ nvme_put_ctrl(ctrl);
+ return -EINVAL;
+ }
+
file->private_data = ctrl;
return 0;
}
-static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
+static int nvme_dev_release(struct inode *inode, struct file *file)
{
- struct nvme_ns *ns;
- int ret;
-
- down_read(&ctrl->namespaces_rwsem);
- if (list_empty(&ctrl->namespaces)) {
- ret = -ENOTTY;
- goto out_unlock;
- }
-
- ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
- if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
- dev_warn(ctrl->device,
- "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
- ret = -EINVAL;
- goto out_unlock;
- }
-
- dev_warn(ctrl->device,
- "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
- kref_get(&ns->kref);
- up_read(&ctrl->namespaces_rwsem);
-
- ret = nvme_user_cmd(ctrl, ns, argp);
- nvme_put_ns(ns);
- return ret;
-
-out_unlock:
- up_read(&ctrl->namespaces_rwsem);
- return ret;
-}
+ struct nvme_ctrl *ctrl =
+ container_of(inode->i_cdev, struct nvme_ctrl, cdev);
-static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct nvme_ctrl *ctrl = file->private_data;
- void __user *argp = (void __user *)arg;
-
- switch (cmd) {
- case NVME_IOCTL_ADMIN_CMD:
- return nvme_user_cmd(ctrl, NULL, argp);
- case NVME_IOCTL_ADMIN64_CMD:
- return nvme_user_cmd64(ctrl, NULL, argp);
- case NVME_IOCTL_IO_CMD:
- return nvme_dev_user_cmd(ctrl, argp);
- case NVME_IOCTL_RESET:
- dev_warn(ctrl->device, "resetting controller\n");
- return nvme_reset_ctrl_sync(ctrl);
- case NVME_IOCTL_SUBSYS_RESET:
- return nvme_reset_subsystem(ctrl);
- case NVME_IOCTL_RESCAN:
- nvme_queue_scan(ctrl);
- return 0;
- default:
- return -ENOTTY;
- }
+ module_put(ctrl->ops->module);
+ nvme_put_ctrl(ctrl);
+ return 0;
}
static const struct file_operations nvme_dev_fops = {
.owner = THIS_MODULE,
.open = nvme_dev_open,
+ .release = nvme_dev_release,
.unlocked_ioctl = nvme_dev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
+ .uring_cmd = nvme_dev_uring_cmd,
};
static ssize_t nvme_sysfs_reset(struct device *dev,
@@ -3028,7 +3346,7 @@ static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
struct gendisk *disk = dev_to_disk(dev);
- if (disk->fops == &nvme_fops)
+ if (disk->fops == &nvme_bdev_ops)
return nvme_get_ns_from_dev(dev)->head;
else
return disk->private_data;
@@ -3044,13 +3362,13 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
int model_len = sizeof(subsys->model);
if (!uuid_is_null(&ids->uuid))
- return sprintf(buf, "uuid.%pU\n", &ids->uuid);
+ return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);
if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
- return sprintf(buf, "eui.%16phN\n", ids->nguid);
+ return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);
if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
- return sprintf(buf, "eui.%8phN\n", ids->eui64);
+ return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);
while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
subsys->serial[serial_len - 1] == '\0'))
@@ -3059,7 +3377,7 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
subsys->model[model_len - 1] == '\0'))
model_len--;
- return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
+ return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
serial_len, subsys->serial, model_len, subsys->model,
head->ns_id);
}
@@ -3068,7 +3386,7 @@ static DEVICE_ATTR_RO(wwid);
static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
+ return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
}
static DEVICE_ATTR_RO(nguid);
@@ -3081,25 +3399,25 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
* we have no UUID set
*/
if (uuid_is_null(&ids->uuid)) {
- printk_ratelimited(KERN_WARNING
- "No UUID available providing old NGUID\n");
- return sprintf(buf, "%pU\n", ids->nguid);
+ dev_warn_ratelimited(dev,
+ "No UUID available providing old NGUID\n");
+ return sysfs_emit(buf, "%pU\n", ids->nguid);
}
- return sprintf(buf, "%pU\n", &ids->uuid);
+ return sysfs_emit(buf, "%pU\n", &ids->uuid);
}
static DEVICE_ATTR_RO(uuid);
static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
+ return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
}
static DEVICE_ATTR_RO(eui);
static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
+ return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
}
static DEVICE_ATTR_RO(nsid);
@@ -3137,7 +3455,7 @@ static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
}
#ifdef CONFIG_NVME_MULTIPATH
if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
- if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */
+ if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */
return 0;
if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
return 0;
@@ -3153,9 +3471,6 @@ static const struct attribute_group nvme_ns_id_attr_group = {
const struct attribute_group *nvme_ns_id_attr_groups[] = {
&nvme_ns_id_attr_group,
-#ifdef CONFIG_NVM
- &nvme_nvm_attr_group,
-#endif
NULL,
};
@@ -3164,7 +3479,7 @@ static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
- return sprintf(buf, "%.*s\n", \
+ return sysfs_emit(buf, "%.*s\n", \
(int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \
} \
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
@@ -3178,7 +3493,7 @@ static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
- return sprintf(buf, "%d\n", ctrl->field); \
+ return sysfs_emit(buf, "%d\n", ctrl->field); \
} \
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
@@ -3186,6 +3501,7 @@ nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);
+nvme_show_int_function(kato);
static ssize_t nvme_sysfs_delete(struct device *dev,
struct device_attribute *attr, const char *buf,
@@ -3205,7 +3521,7 @@ static ssize_t nvme_sysfs_show_transport(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
+ return sysfs_emit(buf, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
@@ -3220,14 +3536,15 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
[NVME_CTRL_RESETTING] = "resetting",
[NVME_CTRL_CONNECTING] = "connecting",
[NVME_CTRL_DELETING] = "deleting",
+ [NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
[NVME_CTRL_DEAD] = "dead",
};
if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
state_name[ctrl->state])
- return sprintf(buf, "%s\n", state_name[ctrl->state]);
+ return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);
- return sprintf(buf, "unknown state\n");
+ return sysfs_emit(buf, "unknown state\n");
}
static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
@@ -3238,10 +3555,30 @@ static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
+ return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
+static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
+}
+static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
+
+static ssize_t nvme_sysfs_show_hostid(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
+}
+static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
+
static ssize_t nvme_sysfs_show_address(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -3252,6 +3589,232 @@ static ssize_t nvme_sysfs_show_address(struct device *dev,
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
+static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (ctrl->opts->max_reconnects == -1)
+ return sysfs_emit(buf, "off\n");
+ return sysfs_emit(buf, "%d\n",
+ opts->max_reconnects * opts->reconnect_delay);
+}
+
+static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ int ctrl_loss_tmo, err;
+
+ err = kstrtoint(buf, 10, &ctrl_loss_tmo);
+ if (err)
+ return -EINVAL;
+
+ if (ctrl_loss_tmo < 0)
+ opts->max_reconnects = -1;
+ else
+ opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
+ opts->reconnect_delay);
+ return count;
+}
+static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
+ nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);
+
+static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (ctrl->opts->reconnect_delay == -1)
+ return sysfs_emit(buf, "off\n");
+ return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
+}
+
+static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ unsigned int v;
+ int err;
+
+ err = kstrtou32(buf, 10, &v);
+ if (err)
+ return err;
+
+ ctrl->opts->reconnect_delay = v;
+ return count;
+}
+static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
+ nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);
+
+static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (ctrl->opts->fast_io_fail_tmo == -1)
+ return sysfs_emit(buf, "off\n");
+ return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
+}
+
+static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ int fast_io_fail_tmo, err;
+
+ err = kstrtoint(buf, 10, &fast_io_fail_tmo);
+ if (err)
+ return -EINVAL;
+
+ if (fast_io_fail_tmo < 0)
+ opts->fast_io_fail_tmo = -1;
+ else
+ opts->fast_io_fail_tmo = fast_io_fail_tmo;
+ return count;
+}
+static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
+ nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);
+
+static ssize_t cntrltype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ static const char * const type[] = {
+ [NVME_CTRL_IO] = "io\n",
+ [NVME_CTRL_DISC] = "discovery\n",
+ [NVME_CTRL_ADMIN] = "admin\n",
+ };
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
+ return sysfs_emit(buf, "reserved\n");
+
+ return sysfs_emit(buf, type[ctrl->cntrltype]);
+}
+static DEVICE_ATTR_RO(cntrltype);
+
+static ssize_t dctype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ static const char * const type[] = {
+ [NVME_DCTYPE_NOT_REPORTED] = "none\n",
+ [NVME_DCTYPE_DDC] = "ddc\n",
+ [NVME_DCTYPE_CDC] = "cdc\n",
+ };
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
+ return sysfs_emit(buf, "reserved\n");
+
+ return sysfs_emit(buf, type[ctrl->dctype]);
+}
+static DEVICE_ATTR_RO(dctype);
+
+#ifdef CONFIG_NVME_AUTH
+static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (!opts->dhchap_secret)
+ return sysfs_emit(buf, "none\n");
+ return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
+}
+
+static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ char *dhchap_secret;
+
+ if (!ctrl->opts->dhchap_secret)
+ return -EINVAL;
+ if (count < 7)
+ return -EINVAL;
+ if (memcmp(buf, "DHHC-1:", 7))
+ return -EINVAL;
+
+ dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ memcpy(dhchap_secret, buf, count);
+ nvme_auth_stop(ctrl);
+ if (strcmp(dhchap_secret, opts->dhchap_secret)) {
+ int ret;
+
+ ret = nvme_auth_generate_key(dhchap_secret, &ctrl->host_key);
+ if (ret)
+ return ret;
+ kfree(opts->dhchap_secret);
+ opts->dhchap_secret = dhchap_secret;
+		/* Key has changed; re-authenticate with the new key */
+ nvme_auth_reset(ctrl);
+ }
+ /* Start re-authentication */
+ dev_info(ctrl->device, "re-authenticating controller\n");
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+
+ return count;
+}
+static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
+ nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);
+
+static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (!opts->dhchap_ctrl_secret)
+ return sysfs_emit(buf, "none\n");
+ return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
+}
+
+static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ char *dhchap_secret;
+
+ if (!ctrl->opts->dhchap_ctrl_secret)
+ return -EINVAL;
+ if (count < 7)
+ return -EINVAL;
+ if (memcmp(buf, "DHHC-1:", 7))
+ return -EINVAL;
+
+ dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ memcpy(dhchap_secret, buf, count);
+ nvme_auth_stop(ctrl);
+ if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
+ int ret;
+
+ ret = nvme_auth_generate_key(dhchap_secret, &ctrl->ctrl_key);
+ if (ret)
+ return ret;
+ kfree(opts->dhchap_ctrl_secret);
+ opts->dhchap_ctrl_secret = dhchap_secret;
+		/* Key has changed; re-authenticate with the new key */
+ nvme_auth_reset(ctrl);
+ }
+ /* Start re-authentication */
+ dev_info(ctrl->device, "re-authenticating controller\n");
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+
+ return count;
+}
+static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
+ nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
+#endif
+
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
&dev_attr_rescan_controller.attr,
@@ -3267,6 +3830,18 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_numa_node.attr,
&dev_attr_queue_count.attr,
&dev_attr_sqsize.attr,
+ &dev_attr_hostnqn.attr,
+ &dev_attr_hostid.attr,
+ &dev_attr_ctrl_loss_tmo.attr,
+ &dev_attr_reconnect_delay.attr,
+ &dev_attr_fast_io_fail_tmo.attr,
+ &dev_attr_kato.attr,
+ &dev_attr_cntrltype.attr,
+ &dev_attr_dctype.attr,
+#ifdef CONFIG_NVME_AUTH
+ &dev_attr_dhchap_secret.attr,
+ &dev_attr_dhchap_ctrl_secret.attr,
+#endif
NULL
};
@@ -3280,11 +3855,27 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
return 0;
if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
return 0;
+ if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_hostid.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
+ return 0;
+#ifdef CONFIG_NVME_AUTH
+ if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
+ return 0;
+#endif
return a->mode;
}
-static struct attribute_group nvme_dev_attrs_group = {
+static const struct attribute_group nvme_dev_attrs_group = {
.attrs = nvme_dev_attrs,
.is_visible = nvme_dev_attrs_are_visible,
};
@@ -3294,40 +3885,121 @@ static const struct attribute_group *nvme_dev_attr_groups[] = {
NULL,
};
-static struct nvme_ns_head *__nvme_find_ns_head(struct nvme_subsystem *subsys,
+static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl,
unsigned nsid)
{
struct nvme_ns_head *h;
- lockdep_assert_held(&subsys->lock);
+ lockdep_assert_held(&ctrl->subsys->lock);
- list_for_each_entry(h, &subsys->nsheads, entry) {
- if (h->ns_id == nsid && kref_get_unless_zero(&h->ref))
+ list_for_each_entry(h, &ctrl->subsys->nsheads, entry) {
+ /*
+ * Private namespaces can share NSIDs under some conditions.
+ * In that case we can't use the same ns_head for namespaces
+ * with the same NSID.
+ */
+ if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h))
+ continue;
+ if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
return h;
}
return NULL;
}
-static int __nvme_check_ids(struct nvme_subsystem *subsys,
- struct nvme_ns_head *new)
+static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys,
+ struct nvme_ns_ids *ids)
{
+ bool has_uuid = !uuid_is_null(&ids->uuid);
+ bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid));
+ bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
struct nvme_ns_head *h;
lockdep_assert_held(&subsys->lock);
list_for_each_entry(h, &subsys->nsheads, entry) {
- if (nvme_ns_ids_valid(&new->ids) &&
- !list_empty(&h->list) &&
- nvme_ns_ids_equal(&new->ids, &h->ids))
+ if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid))
+ return -EINVAL;
+ if (has_nguid &&
+ memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0)
+ return -EINVAL;
+ if (has_eui64 &&
+ memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0)
return -EINVAL;
}
return 0;
}
+static void nvme_cdev_rel(struct device *dev)
+{
+ ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
+}
+
+void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
+{
+ cdev_device_del(cdev, cdev_device);
+ put_device(cdev_device);
+}
+
+int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
+ const struct file_operations *fops, struct module *owner)
+{
+ int minor, ret;
+
+ minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL);
+ if (minor < 0)
+ return minor;
+ cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
+ cdev_device->class = nvme_ns_chr_class;
+ cdev_device->release = nvme_cdev_rel;
+ device_initialize(cdev_device);
+ cdev_init(cdev, fops);
+ cdev->owner = owner;
+ ret = cdev_device_add(cdev, cdev_device);
+ if (ret)
+ put_device(cdev_device);
+
+ return ret;
+}
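/*
 * Note, illustration only (not part of the patch): on cdev_device_add()
 * failure the put_device() above drops the reference taken by
 * device_initialize(), so nvme_cdev_rel() returns the minor to
 * nvme_ns_chr_minor_ida in both the error path and the normal teardown via
 * nvme_cdev_del().
 */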
+
+static int nvme_ns_chr_open(struct inode *inode, struct file *file)
+{
+ return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev));
+}
+
+static int nvme_ns_chr_release(struct inode *inode, struct file *file)
+{
+ nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev));
+ return 0;
+}
+
+static const struct file_operations nvme_ns_chr_fops = {
+ .owner = THIS_MODULE,
+ .open = nvme_ns_chr_open,
+ .release = nvme_ns_chr_release,
+ .unlocked_ioctl = nvme_ns_chr_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .uring_cmd = nvme_ns_chr_uring_cmd,
+ .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,
+};
+
+static int nvme_add_ns_cdev(struct nvme_ns *ns)
+{
+ int ret;
+
+ ns->cdev_device.parent = ns->ctrl->device;
+ ret = dev_set_name(&ns->cdev_device, "ng%dn%d",
+ ns->ctrl->instance, ns->head->instance);
+ if (ret)
+ return ret;
+
+ return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
+ ns->ctrl->ops->module);
+}
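/*
 * Note, illustration only (not part of the patch): the "ng%dn%d" name above
 * gives each namespace a character device node such as /dev/ng0n1 alongside
 * the block node /dev/nvme0n1, so passthrough ioctls and io_uring commands
 * remain available even when the block interface is hidden
 * (GENHD_FL_HIDDEN).
 */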
+
static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
- unsigned nsid, struct nvme_id_ns *id)
+ struct nvme_ns_info *info)
{
struct nvme_ns_head *head;
size_t size = sizeof(*head);
@@ -3340,7 +4012,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
head = kzalloc(size, GFP_KERNEL);
if (!head)
goto out;
- ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
+ ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL);
if (ret < 0)
goto out_free_head;
head->instance = ret;
@@ -3349,19 +4021,17 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
if (ret)
goto out_ida_remove;
head->subsys = ctrl->subsys;
- head->ns_id = nsid;
+ head->ns_id = info->nsid;
+ head->ids = info->ids;
+ head->shared = info->is_shared;
kref_init(&head->ref);
- ret = nvme_report_ns_ids(ctrl, nsid, id, &head->ids);
- if (ret)
- goto out_cleanup_srcu;
-
- ret = __nvme_check_ids(ctrl->subsys, head);
- if (ret) {
- dev_err(ctrl->device,
- "duplicate IDs for nsid %d\n", nsid);
- goto out_cleanup_srcu;
- }
+ if (head->ids.csi) {
+ ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
+ if (ret)
+ goto out_cleanup_srcu;
+ } else
+ head->effects = ctrl->effects;
ret = nvme_mpath_alloc_disk(ctrl, head);
if (ret)
@@ -3375,7 +4045,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
out_cleanup_srcu:
cleanup_srcu_struct(&head->srcu);
out_ida_remove:
- ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
+ ida_free(&ctrl->subsys->ns_ida, head->instance);
out_free_head:
kfree(head);
out:
@@ -3384,65 +4054,105 @@ out:
return ERR_PTR(ret);
}
-static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
- struct nvme_id_ns *id)
+static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
+ struct nvme_ns_ids *ids)
+{
+ struct nvme_subsystem *s;
+ int ret = 0;
+
+ /*
+ * Note that this check is racy as we try to avoid holding the global
+ * lock over the whole ns_head creation. But it is only intended as
+ * a sanity check anyway.
+ */
+ mutex_lock(&nvme_subsystems_lock);
+ list_for_each_entry(s, &nvme_subsystems, entry) {
+ if (s == this)
+ continue;
+ mutex_lock(&s->lock);
+ ret = nvme_subsys_check_duplicate_ids(s, ids);
+ mutex_unlock(&s->lock);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&nvme_subsystems_lock);
+
+ return ret;
+}
+
+static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
{
struct nvme_ctrl *ctrl = ns->ctrl;
- bool is_shared = id->nmic & (1 << 0);
struct nvme_ns_head *head = NULL;
- int ret = 0;
+ int ret;
+
+ ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
+ if (ret) {
+ dev_err(ctrl->device,
+ "globally duplicate IDs for nsid %d\n", info->nsid);
+ nvme_print_device_info(ctrl);
+ return ret;
+ }
mutex_lock(&ctrl->subsys->lock);
- if (is_shared)
- head = __nvme_find_ns_head(ctrl->subsys, nsid);
+ head = nvme_find_ns_head(ctrl, info->nsid);
if (!head) {
- head = nvme_alloc_ns_head(ctrl, nsid, id);
+ ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids);
+ if (ret) {
+ dev_err(ctrl->device,
+ "duplicate IDs in subsystem for nsid %d\n",
+ info->nsid);
+ goto out_unlock;
+ }
+ head = nvme_alloc_ns_head(ctrl, info);
if (IS_ERR(head)) {
ret = PTR_ERR(head);
goto out_unlock;
}
} else {
- struct nvme_ns_ids ids;
-
- ret = nvme_report_ns_ids(ctrl, nsid, id, &ids);
- if (ret)
- goto out_unlock;
-
- if (!nvme_ns_ids_equal(&head->ids, &ids)) {
+ ret = -EINVAL;
+ if (!info->is_shared || !head->shared) {
+ dev_err(ctrl->device,
+ "Duplicate unshared namespace %d\n",
+ info->nsid);
+ goto out_put_ns_head;
+ }
+ if (!nvme_ns_ids_equal(&head->ids, &info->ids)) {
dev_err(ctrl->device,
"IDs don't match for shared namespace %d\n",
- nsid);
- ret = -EINVAL;
- goto out_unlock;
+ info->nsid);
+ goto out_put_ns_head;
+ }
+
+ if (!multipath && !list_empty(&head->list)) {
+ dev_warn(ctrl->device,
+ "Found shared namespace %d, but multipathing not supported.\n",
+ info->nsid);
+ dev_warn_once(ctrl->device,
+ "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n.");
}
}
- list_add_tail(&ns->siblings, &head->list);
+ list_add_tail_rcu(&ns->siblings, &head->list);
ns->head = head;
+ mutex_unlock(&ctrl->subsys->lock);
+ return 0;
+out_put_ns_head:
+ nvme_put_ns_head(head);
out_unlock:
mutex_unlock(&ctrl->subsys->lock);
- if (ret > 0)
- ret = blk_status_to_errno(nvme_error_status(ret));
return ret;
}
-static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
-{
- struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
- struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
-
- return nsa->head->ns_id - nsb->head->ns_id;
-}
-
-static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns *ns, *ret = NULL;
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
if (ns->head->ns_id == nsid) {
- if (!kref_get_unless_zero(&ns->kref))
+ if (!nvme_get_ns(ns))
continue;
ret = ns;
break;
@@ -3453,177 +4163,240 @@ static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
up_read(&ctrl->namespaces_rwsem);
return ret;
}
+EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
-static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns)
+/*
+ * Add the namespace to the controller list while keeping the list ordered.
+ */
+static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
{
- struct streams_directive_params s;
- int ret;
-
- if (!ctrl->nr_streams)
- return 0;
-
- ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
- if (ret)
- return ret;
-
- ns->sws = le32_to_cpu(s.sws);
- ns->sgs = le16_to_cpu(s.sgs);
+ struct nvme_ns *tmp;
- if (ns->sws) {
- unsigned int bs = 1 << ns->lba_shift;
-
- blk_queue_io_min(ns->queue, bs * ns->sws);
- if (ns->sgs)
- blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs);
+ list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
+ if (tmp->head->ns_id < ns->head->ns_id) {
+ list_add(&ns->list, &tmp->list);
+ return;
+ }
}
-
- return 0;
+ list_add(&ns->list, &ns->ctrl->namespaces);
}
-static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
{
struct nvme_ns *ns;
struct gendisk *disk;
- struct nvme_id_ns *id;
- char disk_name[DISK_NAME_LEN];
- int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret;
+ int node = ctrl->numa_node;
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
- return -ENOMEM;
+ return;
- ns->queue = blk_mq_init_queue(ctrl->tagset);
- if (IS_ERR(ns->queue)) {
- ret = PTR_ERR(ns->queue);
+ disk = blk_mq_alloc_disk(ctrl->tagset, ns);
+ if (IS_ERR(disk))
goto out_free_ns;
- }
+ disk->fops = &nvme_bdev_ops;
+ disk->private_data = ns;
+
+ ns->disk = disk;
+ ns->queue = disk->queue;
if (ctrl->opts && ctrl->opts->data_digest)
- ns->queue->backing_dev_info->capabilities
- |= BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
- if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
+ if (ctrl->ops->supports_pci_p2pdma &&
+ ctrl->ops->supports_pci_p2pdma(ctrl))
blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
- ns->queue->queuedata = ns;
ns->ctrl = ctrl;
-
kref_init(&ns->kref);
- ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
-
- blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
- nvme_set_queue_limits(ctrl, ns->queue);
- ret = nvme_identify_ns(ctrl, nsid, &id);
- if (ret)
- goto out_free_queue;
+ if (nvme_init_ns_head(ns, info))
+ goto out_cleanup_disk;
- if (id->ncap == 0) {
- ret = -EINVAL;
- goto out_free_id;
+ /*
+ * If multipathing is enabled, the device name for all disks, not
+ * just those that represent shared namespaces, needs to be based on the
+ * subsystem instance. Using the controller instance for private
+ * namespaces could lead to naming collisions between shared and private
+ * namespaces if they don't use a common numbering scheme.
+ *
+ * If multipathing is not enabled, disk names must use the controller
+ * instance as shared namespaces will show up as multiple block
+ * devices.
+ */
+ if (ns->head->disk) {
+ sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
+ ctrl->instance, ns->head->instance);
+ disk->flags |= GENHD_FL_HIDDEN;
+ } else if (multipath) {
+ sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance,
+ ns->head->instance);
+ } else {
+ sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
+ ns->head->instance);
}
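The three naming branches above can be hard to keep apart; here is a small self-contained sketch (made-up instance numbers, plain snprintf()) of which format each case produces:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Illustrative only: 'hidden_path' stands for "this namespace already has
 * a multipath head disk"; the instance numbers are invented.
 */
static void format_disk_name(char *buf, size_t len, bool hidden_path,
			     bool multipath, int subsys_inst, int ctrl_inst,
			     int head_inst)
{
	if (hidden_path)	/* per-path device hidden behind the head disk */
		snprintf(buf, len, "nvme%dc%dn%d", subsys_inst, ctrl_inst, head_inst);
	else if (multipath)	/* name follows the subsystem instance */
		snprintf(buf, len, "nvme%dn%d", subsys_inst, head_inst);
	else			/* name follows the controller instance */
		snprintf(buf, len, "nvme%dn%d", ctrl_inst, head_inst);
}

int main(void)
{
	char name[32];

	format_disk_name(name, sizeof(name), true, true, 0, 1, 2);
	printf("%s\n", name);	/* nvme0c1n2 */
	format_disk_name(name, sizeof(name), false, true, 0, 1, 2);
	printf("%s\n", name);	/* nvme0n2 */
	format_disk_name(name, sizeof(name), false, false, 0, 1, 2);
	printf("%s\n", name);	/* nvme1n2 */
	return 0;
}

With multipathing enabled the per-path device is hidden and carries both the subsystem and controller instances, while the visible name tracks only the subsystem instance.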
- ret = nvme_init_ns_head(ns, nsid, id);
- if (ret)
- goto out_free_id;
- nvme_setup_streams_ns(ctrl, ns);
- nvme_set_disk_name(disk_name, ns, ctrl, &flags);
-
- disk = alloc_disk_node(0, node);
- if (!disk) {
- ret = -ENOMEM;
+ if (nvme_update_ns_info(ns, info))
goto out_unlink_ns;
- }
-
- disk->fops = &nvme_fops;
- disk->private_data = ns;
- disk->queue = ns->queue;
- disk->flags = flags;
- memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
- ns->disk = disk;
-
- __nvme_revalidate_disk(disk, id);
-
- if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
- ret = nvme_nvm_register(ns, disk_name, node);
- if (ret) {
- dev_warn(ctrl->device, "LightNVM init failure\n");
- goto out_put_disk;
- }
- }
down_write(&ctrl->namespaces_rwsem);
- list_add_tail(&ns->list, &ctrl->namespaces);
+ nvme_ns_add_to_ctrl_list(ns);
up_write(&ctrl->namespaces_rwsem);
-
nvme_get_ctrl(ctrl);
- device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
+ if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
+ goto out_cleanup_ns_from_list;
+
+ if (!nvme_ns_head_multipath(ns->head))
+ nvme_add_ns_cdev(ns);
- nvme_mpath_add_disk(ns, id);
+ nvme_mpath_add_disk(ns, info->anagrpid);
nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
- kfree(id);
- return 0;
- out_put_disk:
- put_disk(ns->disk);
+ return;
+
+ out_cleanup_ns_from_list:
+ nvme_put_ctrl(ctrl);
+ down_write(&ctrl->namespaces_rwsem);
+ list_del_init(&ns->list);
+ up_write(&ctrl->namespaces_rwsem);
out_unlink_ns:
mutex_lock(&ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
+ if (list_empty(&ns->head->list))
+ list_del_init(&ns->head->entry);
mutex_unlock(&ctrl->subsys->lock);
nvme_put_ns_head(ns->head);
- out_free_id:
- kfree(id);
- out_free_queue:
- blk_cleanup_queue(ns->queue);
+ out_cleanup_disk:
+ put_disk(disk);
out_free_ns:
kfree(ns);
- if (ret > 0)
- ret = blk_status_to_errno(nvme_error_status(ret));
- return ret;
}
static void nvme_ns_remove(struct nvme_ns *ns)
{
+ bool last_path = false;
+
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
return;
+ clear_bit(NVME_NS_READY, &ns->flags);
+ set_capacity(ns->disk, 0);
nvme_fault_inject_fini(&ns->fault_inject);
+ /*
+ * Ensure that !NVME_NS_READY is seen by other threads to prevent
+ * this ns from going back into current_path.
+ */
+ synchronize_srcu(&ns->head->srcu);
+
+ /* wait for concurrent submissions */
+ if (nvme_mpath_clear_current_path(ns))
+ synchronize_srcu(&ns->head->srcu);
+
mutex_lock(&ns->ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
+ if (list_empty(&ns->head->list)) {
+ list_del_init(&ns->head->entry);
+ last_path = true;
+ }
mutex_unlock(&ns->ctrl->subsys->lock);
- synchronize_rcu(); /* guarantee not available in head->list */
- nvme_mpath_clear_current_path(ns);
- synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
- if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
- del_gendisk(ns->disk);
- blk_cleanup_queue(ns->queue);
- if (blk_get_integrity(ns->disk))
- blk_integrity_unregister(ns->disk);
- }
+ /* guarantee not available in head->list */
+ synchronize_rcu();
+
+ if (!nvme_ns_head_multipath(ns->head))
+ nvme_cdev_del(&ns->cdev, &ns->cdev_device);
+ del_gendisk(ns->disk);
down_write(&ns->ctrl->namespaces_rwsem);
list_del_init(&ns->list);
up_write(&ns->ctrl->namespaces_rwsem);
- nvme_mpath_check_last_path(ns);
+ if (last_path)
+ nvme_mpath_shutdown_disk(ns->head);
nvme_put_ns(ns);
}
-static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
+{
+ struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);
+
+ if (ns) {
+ nvme_ns_remove(ns);
+ nvme_put_ns(ns);
+ }
+}
+
+static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info)
+{
+ int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
+
+ if (test_bit(NVME_NS_DEAD, &ns->flags))
+ goto out;
+
+ ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) {
+ dev_err(ns->ctrl->device,
+ "identifiers changed for nsid %d\n", ns->head->ns_id);
+ goto out;
+ }
+
+ ret = nvme_update_ns_info(ns, info);
+out:
+ /*
+ * Only remove the namespace if we got a fatal error back from the
+ * device; otherwise ignore the error and just move on.
+ *
+ * TODO: we should probably schedule a delayed retry here.
+ */
+ if (ret > 0 && (ret & NVME_SC_DNR))
+ nvme_ns_remove(ns);
+}
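A compact illustration of the removal policy described in the comment above, assuming the kernel's internal status encoding where the Do Not Retry bit is 0x4000 and "Invalid Namespace or Format" is 0x0b (the combination used a few lines earlier); negative values stand for transport/errno failures:

#include <stdbool.h>
#include <stdio.h>

#define SC_DNR		0x4000	/* Do Not Retry bit (assumed encoding) */
#define SC_INVALID_NS	0x000b	/* "Invalid Namespace or Format" (assumed) */

/* Remove only on a positive NVMe status that carries the DNR bit. */
static bool should_remove(int status)
{
	return status > 0 && (status & SC_DNR);
}

int main(void)
{
	printf("%d\n", should_remove(0));			/* success: keep */
	printf("%d\n", should_remove(-5));			/* errno: keep and retry later */
	printf("%d\n", should_remove(SC_INVALID_NS | SC_DNR));	/* fatal: remove */
	return 0;
}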
+
+static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
+ struct nvme_ns_info info = { .nsid = nsid };
struct nvme_ns *ns;
+ if (nvme_identify_ns_descs(ctrl, &info))
+ return;
+
+ if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) {
+ dev_warn(ctrl->device,
+ "command set not reported for nsid: %d\n", nsid);
+ return;
+ }
+
+ /*
+	 * If available, try to use the Command Set Independent Identify Namespace
+	 * data structure to find all the generic information that is needed to
+	 * set up a namespace. If not, fall back to the legacy version.
+ */
+ if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
+ (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS)) {
+ if (nvme_ns_info_from_id_cs_indep(ctrl, &info))
+ return;
+ } else {
+ if (nvme_ns_info_from_identify(ctrl, &info))
+ return;
+ }
+
+ /*
+ * Ignore the namespace if it is not ready. We will get an AEN once it
+ * becomes ready and restart the scan.
+ */
+ if (!info.is_ready)
+ return;
+
ns = nvme_find_get_ns(ctrl, nsid);
if (ns) {
- if (ns->disk && revalidate_disk(ns->disk))
- nvme_ns_remove(ns);
+ nvme_validate_ns(ns, &info);
nvme_put_ns(ns);
- } else
- nvme_alloc_ns(ctrl, nsid);
+ } else {
+ nvme_alloc_ns(ctrl, &info);
+ }
}
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
@@ -3644,39 +4417,44 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
}
-static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
+static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
{
- struct nvme_ns *ns;
+ const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
__le32 *ns_list;
- unsigned i, j, nsid, prev = 0;
- unsigned num_lists = DIV_ROUND_UP_ULL((u64)nn, 1024);
- int ret = 0;
+ u32 prev = 0;
+ int ret = 0, i;
+
+ if (nvme_ctrl_limited_cns(ctrl))
+ return -EOPNOTSUPP;
ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
if (!ns_list)
return -ENOMEM;
- for (i = 0; i < num_lists; i++) {
- ret = nvme_identify_ns_list(ctrl, prev, ns_list);
- if (ret)
- goto free;
+ for (;;) {
+ struct nvme_command cmd = {
+ .identify.opcode = nvme_admin_identify,
+ .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST,
+ .identify.nsid = cpu_to_le32(prev),
+ };
- for (j = 0; j < min(nn, 1024U); j++) {
- nsid = le32_to_cpu(ns_list[j]);
- if (!nsid)
- goto out;
+ ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
+ NVME_IDENTIFY_DATA_SIZE);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "Identify NS List failed (status=0x%x)\n", ret);
+ goto free;
+ }
- nvme_validate_ns(ctrl, nsid);
+ for (i = 0; i < nr_entries; i++) {
+ u32 nsid = le32_to_cpu(ns_list[i]);
- while (++prev < nsid) {
- ns = nvme_find_get_ns(ctrl, prev);
- if (ns) {
- nvme_ns_remove(ns);
- nvme_put_ns(ns);
- }
- }
+ if (!nsid) /* end of the list? */
+ goto out;
+ nvme_scan_ns(ctrl, nsid);
+ while (++prev < nsid)
+ nvme_ns_remove_by_nsid(ctrl, prev);
}
- nn -= j;
}
out:
nvme_remove_invalid_namespaces(ctrl, prev);
@@ -3685,12 +4463,18 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
return ret;
}
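The paging and gap-removal logic above is easiest to see on a toy list: entries come back in ascending order, a zero terminates the page, and every NSID skipped between the previous and the current entry is treated as no longer active. A runnable sketch with fabricated NSIDs:

#include <stdio.h>

int main(void)
{
	unsigned int ns_list[] = { 1, 2, 5, 9, 0 };	/* 0 == end of list */
	unsigned int prev = 0;

	for (int i = 0; ns_list[i]; i++) {
		unsigned int nsid = ns_list[i];

		printf("scan nsid %u\n", nsid);
		while (++prev < nsid)
			printf("remove stale nsid %u\n", prev);
	}
	return 0;
}

For the sample page this scans 1, 2, 5 and 9 while removing the stale NSIDs 3, 4, 6, 7 and 8 in between.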
-static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
+static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
{
- unsigned i;
+ struct nvme_id_ctrl *id;
+ u32 nn, i;
+
+ if (nvme_identify_ctrl(ctrl, &id))
+ return;
+ nn = le32_to_cpu(id->nn);
+ kfree(id);
for (i = 1; i <= nn; i++)
- nvme_validate_ns(ctrl, i);
+ nvme_scan_ns(ctrl, i);
nvme_remove_invalid_namespaces(ctrl, nn);
}
@@ -3711,8 +4495,8 @@ static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
* raced with us in reading the log page, which could cause us to miss
* updates.
*/
- error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log,
- log_size, 0);
+ error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
+ NVME_CSI_NVM, log, log_size, 0);
if (error)
dev_warn(ctrl->device,
"reading changed ns log failed: %d\n", error);
@@ -3724,35 +4508,35 @@ static void nvme_scan_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl =
container_of(work, struct nvme_ctrl, scan_work);
- struct nvme_id_ctrl *id;
- unsigned nn;
+ int ret;
/* No tagset on a live ctrl means IO queues could not be created */
if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
return;
+ /*
+	 * Identify controller limits can change at controller reset due to a
+	 * new firmware download, and although this is not common we cannot
+	 * ignore such a scenario. The controller's non-MDTS limits are reported
+	 * in units of logical blocks, which depend on the format of the attached
+	 * namespace. Hence re-read the limits at the time of ns allocation.
+ */
+ ret = nvme_init_non_mdts_limits(ctrl);
+ if (ret < 0) {
+ dev_warn(ctrl->device,
+ "reading non-mdts-limits failed: %d\n", ret);
+ return;
+ }
+
if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
dev_info(ctrl->device, "rescanning namespaces.\n");
nvme_clear_changed_ns_log(ctrl);
}
- if (nvme_identify_ctrl(ctrl, &id))
- return;
-
mutex_lock(&ctrl->scan_lock);
- nn = le32_to_cpu(id->nn);
- if (ctrl->vs >= NVME_VS(1, 1, 0) &&
- !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
- if (!nvme_scan_ns_list(ctrl, nn))
- goto out_free_id;
- }
- nvme_scan_ns_sequential(ctrl, nn);
-out_free_id:
+ if (nvme_scan_ns_list(ctrl) != 0)
+ nvme_scan_ns_sequential(ctrl);
mutex_unlock(&ctrl->scan_lock);
- kfree(id);
- down_write(&ctrl->namespaces_rwsem);
- list_sort(NULL, &ctrl->namespaces, ns_cmp);
- up_write(&ctrl->namespaces_rwsem);
}
/*
@@ -3784,6 +4568,9 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
if (ctrl->state == NVME_CTRL_DEAD)
nvme_kill_queues(ctrl);
+ /* this is a no-op when called from the controller reset handler */
+ nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
+
down_write(&ctrl->namespaces_rwsem);
list_splice_init(&ctrl->namespaces, &ns_list);
up_write(&ctrl->namespaces_rwsem);
@@ -3816,10 +4603,22 @@ static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
opts->host_traddr ?: "none");
+ if (ret)
+ return ret;
+
+ ret = add_uevent_var(env, "NVME_HOST_IFACE=%s",
+ opts->host_iface ?: "none");
}
return ret;
}
+static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata)
+{
+ char *envp[2] = { envdata, NULL };
+
+ kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
+}
+
static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
{
char *envp[2] = { NULL, NULL };
@@ -3842,7 +4641,14 @@ static void nvme_async_event_work(struct work_struct *work)
container_of(work, struct nvme_ctrl, async_event_work);
nvme_aen_uevent(ctrl);
- ctrl->ops->submit_async_event(ctrl);
+
+ /*
+ * The transport drivers must guarantee AER submission here is safe by
+ * flushing ctrl async_event_work after changing the controller state
+ * from LIVE and before freeing the admin queue.
+ */
+ if (ctrl->state == NVME_CTRL_LIVE)
+ ctrl->ops->submit_async_event(ctrl);
}
static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
@@ -3867,8 +4673,8 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
if (!log)
return;
- if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
- sizeof(*log), 0))
+ if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
+ log, sizeof(*log), 0))
dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
kfree(log);
}
@@ -3903,11 +4709,24 @@ static void nvme_fw_act_work(struct work_struct *work)
nvme_start_queues(ctrl);
/* read FW slot information to clear the AER */
nvme_get_fw_slot_info(ctrl);
+
+ queue_work(nvme_wq, &ctrl->async_event_work);
+}
+
+static u32 nvme_aer_type(u32 result)
+{
+ return result & 0x7;
+}
+
+static u32 nvme_aer_subtype(u32 result)
+{
+ return (result & 0xff00) >> 8;
}
-static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
- u32 aer_notice_type = (result & 0xff00) >> 8;
+ u32 aer_notice_type = nvme_aer_subtype(result);
+ bool requeue = true;
trace_nvme_async_event(ctrl, aer_notice_type);
@@ -3922,8 +4741,11 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
* recovery actions from interfering with the controller's
* firmware activation.
*/
- if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
+ nvme_auth_stop(ctrl);
+ requeue = false;
queue_work(nvme_wq, &ctrl->fw_act_work);
+ }
break;
#ifdef CONFIG_NVME_MULTIPATH
case NVME_AER_NOTICE_ANA:
@@ -3938,22 +4760,41 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
default:
dev_warn(ctrl->device, "async event result %08x\n", result);
}
+ return requeue;
+}
+
+static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
+{
+ trace_nvme_async_event(ctrl, NVME_AER_ERROR);
+ dev_warn(ctrl->device, "resetting controller due to AER\n");
+ nvme_reset_ctrl(ctrl);
}
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
volatile union nvme_result *res)
{
u32 result = le32_to_cpu(res->u32);
- u32 aer_type = result & 0x07;
+ u32 aer_type = nvme_aer_type(result);
+ u32 aer_subtype = nvme_aer_subtype(result);
+ bool requeue = true;
if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
return;
switch (aer_type) {
case NVME_AER_NOTICE:
- nvme_handle_aen_notice(ctrl, result);
+ requeue = nvme_handle_aen_notice(ctrl, result);
break;
case NVME_AER_ERROR:
+ /*
+ * For a persistent internal error, don't run async_event_work
+ * to submit a new AER. The controller reset will do it.
+ */
+ if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) {
+ nvme_handle_aer_persistent_error(ctrl);
+ return;
+ }
+ fallthrough;
case NVME_AER_SMART:
case NVME_AER_CSS:
case NVME_AER_VS:
@@ -3963,52 +4804,189 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
default:
break;
}
- queue_work(nvme_wq, &ctrl->async_event_work);
+
+ if (requeue)
+ queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);
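The helpers and the switch above simply slice up the AER completion result dword: bits 2:0 carry the event type and bits 15:8 the event information. A standalone decode of a made-up result value:

#include <stdio.h>

int main(void)
{
	unsigned int result = 0x00010102;		 /* fabricated AER completion dword */
	unsigned int aer_type = result & 0x7;		 /* bits 2:0  */
	unsigned int aer_subtype = (result >> 8) & 0xff; /* bits 15:8 */

	printf("type=%u subtype=%u\n", aer_type, aer_subtype);
	return 0;
}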
+int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int flags,
+ unsigned int cmd_size)
+{
+ int ret;
+
+ memset(set, 0, sizeof(*set));
+ set->ops = ops;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+ if (ctrl->ops->flags & NVME_F_FABRICS)
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = ctrl->numa_node;
+ set->flags = flags;
+ set->cmd_size = cmd_size;
+ set->driver_data = ctrl;
+ set->nr_hw_queues = 1;
+ set->timeout = NVME_ADMIN_TIMEOUT;
+ ret = blk_mq_alloc_tag_set(set);
+ if (ret)
+ return ret;
+
+ ctrl->admin_q = blk_mq_init_queue(set);
+ if (IS_ERR(ctrl->admin_q)) {
+ ret = PTR_ERR(ctrl->admin_q);
+ goto out_free_tagset;
+ }
+
+ if (ctrl->ops->flags & NVME_F_FABRICS) {
+ ctrl->fabrics_q = blk_mq_init_queue(set);
+ if (IS_ERR(ctrl->fabrics_q)) {
+ ret = PTR_ERR(ctrl->fabrics_q);
+ goto out_cleanup_admin_q;
+ }
+ }
+
+ ctrl->admin_tagset = set;
+ return 0;
+
+out_cleanup_admin_q:
+ blk_mq_destroy_queue(ctrl->admin_q);
+out_free_tagset:
+ blk_mq_free_tag_set(ctrl->admin_tagset);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
+
+void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
+{
+ blk_mq_destroy_queue(ctrl->admin_q);
+ if (ctrl->ops->flags & NVME_F_FABRICS)
+ blk_mq_destroy_queue(ctrl->fabrics_q);
+ blk_mq_free_tag_set(ctrl->admin_tagset);
+}
+EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
+
+int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int flags,
+ unsigned int cmd_size)
+{
+ int ret;
+
+ memset(set, 0, sizeof(*set));
+ set->ops = ops;
+ set->queue_depth = ctrl->sqsize + 1;
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = ctrl->numa_node;
+ set->flags = flags;
+ set->cmd_size = cmd_size,
+ set->driver_data = ctrl;
+ set->nr_hw_queues = ctrl->queue_count - 1;
+ set->timeout = NVME_IO_TIMEOUT;
+ if (ops->map_queues)
+ set->nr_maps = ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
+ ret = blk_mq_alloc_tag_set(set);
+ if (ret)
+ return ret;
+
+ if (ctrl->ops->flags & NVME_F_FABRICS) {
+ ctrl->connect_q = blk_mq_init_queue(set);
+ if (IS_ERR(ctrl->connect_q)) {
+ ret = PTR_ERR(ctrl->connect_q);
+ goto out_free_tag_set;
+ }
+ }
+
+ ctrl->tagset = set;
+ return 0;
+
+out_free_tag_set:
+ blk_mq_free_tag_set(set);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);
+
+void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->ops->flags & NVME_F_FABRICS)
+ blk_mq_destroy_queue(ctrl->connect_q);
+ blk_mq_free_tag_set(ctrl->tagset);
+}
+EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set);
+
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
nvme_mpath_stop(ctrl);
+ nvme_auth_stop(ctrl);
nvme_stop_keep_alive(ctrl);
+ nvme_stop_failfast_work(ctrl);
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fw_act_work);
+ if (ctrl->ops->stop_ctrl)
+ ctrl->ops->stop_ctrl(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
- if (ctrl->kato)
- nvme_start_keep_alive(ctrl);
+ nvme_start_keep_alive(ctrl);
nvme_enable_aen(ctrl);
+ /*
+	 * Persistent discovery controllers need to send an indication to
+	 * userspace to re-read the discovery log page and learn about possible
+	 * changes that were missed. We identify persistent discovery controllers
+	 * by checking that they have started once before, hence are reconnecting.
+ */
+ if (test_and_set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
+ nvme_discovery_ctrl(ctrl))
+ nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");
+
if (ctrl->queue_count > 1) {
nvme_queue_scan(ctrl);
nvme_start_queues(ctrl);
+ nvme_mpath_update(ctrl);
}
+
+ nvme_change_uevent(ctrl, "NVME_EVENT=connected");
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);
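The "rediscover" uevent relies on test_and_set_bit() returning the flag's previous value, so only a controller that has already started once (i.e. is reconnecting) triggers it. A plain-C sketch of that idea, without the kernel's atomicity and with a made-up flag name:

#include <stdbool.h>
#include <stdio.h>

static bool test_and_set(bool *flag)
{
	bool old = *flag;

	*flag = true;
	return old;
}

int main(void)
{
	bool started_once = false;

	printf("first start: rediscover? %d\n", test_and_set(&started_once)); /* 0 */
	printf("reconnect:   rediscover? %d\n", test_and_set(&started_once)); /* 1 */
	return 0;
}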
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
+ nvme_hwmon_exit(ctrl);
nvme_fault_inject_fini(&ctrl->fault_inject);
dev_pm_qos_hide_latency_tolerance(ctrl->device);
cdev_device_del(&ctrl->cdev, ctrl->device);
+ nvme_put_ctrl(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
+static void nvme_free_cels(struct nvme_ctrl *ctrl)
+{
+ struct nvme_effects_log *cel;
+ unsigned long i;
+
+ xa_for_each(&ctrl->cels, i, cel) {
+ xa_erase(&ctrl->cels, i);
+ kfree(cel);
+ }
+
+ xa_destroy(&ctrl->cels);
+}
+
static void nvme_free_ctrl(struct device *dev)
{
struct nvme_ctrl *ctrl =
container_of(dev, struct nvme_ctrl, ctrl_device);
struct nvme_subsystem *subsys = ctrl->subsys;
- if (subsys && ctrl->instance != subsys->instance)
- ida_simple_remove(&nvme_instance_ida, ctrl->instance);
+ if (!subsys || ctrl->instance != subsys->instance)
+ ida_free(&nvme_instance_ida, ctrl->instance);
- kfree(ctrl->effects);
+ nvme_free_cels(ctrl);
nvme_mpath_uninit(ctrl);
+ nvme_auth_stop(ctrl);
+ nvme_auth_free(ctrl);
__free_page(ctrl->discard_page);
if (subsys) {
@@ -4035,13 +5013,16 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
int ret;
ctrl->state = NVME_CTRL_NEW;
+ clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
spin_lock_init(&ctrl->lock);
mutex_init(&ctrl->scan_lock);
INIT_LIST_HEAD(&ctrl->namespaces);
+ xa_init(&ctrl->cels);
init_rwsem(&ctrl->namespaces_rwsem);
ctrl->dev = dev;
ctrl->ops = ops;
ctrl->quirks = quirks;
+ ctrl->numa_node = NUMA_NO_NODE;
INIT_WORK(&ctrl->scan_work, nvme_scan_work);
INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
@@ -4049,6 +5030,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
init_waitqueue_head(&ctrl->state_wq);
INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
+ INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
@@ -4060,14 +5042,15 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
goto out;
}
- ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL);
if (ret < 0)
goto out;
ctrl->instance = ret;
device_initialize(&ctrl->ctrl_device);
ctrl->device = &ctrl->ctrl_device;
- ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance);
+ ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
+ ctrl->instance);
ctrl->device->class = nvme_class;
ctrl->device->parent = ctrl->dev;
ctrl->device->groups = nvme_dev_attr_groups;
@@ -4077,6 +5060,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
if (ret)
goto out_release_instance;
+ nvme_get_ctrl(ctrl);
cdev_init(&ctrl->cdev, &nvme_dev_fops);
ctrl->cdev.owner = ops->module;
ret = cdev_device_add(&ctrl->cdev, ctrl->device);
@@ -4092,12 +5076,15 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
min(default_ps_max_latency_us, (unsigned long)S32_MAX));
nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
+ nvme_mpath_init_ctrl(ctrl);
+ nvme_auth_init_ctrl(ctrl);
return 0;
out_free_name:
+ nvme_put_ctrl(ctrl);
kfree_const(ctrl->device->kobj.name);
out_release_instance:
- ida_simple_remove(&nvme_instance_ida, ctrl->instance);
+ ida_free(&nvme_instance_ida, ctrl->instance);
out:
if (ctrl->discard_page)
__free_page(ctrl->discard_page);
@@ -4105,6 +5092,39 @@ out:
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
+static void nvme_start_ns_queue(struct nvme_ns *ns)
+{
+ if (test_and_clear_bit(NVME_NS_STOPPED, &ns->flags))
+ blk_mq_unquiesce_queue(ns->queue);
+}
+
+static void nvme_stop_ns_queue(struct nvme_ns *ns)
+{
+ if (!test_and_set_bit(NVME_NS_STOPPED, &ns->flags))
+ blk_mq_quiesce_queue(ns->queue);
+ else
+ blk_mq_wait_quiesce_done(ns->queue);
+}
+
+/*
+ * Prepare a queue for teardown.
+ *
+ * This must forcibly unquiesce queues to avoid blocking dispatch, and only set
+ * the capacity to 0 after that to avoid blocking dispatchers that may be
+ * holding bd_mutex. This will end buffered writers dirtying pages that can't
+ * be synced.
+ */
+static void nvme_set_queue_dying(struct nvme_ns *ns)
+{
+ if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+ return;
+
+ blk_mark_disk_dead(ns->disk);
+ nvme_start_ns_queue(ns);
+
+ set_capacity_and_notify(ns->disk, 0);
+}
+
/**
* nvme_kill_queues(): Ends all namespace queues
* @ctrl: the dead controller that needs to end
@@ -4120,7 +5140,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
/* Forcibly unquiesce queues to avoid blocking dispatch */
if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
- blk_mq_unquiesce_queue(ctrl->admin_q);
+ nvme_start_admin_queue(ctrl);
list_for_each_entry(ns, &ctrl->namespaces, list)
nvme_set_queue_dying(ns);
@@ -4140,7 +5160,7 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);
-void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
+int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
struct nvme_ns *ns;
@@ -4151,6 +5171,7 @@ void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
break;
}
up_read(&ctrl->namespaces_rwsem);
+ return timeout;
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
@@ -4182,7 +5203,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list)
- blk_mq_quiesce_queue(ns->queue);
+ nvme_stop_ns_queue(ns);
up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);
@@ -4193,13 +5214,28 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list)
- blk_mq_unquiesce_queue(ns->queue);
+ nvme_start_ns_queue(ns);
up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
+void nvme_stop_admin_queue(struct nvme_ctrl *ctrl)
+{
+ if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
+ blk_mq_quiesce_queue(ctrl->admin_q);
+ else
+ blk_mq_wait_quiesce_done(ctrl->admin_q);
+}
+EXPORT_SYMBOL_GPL(nvme_stop_admin_queue);
-void nvme_sync_queues(struct nvme_ctrl *ctrl)
+void nvme_start_admin_queue(struct nvme_ctrl *ctrl)
+{
+ if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
+ blk_mq_unquiesce_queue(ctrl->admin_q);
+}
+EXPORT_SYMBOL_GPL(nvme_start_admin_queue);
+
+void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
@@ -4207,12 +5243,25 @@ void nvme_sync_queues(struct nvme_ctrl *ctrl)
list_for_each_entry(ns, &ctrl->namespaces, list)
blk_sync_queue(ns->queue);
up_read(&ctrl->namespaces_rwsem);
+}
+EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
+void nvme_sync_queues(struct nvme_ctrl *ctrl)
+{
+ nvme_sync_io_queues(ctrl);
if (ctrl->admin_q)
blk_sync_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);
+struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
+{
+ if (file->f_op != &nvme_dev_fops)
+ return NULL;
+ return file->private_data;
+}
+EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU);
+
/*
* Check we didn't inadvertently grow the command structure sizes:
*/
@@ -4231,10 +5280,17 @@ static inline void _nvme_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ns_cs_indep) !=
+ NVME_IDENTIFY_DATA_SIZE);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ns_nvm) != NVME_IDENTIFY_DATA_SIZE);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE);
BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512);
}
@@ -4259,7 +5315,8 @@ static int __init nvme_core_init(void)
if (!nvme_delete_wq)
goto destroy_reset_wq;
- result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
+ result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0,
+ NVME_MINORS, "nvme");
if (result < 0)
goto destroy_delete_wq;
@@ -4275,12 +5332,28 @@ static int __init nvme_core_init(void)
result = PTR_ERR(nvme_subsys_class);
goto destroy_class;
}
+
+ result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS,
+ "nvme-generic");
+ if (result < 0)
+ goto destroy_subsys_class;
+
+ nvme_ns_chr_class = class_create(THIS_MODULE, "nvme-generic");
+ if (IS_ERR(nvme_ns_chr_class)) {
+ result = PTR_ERR(nvme_ns_chr_class);
+ goto unregister_generic_ns;
+ }
+
return 0;
+unregister_generic_ns:
+ unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
+destroy_subsys_class:
+ class_destroy(nvme_subsys_class);
destroy_class:
class_destroy(nvme_class);
unregister_chrdev:
- unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
+ unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
destroy_delete_wq:
destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
@@ -4293,12 +5366,16 @@ out:
static void __exit nvme_core_exit(void)
{
+ class_destroy(nvme_ns_chr_class);
class_destroy(nvme_subsys_class);
class_destroy(nvme_class);
- unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
+ unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
+ unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
destroy_workqueue(nvme_delete_wq);
destroy_workqueue(nvme_reset_wq);
destroy_workqueue(nvme_wq);
+ ida_destroy(&nvme_ns_chr_minor_ida);
+ ida_destroy(&nvme_instance_ida);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 74b8818ac9a1..ce27276f552d 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -49,7 +49,7 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
goto out_unlock;
kref_init(&host->ref);
- strlcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
+ strscpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
list_add_tail(&host->list, &nvmf_hosts);
out_unlock:
@@ -105,14 +105,17 @@ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
int len = 0;
if (ctrl->opts->mask & NVMF_OPT_TRADDR)
- len += snprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
+ len += scnprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
if (ctrl->opts->mask & NVMF_OPT_TRSVCID)
- len += snprintf(buf + len, size - len, "%strsvcid=%s",
+ len += scnprintf(buf + len, size - len, "%strsvcid=%s",
(len) ? "," : "", ctrl->opts->trsvcid);
if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
- len += snprintf(buf + len, size - len, "%shost_traddr=%s",
+ len += scnprintf(buf + len, size - len, "%shost_traddr=%s",
(len) ? "," : "", ctrl->opts->host_traddr);
- len += snprintf(buf + len, size - len, "\n");
+ if (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)
+ len += scnprintf(buf + len, size - len, "%shost_iface=%s",
+ (len) ? "," : "", ctrl->opts->host_iface);
+ len += scnprintf(buf + len, size - len, "\n");
return len;
}
@@ -141,17 +144,16 @@ EXPORT_SYMBOL_GPL(nvmf_get_address);
*/
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
union nvme_result res;
int ret;
- memset(&cmd, 0, sizeof(cmd));
cmd.prop_get.opcode = nvme_fabrics_command;
cmd.prop_get.fctype = nvme_fabrics_type_property_get;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
- NVME_QID_ANY, 0, 0, false);
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
+ NVME_QID_ANY, 0, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -187,18 +189,17 @@ EXPORT_SYMBOL_GPL(nvmf_reg_read32);
*/
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
union nvme_result res;
int ret;
- memset(&cmd, 0, sizeof(cmd));
cmd.prop_get.opcode = nvme_fabrics_command;
cmd.prop_get.fctype = nvme_fabrics_type_property_get;
cmd.prop_get.attrib = 1;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
- NVME_QID_ANY, 0, 0, false);
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
+ NVME_QID_ANY, 0, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -233,18 +234,17 @@ EXPORT_SYMBOL_GPL(nvmf_reg_read64);
*/
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
int ret;
- memset(&cmd, 0, sizeof(cmd));
cmd.prop_set.opcode = nvme_fabrics_command;
cmd.prop_set.fctype = nvme_fabrics_type_property_set;
cmd.prop_set.attrib = 0;
cmd.prop_set.offset = cpu_to_le32(off);
cmd.prop_set.value = cpu_to_le64(val);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0,
- NVME_QID_ANY, 0, 0, false);
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
+ NVME_QID_ANY, 0, 0);
if (unlikely(ret))
dev_err(ctrl->device,
"Property Set error: %d, offset %#x\n",
@@ -254,29 +254,30 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
EXPORT_SYMBOL_GPL(nvmf_reg_write32);
/**
- * nvmf_log_connect_error() - Error-parsing-diagnostic print
- * out function for connect() errors.
- *
- * @ctrl: the specific /dev/nvmeX device that had the error.
- *
- * @errval: Error code to be decoded in a more human-friendly
- * printout.
- *
- * @offset: For use with the NVMe error code NVME_SC_CONNECT_INVALID_PARAM.
- *
- * @cmd: This is the SQE portion of a submission capsule.
- *
- * @data: This is the "Data" portion of a submission capsule.
+ * nvmf_log_connect_error() - Error-parsing-diagnostic print out function for
+ * connect() errors.
+ * @ctrl: The specific /dev/nvmeX device that had the error.
+ * @errval: Error code to be decoded in a more human-friendly
+ * printout.
+ * @offset: For use with the NVMe error code
+ * NVME_SC_CONNECT_INVALID_PARAM.
+ * @cmd: This is the SQE portion of a submission capsule.
+ * @data: This is the "Data" portion of a submission capsule.
*/
static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
int errval, int offset, struct nvme_command *cmd,
struct nvmf_connect_data *data)
{
- int err_sctype = errval & (~NVME_SC_DNR);
+ int err_sctype = errval & ~NVME_SC_DNR;
- switch (err_sctype) {
+ if (errval < 0) {
+ dev_err(ctrl->device,
+ "Connect command failed, errno: %d\n", errval);
+ return;
+ }
- case (NVME_SC_CONNECT_INVALID_PARAM):
+ switch (err_sctype) {
+ case NVME_SC_CONNECT_INVALID_PARAM:
if (offset >> 16) {
char *inv_data = "Connect Invalid Data Parameter";
@@ -318,30 +319,34 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
}
}
break;
-
case NVME_SC_CONNECT_INVALID_HOST:
dev_err(ctrl->device,
"Connect for subsystem %s is not allowed, hostnqn: %s\n",
data->subsysnqn, data->hostnqn);
break;
-
case NVME_SC_CONNECT_CTRL_BUSY:
dev_err(ctrl->device,
"Connect command failed: controller is busy or not available\n");
break;
-
case NVME_SC_CONNECT_FORMAT:
dev_err(ctrl->device,
"Connect incompatible format: %d",
cmd->connect.recfmt);
break;
-
+ case NVME_SC_HOST_PATH_ERROR:
+ dev_err(ctrl->device,
+ "Connect command failed: host path error\n");
+ break;
+ case NVME_SC_AUTH_REQUIRED:
+ dev_err(ctrl->device,
+ "Connect command failed: authentication required\n");
+ break;
default:
dev_err(ctrl->device,
"Connect command failed, error wo/DNR bit: %d\n",
err_sctype);
break;
- } /* switch (err_sctype) */
+ }
}
/**
@@ -366,12 +371,12 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
*/
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
{
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
union nvme_result res;
struct nvmf_connect_data *data;
int ret;
+ u32 result;
- memset(&cmd, 0, sizeof(cmd));
cmd.connect.opcode = nvme_fabrics_command;
cmd.connect.fctype = nvme_fabrics_type_connect;
cmd.connect.qid = 0;
@@ -379,10 +384,8 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
/*
* Set keep-alive timeout in seconds granularity (ms * 1000)
- * and add a grace period for controller kato enforcement
*/
- cmd.connect.kato = ctrl->kato ?
- cpu_to_le32((ctrl->kato + NVME_KATO_GRACE) * 1000) : 0;
+ cmd.connect.kato = cpu_to_le32(ctrl->kato * 1000);
if (ctrl->opts->disable_sqflow)
cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
@@ -397,16 +400,33 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
- data, sizeof(*data), 0, NVME_QID_ANY, 1,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
+ data, sizeof(*data), NVME_QID_ANY, 1,
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
goto out_free_data;
}
- ctrl->cntlid = le16_to_cpu(res.u16);
-
+ result = le32_to_cpu(res.u32);
+ ctrl->cntlid = result & 0xFFFF;
+ if ((result >> 16) & 0x3) {
+ /* Authentication required */
+ ret = nvme_auth_negotiate(ctrl, 0);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid 0: authentication setup failed\n");
+ ret = NVME_SC_AUTH_REQUIRED;
+ goto out_free_data;
+ }
+ ret = nvme_auth_wait(ctrl, 0);
+ if (ret)
+ dev_warn(ctrl->device,
+ "qid 0: authentication failed\n");
+ else
+ dev_info(ctrl->device,
+ "qid 0: authenticated\n");
+ }
out_free_data:
kfree(data);
return ret;
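As the code above shows, the Connect completion result now carries more than the controller ID: the low 16 bits are the cntlid and bits 17:16 flag that in-band authentication is required. A small decode of a fabricated completion value:

#include <stdio.h>

int main(void)
{
	unsigned int result = 0x00010041;	/* made-up Connect completion dword */
	unsigned short cntlid = result & 0xffff;
	unsigned int authreq = (result >> 16) & 0x3;

	printf("cntlid %#x, authentication %s\n", cntlid,
	       authreq ? "required" : "not required");
	return 0;
}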
@@ -422,7 +442,6 @@ EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
* @qid: NVMe I/O queue number for the new I/O connection between
* host and target (note qid == 0 is illegal as this is
* the Admin queue, per NVMe standard).
- * @poll: Whether or not to poll for the completion of the connect cmd.
*
* This function issues a fabrics-protocol connection
* of a NVMe I/O queue (via NVMe Fabrics "Connect" command)
@@ -434,14 +453,14 @@ EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
* > 0: NVMe error status code
* < 0: Linux errno error code
*/
-int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
{
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
struct nvmf_connect_data *data;
union nvme_result res;
int ret;
+ u32 result;
- memset(&cmd, 0, sizeof(cmd));
cmd.connect.opcode = nvme_fabrics_command;
cmd.connect.fctype = nvme_fabrics_type_connect;
cmd.connect.qid = cpu_to_le16(qid);
@@ -460,12 +479,27 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
- data, sizeof(*data), 0, qid, 1,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, poll);
+ data, sizeof(*data), qid, 1,
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
}
+ result = le32_to_cpu(res.u32);
+ if ((result >> 16) & 2) {
+ /* Authentication required */
+ ret = nvme_auth_negotiate(ctrl, qid);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d: authentication setup failed\n", qid);
+ ret = NVME_SC_AUTH_REQUIRED;
+ } else {
+ ret = nvme_auth_wait(ctrl, qid);
+ if (ret)
+ dev_warn(ctrl->device,
+ "qid %u: authentication failed\n", qid);
+ }
+ }
kfree(data);
return ret;
}
@@ -535,63 +569,6 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
return NULL;
}
-/*
- * For something we're not in a state to send to the device the default action
- * is to busy it and retry it after the controller state is recovered. However,
- * if the controller is deleting or if anything is marked for failfast or
- * nvme multipath it is immediately failed.
- *
- * Note: commands used to initialize the controller will be marked for failfast.
- * Note: nvme cli/ioctl commands are marked for failfast.
- */
-blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
- struct request *rq)
-{
- if (ctrl->state != NVME_CTRL_DELETING &&
- ctrl->state != NVME_CTRL_DEAD &&
- !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
- return BLK_STS_RESOURCE;
-
- nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR;
- blk_mq_start_request(rq);
- nvme_complete_rq(rq);
- return BLK_STS_OK;
-}
-EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
-
-bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
- bool queue_live)
-{
- struct nvme_request *req = nvme_req(rq);
-
- /*
- * If we are in some state of setup or teardown only allow
- * internally generated commands.
- */
- if (!blk_rq_is_passthrough(rq) || (req->flags & NVME_REQ_USERCMD))
- return false;
-
- /*
- * Only allow commands on a live queue, except for the connect command,
- * which is require to set the queue live in the appropinquate states.
- */
- switch (ctrl->state) {
- case NVME_CTRL_NEW:
- case NVME_CTRL_CONNECTING:
- if (nvme_is_fabrics(req->cmd) &&
- req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
- return true;
- break;
- default:
- break;
- case NVME_CTRL_DEAD:
- return false;
- }
-
- return queue_live;
-}
-EXPORT_SYMBOL_GPL(__nvmf_check_ready);
-
static const match_table_t opt_tokens = {
{ NVMF_OPT_TRANSPORT, "transport=%s" },
{ NVMF_OPT_TRADDR, "traddr=%s" },
@@ -604,6 +581,7 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_KATO, "keep_alive_tmo=%d" },
{ NVMF_OPT_HOSTNQN, "hostnqn=%s" },
{ NVMF_OPT_HOST_TRADDR, "host_traddr=%s" },
+ { NVMF_OPT_HOST_IFACE, "host_iface=%s" },
{ NVMF_OPT_HOST_ID, "hostid=%s" },
{ NVMF_OPT_DUP_CONNECT, "duplicate_connect" },
{ NVMF_OPT_DISABLE_SQFLOW, "disable_sqflow" },
@@ -612,6 +590,10 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_NR_WRITE_QUEUES, "nr_write_queues=%d" },
{ NVMF_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" },
{ NVMF_OPT_TOS, "tos=%d" },
+ { NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
+ { NVMF_OPT_DISCOVERY, "discovery" },
+ { NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" },
+ { NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" },
{ NVMF_OPT_ERR, NULL }
};
@@ -629,8 +611,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->queue_size = NVMF_DEF_QUEUE_SIZE;
opts->nr_io_queues = num_online_cpus();
opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
- opts->kato = NVME_DEFAULT_KATO;
+ opts->kato = 0;
opts->duplicate_connect = false;
+ opts->fast_io_fail_tmo = NVMF_DEF_FAIL_FAST_TMO;
opts->hdr_digest = false;
opts->data_digest = false;
opts->tos = -1; /* < 0 == use transport default */
@@ -751,6 +734,20 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
pr_warn("ctrl_loss_tmo < 0 will reconnect forever\n");
ctrl_loss_tmo = token;
break;
+ case NVMF_OPT_FAIL_FAST_TMO:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (token >= 0)
+ pr_warn("I/O fail on reconnect controller after %d sec\n",
+ token);
+ else
+ token = -1;
+
+ opts->fast_io_fail_tmo = token;
+ break;
case NVMF_OPT_HOSTNQN:
if (opts->host) {
pr_err("hostnqn already user-assigned: %s\n",
@@ -771,7 +768,6 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
ret = -EINVAL;
goto out;
}
- nvmf_host_put(opts->host);
opts->host = nvmf_host_add(p);
kfree(p);
if (!opts->host) {
@@ -800,6 +796,15 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
kfree(opts->host_traddr);
opts->host_traddr = p;
break;
+ case NVMF_OPT_HOST_IFACE:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ kfree(opts->host_iface);
+ opts->host_iface = p;
+ break;
case NVMF_OPT_HOST_ID:
p = match_strdup(args);
if (!p) {
@@ -867,6 +872,37 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
}
opts->tos = token;
break;
+ case NVMF_OPT_DISCOVERY:
+ opts->discovery_nqn = true;
+ break;
+ case NVMF_OPT_DHCHAP_SECRET:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
+ pr_err("Invalid DH-CHAP secret %s\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ kfree(opts->dhchap_secret);
+ opts->dhchap_secret = p;
+ break;
+ case NVMF_OPT_DHCHAP_CTRL_SECRET:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
+ pr_err("Invalid DH-CHAP secret %s\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ kfree(opts->dhchap_ctrl_secret);
+ opts->dhchap_ctrl_secret = p;
+ break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
@@ -880,12 +916,19 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->nr_write_queues = 0;
opts->nr_poll_queues = 0;
opts->duplicate_connect = true;
+ } else {
+ if (!opts->kato)
+ opts->kato = NVME_DEFAULT_KATO;
}
- if (ctrl_loss_tmo < 0)
+ if (ctrl_loss_tmo < 0) {
opts->max_reconnects = -1;
- else
+ } else {
opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
opts->reconnect_delay);
+ if (ctrl_loss_tmo < opts->fast_io_fail_tmo)
+ pr_warn("failfast tmo (%d) larger than controller loss tmo (%d)\n",
+ opts->fast_io_fail_tmo, ctrl_loss_tmo);
+ }
if (!opts->host) {
kref_get(&nvmf_default_host->ref);
@@ -903,7 +946,7 @@ static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
unsigned int required_opts)
{
if ((opts->mask & required_opts) != required_opts) {
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
if ((opt_tokens[i].token & required_opts) &&
@@ -928,13 +971,17 @@ bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
return false;
/*
- * Checking the local address is rough. In most cases, none is specified
- * and the host port is selected by the stack.
+ * Checking the local address or host interfaces is rough.
+ *
+ * In most cases, none is specified and the host port or
+ * host interface is selected by the stack.
*
* Assume no match if:
- * - local address is specified and address is not the same
- * - local address is not specified but remote is, or vice versa
- * (admin using specific host_traddr when it matters).
+ * - local address or host interface is specified and address
+ * or host interface is not the same
+ * - local address or host interface is not specified but
+ * remote is, or vice versa (admin using specific
+ * host_traddr/host_iface when it matters).
*/
if ((opts->mask & NVMF_OPT_HOST_TRADDR) &&
(ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
@@ -945,6 +992,15 @@ bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
return false;
}
+ if ((opts->mask & NVMF_OPT_HOST_IFACE) &&
+ (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)) {
+ if (strcmp(opts->host_iface, ctrl->opts->host_iface))
+ return false;
+ } else if ((opts->mask & NVMF_OPT_HOST_IFACE) ||
+ (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)) {
+ return false;
+ }
+
return true;
}
EXPORT_SYMBOL_GPL(nvmf_ip_options_match);
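The matching rule spelled out in the comment boils down to this: an optional field matches only when both sides leave it unset, or both set it to the same value. A tiny sketch with NULL standing in for "not specified":

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool optional_field_matches(const char *a, const char *b)
{
	if (a && b)
		return strcmp(a, b) == 0;
	return !a && !b;		/* match only if neither side set it */
}

int main(void)
{
	printf("%d\n", optional_field_matches("eth0", "eth0"));	/* 1 */
	printf("%d\n", optional_field_matches("eth0", NULL));	/* 0 */
	printf("%d\n", optional_field_matches(NULL, NULL));	/* 1 */
	return 0;
}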
@@ -953,7 +1009,7 @@ static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
unsigned int allowed_opts)
{
if (opts->mask & ~allowed_opts) {
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
if ((opt_tokens[i].token & opts->mask) &&
@@ -977,6 +1033,9 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts)
kfree(opts->trsvcid);
kfree(opts->subsysnqn);
kfree(opts->host_traddr);
+ kfree(opts->host_iface);
+ kfree(opts->dhchap_secret);
+ kfree(opts->dhchap_ctrl_secret);
kfree(opts);
}
EXPORT_SYMBOL_GPL(nvmf_free_options);
@@ -985,7 +1044,9 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
#define NVMF_ALLOWED_OPTS (NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
- NVMF_OPT_DISABLE_SQFLOW)
+ NVMF_OPT_DISABLE_SQFLOW | NVMF_OPT_DISCOVERY |\
+ NVMF_OPT_FAIL_FAST_TMO | NVMF_OPT_DHCHAP_SECRET |\
+ NVMF_OPT_DHCHAP_CTRL_SECRET)
static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf)
@@ -1097,15 +1158,34 @@ out_unlock:
return ret ? ret : count;
}
+static void __nvmf_concat_opt_tokens(struct seq_file *seq_file)
+{
+ const struct match_token *tok;
+ int idx;
+
+ /*
+ * Add dummy entries for instance and cntlid to
+ * signal an invalid/non-existing controller
+ */
+ seq_puts(seq_file, "instance=-1,cntlid=-1");
+ for (idx = 0; idx < ARRAY_SIZE(opt_tokens); idx++) {
+ tok = &opt_tokens[idx];
+ if (tok->token == NVMF_OPT_ERR)
+ continue;
+ seq_puts(seq_file, ",");
+ seq_puts(seq_file, tok->pattern);
+ }
+ seq_puts(seq_file, "\n");
+}
+
static int nvmf_dev_show(struct seq_file *seq_file, void *private)
{
struct nvme_ctrl *ctrl;
- int ret = 0;
mutex_lock(&nvmf_dev_mutex);
ctrl = seq_file->private;
if (!ctrl) {
- ret = -EINVAL;
+ __nvmf_concat_opt_tokens(seq_file);
goto out_unlock;
}
@@ -1114,7 +1194,7 @@ static int nvmf_dev_show(struct seq_file *seq_file, void *private)
out_unlock:
mutex_unlock(&nvmf_dev_mutex);
- return ret;
+ return 0;
}
static int nvmf_dev_open(struct inode *inode, struct file *file)
@@ -1169,7 +1249,7 @@ static int __init nvmf_init(void)
nvmf_device =
device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
if (IS_ERR(nvmf_device)) {
- pr_err("couldn't create nvme-fabris device!\n");
+ pr_err("couldn't create nvme-fabrics device!\n");
ret = PTR_ERR(nvmf_device);
goto out_destroy_class;
}
@@ -1202,7 +1282,14 @@ static void __exit nvmf_exit(void)
BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_send_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_receive_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_negotiate_data) != 8);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_challenge_data) != 16);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_reply_data) != 16);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success1_data) != 16);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success2_data) != 16);
}
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index a0ec40ab62ee..a6e22116e139 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -15,6 +15,15 @@
#define NVMF_DEF_RECONNECT_DELAY 10
/* default to 600 seconds of reconnect attempts before giving up */
#define NVMF_DEF_CTRL_LOSS_TMO 600
+/* default is -1: the fail fast mechanism is disabled */
+#define NVMF_DEF_FAIL_FAST_TMO -1
+
+/*
+ * Reserve one command for internal usage. It is used for sending the
+ * connect command, as well as for the keep-alive command on the admin
+ * queue once the queue is live.
+ */
+#define NVMF_RESERVED_TAGS 1
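How these defaults interact with the option parsing shown earlier: the controller is retried every reconnect_delay seconds until ctrl_loss_tmo expires, and a negative fast_io_fail_tmo leaves the fail-fast mechanism disabled. A quick sketch with the default values (illustrative only):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int ctrl_loss_tmo = 600;	/* seconds before giving up */
	int reconnect_delay = 10;	/* seconds between attempts */
	int fast_io_fail_tmo = -1;	/* < 0: fail-fast disabled */

	int max_reconnects = ctrl_loss_tmo < 0 ? -1 :
			DIV_ROUND_UP(ctrl_loss_tmo, reconnect_delay);

	printf("max_reconnects=%d fail_fast=%s\n", max_reconnects,
	       fast_io_fail_tmo < 0 ? "disabled" : "enabled");
	return 0;
}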
/*
* Define a host as seen by the target. We allocate one at boot, but also
@@ -56,6 +65,11 @@ enum {
NVMF_OPT_NR_WRITE_QUEUES = 1 << 17,
NVMF_OPT_NR_POLL_QUEUES = 1 << 18,
NVMF_OPT_TOS = 1 << 19,
+ NVMF_OPT_FAIL_FAST_TMO = 1 << 20,
+ NVMF_OPT_HOST_IFACE = 1 << 21,
+ NVMF_OPT_DISCOVERY = 1 << 22,
+ NVMF_OPT_DHCHAP_SECRET = 1 << 23,
+ NVMF_OPT_DHCHAP_CTRL_SECRET = 1 << 24,
};
/**
@@ -73,7 +87,9 @@ enum {
* @trsvcid: The transport-specific TRSVCID field for a port on the
* subsystem which is adding a controller.
* @host_traddr: A transport-specific field identifying the NVME host port
- * to use for the connection to the controller.
+ * to use for the connection to the controller.
+ * @host_iface: A transport-specific field identifying the NVME host
+ * interface to use for the connection to the controller.
* @queue_size: Number of IO queue elements.
* @nr_io_queues: Number of controller IO queues that will be established.
* @reconnect_delay: Time between two consecutive reconnect attempts.
@@ -83,12 +99,16 @@ enum {
* @max_reconnects: maximum number of allowed reconnect attempts before removing
* the controller, (-1) means reconnect forever, zero means remove
* immediately;
+ * @dhchap_secret: DH-HMAC-CHAP secret
+ * @dhchap_ctrl_secret: DH-HMAC-CHAP controller secret for bi-directional
+ * authentication
* @disable_sqflow: disable controller sq flow control
* @hdr_digest: generate/verify header digest (TCP)
* @data_digest: generate/verify data digest (TCP)
* @nr_write_queues: number of queues for write I/O
* @nr_poll_queues: number of queues for polling I/O
* @tos: type of service
+ * @fast_io_fail_tmo: Fast I/O fail timeout in seconds
*/
struct nvmf_ctrl_options {
unsigned mask;
@@ -97,6 +117,7 @@ struct nvmf_ctrl_options {
char *traddr;
char *trsvcid;
char *host_traddr;
+ char *host_iface;
size_t queue_size;
unsigned int nr_io_queues;
unsigned int reconnect_delay;
@@ -105,12 +126,15 @@ struct nvmf_ctrl_options {
unsigned int kato;
struct nvmf_host *host;
int max_reconnects;
+ char *dhchap_secret;
+ char *dhchap_ctrl_secret;
bool disable_sqflow;
bool hdr_digest;
bool data_digest;
unsigned int nr_write_queues;
unsigned int nr_poll_queues;
int tos;
+ int fast_io_fail_tmo;
};
/*
@@ -153,6 +177,7 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
struct nvmf_ctrl_options *opts)
{
if (ctrl->state == NVME_CTRL_DELETING ||
+ ctrl->state == NVME_CTRL_DELETING_NOIO ||
ctrl->state == NVME_CTRL_DEAD ||
strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
@@ -162,29 +187,32 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
return true;
}
+static inline char *nvmf_ctrl_subsysnqn(struct nvme_ctrl *ctrl)
+{
+ if (!ctrl->subsys)
+ return ctrl->opts->subsysnqn;
+ return ctrl->subsys->subnqn;
+}
+
+static inline void nvmf_complete_timed_out_request(struct request *rq)
+{
+ if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
+ nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
+ blk_mq_complete_request(rq);
+ }
+}
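
nvmf_complete_timed_out_request() captures the pattern the IP-based transports use
from their blk-mq ->timeout handlers once a queue has been torn down: complete the
timed-out request as host-aborted instead of letting the block layer retry it. A
rough sketch of such a handler, assuming a hypothetical example_teardown_queue()
helper for the transport-specific teardown:

static enum blk_eh_timer_return example_timeout(struct request *rq)
{
	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;

	if (ctrl->state != NVME_CTRL_LIVE) {
		/*
		 * Connect or error recovery is in progress: tear the queue
		 * down and complete the request as host-aborted so the
		 * setup/teardown path sees the failure immediately.
		 */
		example_teardown_queue(rq->mq_hctx->driver_data);
		nvmf_complete_timed_out_request(rq);
		return BLK_EH_DONE;
	}

	/* otherwise let controller reset / error recovery deal with it */
	return BLK_EH_RESET_TIMER;
}
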
+
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl);
-int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll);
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid);
int nvmf_register_transport(struct nvmf_transport_ops *ops);
void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
void nvmf_free_options(struct nvmf_ctrl_options *opts);
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
-blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
- struct request *rq);
-bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
- bool queue_live);
bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
struct nvmf_ctrl_options *opts);
-static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
- bool queue_live)
-{
- if (likely(ctrl->state == NVME_CTRL_LIVE))
- return true;
- return __nvmf_check_ready(ctrl, rq, queue_live);
-}
-
#endif /* _NVME_FABRICS_H */
diff --git a/drivers/nvme/host/fault_inject.c b/drivers/nvme/host/fault_inject.c
index 1352159733b0..83d2e6860d38 100644
--- a/drivers/nvme/host/fault_inject.c
+++ b/drivers/nvme/host/fault_inject.c
@@ -56,7 +56,7 @@ void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject)
void nvme_should_fail(struct request *req)
{
- struct gendisk *disk = req->rq_disk;
+ struct gendisk *disk = req->q->disk;
struct nvme_fault_inject *fault_inject = NULL;
u16 status;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 5a70ac395d53..5d57a042dbca 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -9,12 +9,14 @@
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
#include <linux/overflow.h>
-
+#include <linux/blk-cgroup.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
+#include "fc.h"
#include <scsi/scsi_transport_fc.h>
+#include <linux/blk-mq-pci.h>
/* *************************** Data Structures/Defines ****************** */
@@ -25,6 +27,10 @@ enum nvme_fc_queue_flags {
};
#define NVME_FC_DEFAULT_DEV_LOSS_TMO 60 /* seconds */
+#define NVME_FC_DEFAULT_RECONNECT_TMO	2	/* delay between reconnect
+						 * attempts after a connection
+						 * failure while connected.
+						 */
struct nvme_fc_queue {
struct nvme_fc_ctrl *ctrl;
@@ -61,6 +67,17 @@ struct nvmefc_ls_req_op {
bool req_queued;
};
+struct nvmefc_ls_rcv_op {
+ struct nvme_fc_rport *rport;
+ struct nvmefc_ls_rsp *lsrsp;
+ union nvmefc_ls_requests *rqstbuf;
+ union nvmefc_ls_responses *rspbuf;
+ u16 rqstdatalen;
+ bool handled;
+ dma_addr_t rspdma;
+ struct list_head lsrcv_list; /* rport->ls_rcv_list */
+} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
+
enum nvme_fcpop_state {
FCPOP_STATE_UNINIT = 0,
FCPOP_STATE_IDLE = 1,
@@ -96,7 +113,7 @@ struct nvme_fc_fcp_op {
struct nvme_fcp_op_w_sgl {
struct nvme_fc_fcp_op op;
struct scatterlist sgl[NVME_INLINE_SG_CNT];
- uint8_t priv[0];
+ uint8_t priv[];
};
struct nvme_fc_lport {
@@ -117,6 +134,7 @@ struct nvme_fc_rport {
struct list_head endp_list; /* for lport->endp_list */
struct list_head ctrl_list;
struct list_head ls_req_list;
+ struct list_head ls_rcv_list;
struct list_head disc_list;
struct device *dev; /* physical device for dma */
struct nvme_fc_lport *lport;
@@ -124,11 +142,13 @@ struct nvme_fc_rport {
struct kref ref;
atomic_t act_ctrl_cnt;
unsigned long dev_loss_end;
+ struct work_struct lsrcv_work;
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
-enum nvme_fcctrl_flags {
- FCCTRL_TERMIO = (1 << 0),
-};
+/* fc_ctrl flags values - specified as bit positions */
+#define ASSOC_ACTIVE 0
+#define ASSOC_FAILED 1
+#define FCCTRL_TERMIO 2
struct nvme_fc_ctrl {
spinlock_t lock;
@@ -139,20 +159,19 @@ struct nvme_fc_ctrl {
u32 cnum;
bool ioq_live;
- bool assoc_active;
- atomic_t err_work_active;
u64 association_id;
+ struct nvmefc_ls_rcv_op *rcv_disconn;
struct list_head ctrl_list; /* rport->ctrl_list */
struct blk_mq_tag_set admin_tag_set;
struct blk_mq_tag_set tag_set;
+ struct work_struct ioerr_work;
struct delayed_work connect_work;
- struct work_struct err_work;
struct kref ref;
- u32 flags;
+ unsigned long flags;
u32 iocnt;
wait_queue_head_t ioabort_wait;
@@ -213,12 +232,16 @@ static DECLARE_COMPLETION(nvme_fc_unload_proceed);
*/
static struct device *fc_udev_device;
+static void nvme_fc_complete_rq(struct request *rq);
/* *********************** FC-NVME Port Management ************************ */
static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
struct nvme_fc_queue *, unsigned int);
+static void nvme_fc_handle_ls_rqst_work(struct work_struct *work);
+
+
static void
nvme_fc_free_lport(struct kref *ref)
{
@@ -236,7 +259,7 @@ nvme_fc_free_lport(struct kref *ref)
complete(&nvme_fc_unload_proceed);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
- ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
+ ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
ida_destroy(&lport->endp_cnt);
put_device(lport->dev);
@@ -342,8 +365,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
!template->ls_req || !template->fcp_io ||
!template->ls_abort || !template->fcp_abort ||
!template->max_hw_queues || !template->max_sgl_segments ||
- !template->max_dif_sgl_segments || !template->dma_boundary ||
- !template->module) {
+ !template->max_dif_sgl_segments || !template->dma_boundary) {
ret = -EINVAL;
goto out_reghost_failed;
}
@@ -377,7 +399,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
goto out_reghost_failed;
}
- idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
+ idx = ida_alloc(&nvme_fc_local_port_cnt, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
goto out_fail_kfree;
@@ -395,7 +417,10 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
newrec->ops = template;
newrec->dev = dev;
ida_init(&newrec->endp_cnt);
- newrec->localport.private = &newrec[1];
+ if (template->local_priv_sz)
+ newrec->localport.private = &newrec[1];
+ else
+ newrec->localport.private = NULL;
newrec->localport.node_name = pinfo->node_name;
newrec->localport.port_name = pinfo->port_name;
newrec->localport.port_role = pinfo->port_role;
@@ -414,7 +439,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
return 0;
out_ida_put:
- ida_simple_remove(&nvme_fc_local_port_cnt, idx);
+ ida_free(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
kfree(newrec);
out_reghost_failed:
@@ -510,7 +535,7 @@ nvme_fc_free_rport(struct kref *ref)
spin_unlock_irqrestore(&nvme_fc_lock, flags);
WARN_ON(!list_empty(&rport->disc_list));
- ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
+ ida_free(&lport->endp_cnt, rport->remoteport.port_num);
kfree(rport);
@@ -688,7 +713,7 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
goto out_lport_put;
}
- idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
+ idx = ida_alloc(&lport->endp_cnt, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
goto out_kfree_rport;
@@ -702,9 +727,13 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
atomic_set(&newrec->act_ctrl_cnt, 0);
spin_lock_init(&newrec->lock);
newrec->remoteport.localport = &lport->localport;
+ INIT_LIST_HEAD(&newrec->ls_rcv_list);
newrec->dev = lport->dev;
newrec->lport = lport;
- newrec->remoteport.private = &newrec[1];
+ if (lport->ops->remote_priv_sz)
+ newrec->remoteport.private = &newrec[1];
+ else
+ newrec->remoteport.private = NULL;
newrec->remoteport.port_role = pinfo->port_role;
newrec->remoteport.node_name = pinfo->node_name;
newrec->remoteport.port_name = pinfo->port_name;
@@ -712,6 +741,7 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
newrec->remoteport.port_num = idx;
__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
+ INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work);
spin_lock_irqsave(&nvme_fc_lock, flags);
list_add_tail(&newrec->endp_list, &lport->endp_list);
@@ -801,6 +831,7 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
break;
case NVME_CTRL_DELETING:
+ case NVME_CTRL_DELETING_NOIO:
default:
/* no action to take - let it delete */
break;
@@ -1001,6 +1032,7 @@ fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
+static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
@@ -1141,41 +1173,6 @@ nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
return __nvme_fc_send_ls_req(rport, lsop, done);
}
-/* Validation Error indexes into the string table below */
-enum {
- VERR_NO_ERROR = 0,
- VERR_LSACC = 1,
- VERR_LSDESC_RQST = 2,
- VERR_LSDESC_RQST_LEN = 3,
- VERR_ASSOC_ID = 4,
- VERR_ASSOC_ID_LEN = 5,
- VERR_CONN_ID = 6,
- VERR_CONN_ID_LEN = 7,
- VERR_CR_ASSOC = 8,
- VERR_CR_ASSOC_ACC_LEN = 9,
- VERR_CR_CONN = 10,
- VERR_CR_CONN_ACC_LEN = 11,
- VERR_DISCONN = 12,
- VERR_DISCONN_ACC_LEN = 13,
-};
-
-static char *validation_errors[] = {
- "OK",
- "Not LS_ACC",
- "Not LSDESC_RQST",
- "Bad LSDESC_RQST Length",
- "Not Association ID",
- "Bad Association ID Length",
- "Not Connection ID",
- "Bad Connection ID Length",
- "Not CR_ASSOC Rqst",
- "Bad CR_ASSOC ACC Length",
- "Not CR_CONN Rqst",
- "Bad CR_CONN ACC Length",
- "Not Disconnect Rqst",
- "Bad Disconnect ACC Length",
-};
-
static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
@@ -1184,21 +1181,27 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
struct nvmefc_ls_req *lsreq;
struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
struct fcnvme_ls_cr_assoc_acc *assoc_acc;
+ unsigned long flags;
int ret, fcret = 0;
lsop = kzalloc((sizeof(*lsop) +
- ctrl->lport->ops->lsrqst_priv_sz +
- sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
+ sizeof(*assoc_rqst) + sizeof(*assoc_acc) +
+ ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
if (!lsop) {
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: send Create Association failed: ENOMEM\n",
+ ctrl->cnum);
ret = -ENOMEM;
goto out_no_memory;
}
- lsreq = &lsop->ls_req;
- lsreq->private = (void *)&lsop[1];
- assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
- (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
+ assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1];
assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
+ lsreq = &lsop->ls_req;
+ if (ctrl->lport->ops->lsrqst_priv_sz)
+ lsreq->private = &assoc_acc[1];
+ else
+ lsreq->private = NULL;
assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
assoc_rqst->desc_list_len =
@@ -1268,11 +1271,13 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
"q %d Create Association LS failed: %s\n",
queue->qnum, validation_errors[fcret]);
} else {
+ spin_lock_irqsave(&ctrl->lock, flags);
ctrl->association_id =
be64_to_cpu(assoc_acc->associd.association_id);
queue->connection_id =
be64_to_cpu(assoc_acc->connectid.connection_id);
set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
+ spin_unlock_irqrestore(&ctrl->lock, flags);
}
out_free_buffer:
@@ -1296,18 +1301,23 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
int ret, fcret = 0;
lsop = kzalloc((sizeof(*lsop) +
- ctrl->lport->ops->lsrqst_priv_sz +
- sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
+ sizeof(*conn_rqst) + sizeof(*conn_acc) +
+ ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
if (!lsop) {
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: send Create Connection failed: ENOMEM\n",
+ ctrl->cnum);
ret = -ENOMEM;
goto out_no_memory;
}
- lsreq = &lsop->ls_req;
- lsreq->private = (void *)&lsop[1];
- conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
- (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
+ conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1];
conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
+ lsreq = &lsop->ls_req;
+ if (ctrl->lport->ops->lsrqst_priv_sz)
+ lsreq->private = (void *)&conn_acc[1];
+ else
+ lsreq->private = NULL;
conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
conn_rqst->desc_list_len = cpu_to_be32(
@@ -1421,54 +1431,385 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
int ret;
lsop = kzalloc((sizeof(*lsop) +
- ctrl->lport->ops->lsrqst_priv_sz +
- sizeof(*discon_rqst) + sizeof(*discon_acc)),
- GFP_KERNEL);
- if (!lsop)
- /* couldn't sent it... too bad */
+ sizeof(*discon_rqst) + sizeof(*discon_acc) +
+ ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
+ if (!lsop) {
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: send Disconnect Association "
+ "failed: ENOMEM\n",
+ ctrl->cnum);
return;
+ }
+ discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
+ discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
lsreq = &lsop->ls_req;
+ if (ctrl->lport->ops->lsrqst_priv_sz)
+ lsreq->private = (void *)&discon_acc[1];
+ else
+ lsreq->private = NULL;
- lsreq->private = (void *)&lsop[1];
- discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)
- (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
- discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
+ nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
+ ctrl->association_id);
- discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT_ASSOC;
- discon_rqst->desc_list_len = cpu_to_be32(
- sizeof(struct fcnvme_lsdesc_assoc_id) +
- sizeof(struct fcnvme_lsdesc_disconn_cmd));
+ ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
+ nvme_fc_disconnect_assoc_done);
+ if (ret)
+ kfree(lsop);
+}
- discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
- discon_rqst->associd.desc_len =
- fcnvme_lsdesc_len(
- sizeof(struct fcnvme_lsdesc_assoc_id));
+static void
+nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
+{
+ struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
+ struct nvme_fc_rport *rport = lsop->rport;
+ struct nvme_fc_lport *lport = rport->lport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rport->lock, flags);
+ list_del(&lsop->lsrcv_list);
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma,
+ sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
+ fc_dma_unmap_single(lport->dev, lsop->rspdma,
+ sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
+
+ kfree(lsop);
+
+ nvme_fc_rport_put(rport);
+}
+
+static void
+nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
+{
+ struct nvme_fc_rport *rport = lsop->rport;
+ struct nvme_fc_lport *lport = rport->lport;
+ struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
+ int ret;
+
+ fc_dma_sync_single_for_device(lport->dev, lsop->rspdma,
+ sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
+
+ ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
+ lsop->lsrsp);
+ if (ret) {
+ dev_warn(lport->dev,
+ "LLDD rejected LS RSP xmt: LS %d status %d\n",
+ w0->ls_cmd, ret);
+ nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
+ return;
+ }
+}
+
+static struct nvme_fc_ctrl *
+nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
+ struct nvmefc_ls_rcv_op *lsop)
+{
+ struct fcnvme_ls_disconnect_assoc_rqst *rqst =
+ &lsop->rqstbuf->rq_dis_assoc;
+ struct nvme_fc_ctrl *ctrl, *ret = NULL;
+ struct nvmefc_ls_rcv_op *oldls = NULL;
+ u64 association_id = be64_to_cpu(rqst->associd.association_id);
+ unsigned long flags;
- discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
+ spin_lock_irqsave(&rport->lock, flags);
- discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
- FCNVME_LSDESC_DISCONN_CMD);
- discon_rqst->discon_cmd.desc_len =
+ list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
+ if (!nvme_fc_ctrl_get(ctrl))
+ continue;
+ spin_lock(&ctrl->lock);
+ if (association_id == ctrl->association_id) {
+ oldls = ctrl->rcv_disconn;
+ ctrl->rcv_disconn = lsop;
+ ret = ctrl;
+ }
+ spin_unlock(&ctrl->lock);
+ if (ret)
+ /* leave the ctrl get reference */
+ break;
+ nvme_fc_ctrl_put(ctrl);
+ }
+
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ /* transmit a response for anything that was pending */
+ if (oldls) {
+ dev_info(rport->lport->dev,
+ "NVME-FC{%d}: Multiple Disconnect Association "
+ "LS's received\n", ctrl->cnum);
+ /* overwrite good response with bogus failure */
+ oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
+ sizeof(*oldls->rspbuf),
+ rqst->w0.ls_cmd,
+ FCNVME_RJT_RC_UNAB,
+ FCNVME_RJT_EXP_NONE, 0);
+ nvme_fc_xmt_ls_rsp(oldls);
+ }
+
+ return ret;
+}
+
+/*
+ * returns true to mean LS handled and ls_rsp can be sent
+ * returns false to defer ls_rsp xmt (will be done as part of
+ * association termination)
+ */
+static bool
+nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop)
+{
+ struct nvme_fc_rport *rport = lsop->rport;
+ struct fcnvme_ls_disconnect_assoc_rqst *rqst =
+ &lsop->rqstbuf->rq_dis_assoc;
+ struct fcnvme_ls_disconnect_assoc_acc *acc =
+ &lsop->rspbuf->rsp_dis_assoc;
+ struct nvme_fc_ctrl *ctrl = NULL;
+ int ret = 0;
+
+ memset(acc, 0, sizeof(*acc));
+
+ ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst);
+ if (!ret) {
+ /* match an active association */
+ ctrl = nvme_fc_match_disconn_ls(rport, lsop);
+ if (!ctrl)
+ ret = VERR_NO_ASSOC;
+ }
+
+ if (ret) {
+ dev_info(rport->lport->dev,
+ "Disconnect LS failed: %s\n",
+ validation_errors[ret]);
+ lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc,
+ sizeof(*acc), rqst->w0.ls_cmd,
+ (ret == VERR_NO_ASSOC) ?
+ FCNVME_RJT_RC_INV_ASSOC :
+ FCNVME_RJT_RC_LOGIC,
+ FCNVME_RJT_EXP_NONE, 0);
+ return true;
+ }
+
+ /* format an ACCept response */
+
+ lsop->lsrsp->rsplen = sizeof(*acc);
+
+ nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
fcnvme_lsdesc_len(
- sizeof(struct fcnvme_lsdesc_disconn_cmd));
+ sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
+ FCNVME_LS_DISCONNECT_ASSOC);
- lsreq->rqstaddr = discon_rqst;
- lsreq->rqstlen = sizeof(*discon_rqst);
- lsreq->rspaddr = discon_acc;
- lsreq->rsplen = sizeof(*discon_acc);
- lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
+ /*
+ * the transmit of the response will occur after the exchanges
+ * for the association have been ABTS'd by
+ * nvme_fc_delete_association().
+ */
- ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
- nvme_fc_disconnect_assoc_done);
- if (ret)
- kfree(lsop);
+ /* fail the association */
+ nvme_fc_error_recovery(ctrl, "Disconnect Association LS received");
+
+ /* release the reference taken by nvme_fc_match_disconn_ls() */
+ nvme_fc_ctrl_put(ctrl);
+
+ return false;
}
+/*
+ * Actual processing routine for FC-NVME LS requests received from the LLDD.
+ * Returns true if a response should be sent afterward, false if the rsp will
+ * be sent asynchronously.
+ */
+static bool
+nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop)
+{
+ struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
+ bool ret = true;
-/* *********************** NVME Ctrl Routines **************************** */
+ lsop->lsrsp->nvme_fc_private = lsop;
+ lsop->lsrsp->rspbuf = lsop->rspbuf;
+ lsop->lsrsp->rspdma = lsop->rspdma;
+ lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done;
+	/* Be preventive: handlers will later set to a valid length */
+ lsop->lsrsp->rsplen = 0;
-static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
+ /*
+ * handlers:
+ * parse request input, execute the request, and format the
+ * LS response
+ */
+ switch (w0->ls_cmd) {
+ case FCNVME_LS_DISCONNECT_ASSOC:
+ ret = nvme_fc_ls_disconnect_assoc(lsop);
+ break;
+ case FCNVME_LS_DISCONNECT_CONN:
+ lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
+ sizeof(*lsop->rspbuf), w0->ls_cmd,
+ FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0);
+ break;
+ case FCNVME_LS_CREATE_ASSOCIATION:
+ case FCNVME_LS_CREATE_CONNECTION:
+ lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
+ sizeof(*lsop->rspbuf), w0->ls_cmd,
+ FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0);
+ break;
+ default:
+ lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
+ sizeof(*lsop->rspbuf), w0->ls_cmd,
+ FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
+ break;
+ }
+
+	return ret;
+}
+
+static void
+nvme_fc_handle_ls_rqst_work(struct work_struct *work)
+{
+ struct nvme_fc_rport *rport =
+ container_of(work, struct nvme_fc_rport, lsrcv_work);
+ struct fcnvme_ls_rqst_w0 *w0;
+ struct nvmefc_ls_rcv_op *lsop;
+ unsigned long flags;
+ bool sendrsp;
+
+restart:
+ sendrsp = true;
+ spin_lock_irqsave(&rport->lock, flags);
+ list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) {
+ if (lsop->handled)
+ continue;
+
+ lsop->handled = true;
+ if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
+ spin_unlock_irqrestore(&rport->lock, flags);
+ sendrsp = nvme_fc_handle_ls_rqst(lsop);
+ } else {
+ spin_unlock_irqrestore(&rport->lock, flags);
+ w0 = &lsop->rqstbuf->w0;
+ lsop->lsrsp->rsplen = nvme_fc_format_rjt(
+ lsop->rspbuf,
+ sizeof(*lsop->rspbuf),
+ w0->ls_cmd,
+ FCNVME_RJT_RC_UNAB,
+ FCNVME_RJT_EXP_NONE, 0);
+ }
+ if (sendrsp)
+ nvme_fc_xmt_ls_rsp(lsop);
+ goto restart;
+ }
+ spin_unlock_irqrestore(&rport->lock, flags);
+}
+
+/**
+ * nvme_fc_rcv_ls_req - transport entry point called by an LLDD
+ *		upon the reception of an NVME LS request.
+ *
+ * The nvme-fc layer will copy payload to an internal structure for
+ * processing. As such, upon completion of the routine, the LLDD may
+ * immediately free/reuse the LS request buffer passed in the call.
+ *
+ * If this routine returns error, the LLDD should abort the exchange.
+ *
+ * @portptr: pointer to the (registered) remote port that the LS
+ * was received from. The remoteport is associated with
+ * a specific localport.
+ * @lsrsp: pointer to a nvmefc_ls_rsp response structure to be
+ * used to reference the exchange corresponding to the LS
+ * when issuing an ls response.
+ * @lsreqbuf: pointer to the buffer containing the LS Request
+ * @lsreqbuf_len: length, in bytes, of the received LS request
+ */
+int
+nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr,
+ struct nvmefc_ls_rsp *lsrsp,
+ void *lsreqbuf, u32 lsreqbuf_len)
+{
+ struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
+ struct nvme_fc_lport *lport = rport->lport;
+ struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
+ struct nvmefc_ls_rcv_op *lsop;
+ unsigned long flags;
+ int ret;
+
+ nvme_fc_rport_get(rport);
+
+ /* validate there's a routine to transmit a response */
+ if (!lport->ops->xmt_ls_rsp) {
+ dev_info(lport->dev,
+ "RCV %s LS failed: no LLDD xmt_ls_rsp\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
+ ret = -EINVAL;
+ goto out_put;
+ }
+
+ if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
+ dev_info(lport->dev,
+ "RCV %s LS failed: payload too large\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
+ ret = -E2BIG;
+ goto out_put;
+ }
+
+ lsop = kzalloc(sizeof(*lsop) +
+ sizeof(union nvmefc_ls_requests) +
+ sizeof(union nvmefc_ls_responses),
+ GFP_KERNEL);
+ if (!lsop) {
+ dev_info(lport->dev,
+ "RCV %s LS failed: No memory\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
+ ret = -ENOMEM;
+ goto out_put;
+ }
+ lsop->rqstbuf = (union nvmefc_ls_requests *)&lsop[1];
+ lsop->rspbuf = (union nvmefc_ls_responses *)&lsop->rqstbuf[1];
+
+ lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf,
+ sizeof(*lsop->rspbuf),
+ DMA_TO_DEVICE);
+ if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) {
+ dev_info(lport->dev,
+ "RCV %s LS failed: DMA mapping failure\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
+ ret = -EFAULT;
+ goto out_free;
+ }
+
+ lsop->rport = rport;
+ lsop->lsrsp = lsrsp;
+
+ memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len);
+ lsop->rqstdatalen = lsreqbuf_len;
+
+ spin_lock_irqsave(&rport->lock, flags);
+ if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) {
+ spin_unlock_irqrestore(&rport->lock, flags);
+ ret = -ENOTCONN;
+ goto out_unmap;
+ }
+ list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list);
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ schedule_work(&rport->lsrcv_work);
+
+ return 0;
+
+out_unmap:
+ fc_dma_unmap_single(lport->dev, lsop->rspdma,
+ sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
+out_free:
+ kfree(lsop);
+out_put:
+ nvme_fc_rport_put(rport);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req);
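
For context, a hedged sketch of the LLDD side of this entry point (the context
structure and abort helper below are illustrative, not taken from a real HBA
driver): the LLDD hands the received payload to nvme_fc_rcv_ls_req(), may reuse
its receive buffer as soon as the call returns, and aborts the exchange if the
transport refuses the LS.

struct example_ls_ctx {
	struct nvmefc_ls_rsp lsrsp;	/* handed to the nvme-fc transport */
	/* ... LLDD exchange bookkeeping ... */
};

static void example_lldd_recv_ls(struct nvme_fc_remote_port *remoteport,
				 struct example_ls_ctx *ls,
				 void *payload, u32 payload_len)
{
	if (nvme_fc_rcv_ls_req(remoteport, &ls->lsrsp, payload, payload_len))
		/* transport could not take the LS - abort the FC exchange */
		example_lldd_abort_exchange(ls);
}
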
+
+
+/* *********************** NVME Ctrl Routines **************************** */
static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
@@ -1488,7 +1829,7 @@ nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
{
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
- return __nvme_fc_exit_request(set->driver_data, op);
+ return __nvme_fc_exit_request(to_fc_ctrl(set->driver_data), op);
}
static int
@@ -1501,8 +1842,10 @@ __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
if (opstate != FCPOP_STATE_ACTIVE)
atomic_set(&op->state, opstate);
- else if (ctrl->flags & FCCTRL_TERMIO)
+ else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) {
+ op->flags |= FCOP_FLAGS_TERMIO;
ctrl->iocnt++;
+ }
spin_unlock_irqrestore(&ctrl->lock, flags);
if (opstate != FCPOP_STATE_ACTIVE)
@@ -1538,7 +1881,8 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
if (opstate == FCPOP_STATE_ABORTED) {
spin_lock_irqsave(&ctrl->lock, flags);
- if (ctrl->flags & FCCTRL_TERMIO) {
+ if (test_bit(FCCTRL_TERMIO, &ctrl->flags) &&
+ op->flags & FCOP_FLAGS_TERMIO) {
if (!--ctrl->iocnt)
wake_up(&ctrl->ioabort_wait);
}
@@ -1547,6 +1891,33 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
}
static void
+nvme_fc_ctrl_ioerr_work(struct work_struct *work)
+{
+ struct nvme_fc_ctrl *ctrl =
+ container_of(work, struct nvme_fc_ctrl, ioerr_work);
+
+ nvme_fc_error_recovery(ctrl, "transport detected io error");
+}
+
+/*
+ * nvme_fc_io_getuuid - Routine called by the LLDD to get the appid field
+ *		associated with a request
+ * @req: IO request from nvme fc to driver
+ * Returns: UUID if there is an appid associated with the VM, or
+ *          NULL if the user/libvirt has not set an appid for the VM
+ */
+char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req)
+{
+ struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
+ struct request *rq = op->rq;
+
+ if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq->bio)
+ return NULL;
+ return blkcg_get_fc_appid(rq->bio);
+}
+EXPORT_SYMBOL_GPL(nvme_fc_io_getuuid);
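
A hedged sketch of how an LLDD might consume this helper (the example_io structure
and its 128-byte app_id field are assumptions): fetch the cgroup fc appid for the
I/O and stash it in a driver-private field so the VM identity can be carried in
the FC frames built for that command.

struct example_io {
	char app_id[128];	/* size is illustrative */
	/* ... LLDD per-command state ... */
};

static void example_lldd_set_appid(struct example_io *io,
				   struct nvmefc_fcp_req *fcp_req)
{
	char *uuid = nvme_fc_io_getuuid(fcp_req);

	if (uuid)
		strscpy(io->app_id, uuid, sizeof(io->app_id));
}
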
+
+static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
@@ -1604,7 +1975,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
sizeof(op->rsp_iu), DMA_FROM_DEVICE);
if (opstate == FCPOP_STATE_ABORTED)
- status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
+ status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1);
else if (freq->status) {
status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
dev_info(ctrl->ctrl.device,
@@ -1699,11 +2070,12 @@ done:
}
__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
- nvme_end_request(rq, status, result);
+ if (!nvme_try_complete_req(rq, status, result))
+ nvme_fc_complete_rq(rq);
check_error:
- if (terminate_assoc)
- nvme_fc_error_recovery(ctrl, "transport detected io error");
+ if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
+ queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}
static int
@@ -1741,7 +2113,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
dev_err(ctrl->dev,
"FCP Op failed - cmdiu dma mapping failed.\n");
- ret = EFAULT;
+ ret = -EFAULT;
goto out_on_error;
}
@@ -1751,7 +2123,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
dev_err(ctrl->dev,
"FCP Op failed - rspiu dma mapping failed.\n");
- ret = EFAULT;
+ ret = -EFAULT;
}
atomic_set(&op->state, FCPOP_STATE_IDLE);
@@ -1763,7 +2135,7 @@ static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
- struct nvme_fc_ctrl *ctrl = set->driver_data;
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
@@ -1772,9 +2144,10 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
if (res)
return res;
- op->op.fcp_req.first_sgl = &op->sgl[0];
+ op->op.fcp_req.first_sgl = op->sgl;
op->op.fcp_req.private = &op->priv[0];
nvme_req(rq)->ctrl = &ctrl->ctrl;
+ nvme_req(rq)->cmd = &op->op.cmd_iu.sqe;
return res;
}
@@ -1784,15 +2157,17 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
struct nvme_fc_fcp_op *aen_op;
struct nvme_fc_cmd_iu *cmdiu;
struct nvme_command *sqe;
- void *private;
+ void *private = NULL;
int i, ret;
aen_op = ctrl->aen_ops;
for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
- private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
+ if (ctrl->lport->ops->fcprqst_priv_sz) {
+ private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
GFP_KERNEL);
- if (!private)
- return -ENOMEM;
+ if (!private)
+ return -ENOMEM;
+ }
cmdiu = &aen_op->cmd_iu;
sqe = &cmdiu->sqe;
@@ -1821,11 +2196,9 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
struct nvme_fc_fcp_op *aen_op;
int i;
+ cancel_work_sync(&ctrl->ctrl.async_event_work);
aen_op = ctrl->aen_ops;
for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
- if (!aen_op->fcp_req.private)
- continue;
-
__nvme_fc_exit_request(ctrl, aen_op);
kfree(aen_op->fcp_req.private);
@@ -1833,36 +2206,28 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
}
}
-static inline void
-__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
- unsigned int qidx)
+static inline int
+__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int qidx)
{
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(data);
struct nvme_fc_queue *queue = &ctrl->queues[qidx];
hctx->driver_data = queue;
queue->hctx = hctx;
+ return 0;
}
static int
-nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
- unsigned int hctx_idx)
+nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx)
{
- struct nvme_fc_ctrl *ctrl = data;
-
- __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
-
- return 0;
+ return __nvme_fc_init_hctx(hctx, data, hctx_idx + 1);
}
static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_fc_ctrl *ctrl = data;
-
- __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
-
- return 0;
+ return __nvme_fc_init_hctx(hctx, data, hctx_idx);
}
static void
@@ -1977,7 +2342,7 @@ nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
return 0;
delete_queues:
- for (; i >= 0; i--)
+ for (; i > 0; i--)
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
return ret;
}
@@ -1992,7 +2357,7 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
(qsize / 5));
if (ret)
break;
- ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
if (ret)
break;
@@ -2016,34 +2381,28 @@ nvme_fc_ctrl_free(struct kref *ref)
{
struct nvme_fc_ctrl *ctrl =
container_of(ref, struct nvme_fc_ctrl, ref);
- struct nvme_fc_lport *lport = ctrl->lport;
unsigned long flags;
- if (ctrl->ctrl.tagset) {
- blk_cleanup_queue(ctrl->ctrl.connect_q);
- blk_mq_free_tag_set(&ctrl->tag_set);
- }
+ if (ctrl->ctrl.tagset)
+ nvme_remove_io_tag_set(&ctrl->ctrl);
/* remove from rport list */
spin_lock_irqsave(&ctrl->rport->lock, flags);
list_del(&ctrl->ctrl_list);
spin_unlock_irqrestore(&ctrl->rport->lock, flags);
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
- blk_cleanup_queue(ctrl->ctrl.admin_q);
- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
- blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ nvme_start_admin_queue(&ctrl->ctrl);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
kfree(ctrl->queues);
put_device(ctrl->dev);
nvme_fc_rport_put(ctrl->rport);
- ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
+ ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
if (ctrl->ctrl.opts)
nvmf_free_options(ctrl->ctrl.opts);
kfree(ctrl);
- module_put(lport->ops->module);
}
static void
@@ -2072,24 +2431,113 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
nvme_fc_ctrl_put(ctrl);
}
+/*
+ * This routine is used by the transport when it needs to find active
+ * io on a queue that is to be terminated. The transport uses
+ * blk_mq_tagset_busy_iter() to find the busy requests, which then invokes
+ * this routine to kill them one by one.
+ *
+ * As FC allocates FC exchange for each io, the transport must contact
+ * the LLDD to terminate the exchange, thus releasing the FC exchange.
+ * After terminating the exchange the LLDD will call the transport's
+ * normal io done path for the request, but it will have an aborted
+ * status. The done path will return the io request back to the block
+ * layer with an error status.
+ */
+static bool nvme_fc_terminate_exchange(struct request *req, void *data)
+{
+ struct nvme_ctrl *nctrl = data;
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
+
+ op->nreq.flags |= NVME_REQ_CANCELLED;
+ __nvme_fc_abort_op(ctrl, op);
+ return true;
+}
+
+/*
+ * This routine runs through all outstanding commands on the association
+ * and aborts them. This routine is typically called by the
+ * delete_association routine. It is also called due to an error during
+ * reconnect. In that scenario, it is most likely a command that initializes
+ * the controller, including fabric Connect commands on io queues, that
+ * may have timed out or failed thus the io must be killed for the connect
+ * thread to see the error.
+ */
static void
-nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
+__nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
{
- int active;
+ int q;
/*
- * if an error (io timeout, etc) while (re)connecting,
- * it's an error on creating the new association.
- * Start the error recovery thread if it hasn't already
- * been started. It is expected there could be multiple
- * ios hitting this path before things are cleaned up.
+ * if aborting io, the queues are no longer good, mark them
+ * all as not live.
+ */
+ if (ctrl->ctrl.queue_count > 1) {
+ for (q = 1; q < ctrl->ctrl.queue_count; q++)
+ clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
+ }
+ clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
+
+ /*
+ * If io queues are present, stop them and terminate all outstanding
+ * ios on them. As FC allocates FC exchange for each io, the
+ * transport must contact the LLDD to terminate the exchange,
+	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
+ * to tell us what io's are busy and invoke a transport routine
+ * to kill them with the LLDD. After terminating the exchange
+ * the LLDD will call the transport's normal io done path, but it
+ * will have an aborted status. The done path will return the
+ * io requests back to the block layer as part of normal completions
+ * (but with error status).
+ */
+ if (ctrl->ctrl.queue_count > 1) {
+ nvme_stop_queues(&ctrl->ctrl);
+ nvme_sync_io_queues(&ctrl->ctrl);
+ blk_mq_tagset_busy_iter(&ctrl->tag_set,
+ nvme_fc_terminate_exchange, &ctrl->ctrl);
+ blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
+ if (start_queues)
+ nvme_start_queues(&ctrl->ctrl);
+ }
+
+ /*
+ * Other transports, which don't have link-level contexts bound
+ * to sqe's, would try to gracefully shutdown the controller by
+ * writing the registers for shutdown and polling (call
+ * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
+ * just aborted and we will wait on those contexts, and given
+	 * there was no indication of how live the controller is on the
+ * link, don't send more io to create more contexts for the
+ * shutdown. Let the controller fail via keepalive failure if
+	 * it's still present.
+ */
+
+ /*
+ * clean up the admin queue. Same thing as above.
+ */
+ nvme_stop_admin_queue(&ctrl->ctrl);
+ blk_sync_queue(ctrl->ctrl.admin_q);
+ blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+ nvme_fc_terminate_exchange, &ctrl->ctrl);
+ blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
+ if (start_queues)
+ nvme_start_admin_queue(&ctrl->ctrl);
+}
+
+static void
+nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
+{
+ /*
+	 * If an error (io timeout, etc) occurred while (re)connecting, if the
+	 * remote port requested termination of the association (disconnect_ls),
+	 * or if an error (timeout or abort) occurred on an io while creating
+	 * the controller, abort any ios on the association and let the
+	 * create_association error path resolve things.
*/
if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
- active = atomic_xchg(&ctrl->err_work_active, 1);
- if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
- atomic_set(&ctrl->err_work_active, 0);
- WARN_ON(1);
- }
+ __nvme_fc_abort_outstanding_ios(ctrl, true);
+ set_bit(ASSOC_FAILED, &ctrl->flags);
return;
}
@@ -2098,7 +2546,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
return;
dev_warn(ctrl->ctrl.device,
- "NVME-FC{%d}: transport association error detected: %s\n",
+ "NVME-FC{%d}: transport association event: %s\n",
ctrl->cnum, errmsg);
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: resetting controller\n", ctrl->cnum);
@@ -2106,20 +2554,24 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
nvme_reset_ctrl(&ctrl->ctrl);
}
-static enum blk_eh_timer_return
-nvme_fc_timeout(struct request *rq, bool reserved)
+static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
{
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
struct nvme_fc_ctrl *ctrl = op->ctrl;
+ struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+ struct nvme_command *sqe = &cmdiu->sqe;
/*
- * we can't individually ABTS an io without affecting the queue,
- * thus killing the queue, and thus the association.
- * So resolve by performing a controller reset, which will stop
- * the host/io stack, terminate the association on the link,
- * and recreate an association on the link.
+ * Attempt to abort the offending command. Command completion
+ * will detect the aborted io and will fail the connection.
*/
- nvme_fc_error_recovery(ctrl, "io timeout error");
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
+ "x%08x/x%08x\n",
+ ctrl->cnum, op->queue->qnum, sqe->common.opcode,
+ sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
+ if (__nvme_fc_abort_op(ctrl, op))
+ nvme_fc_error_recovery(ctrl, "io timeout abort failed");
/*
* the io abort has been initiated. Have the reset timer
@@ -2303,10 +2755,11 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
- if (!(op->flags & FCOP_FLAGS_AEN))
+ if (!(op->flags & FCOP_FLAGS_AEN)) {
nvme_fc_unmap_data(ctrl, op->rq, op);
+ nvme_cleanup_cmd(op->rq);
+ }
- nvme_cleanup_cmd(op->rq);
nvme_fc_ctrl_put(ctrl);
if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
@@ -2328,18 +2781,16 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_fc_ctrl *ctrl = queue->ctrl;
struct request *rq = bd->rq;
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
- struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
- struct nvme_command *sqe = &cmdiu->sqe;
enum nvmefc_fcp_datadir io_dir;
bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
u32 data_len;
blk_status_t ret;
if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
- !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
- return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
+ !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
+ return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
- ret = nvme_setup_cmd(ns, rq, sqe);
+ ret = nvme_setup_cmd(ns, rq);
if (ret)
return ret;
@@ -2369,16 +2820,9 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
struct nvme_fc_fcp_op *aen_op;
- unsigned long flags;
- bool terminating = false;
blk_status_t ret;
- spin_lock_irqsave(&ctrl->lock, flags);
- if (ctrl->flags & FCCTRL_TERMIO)
- terminating = true;
- spin_unlock_irqrestore(&ctrl->lock, flags);
-
- if (terminating)
+ if (test_bit(FCCTRL_TERMIO, &ctrl->flags))
return;
aen_op = &ctrl->aen_ops[0];
@@ -2397,36 +2841,34 @@ nvme_fc_complete_rq(struct request *rq)
struct nvme_fc_ctrl *ctrl = op->ctrl;
atomic_set(&op->state, FCPOP_STATE_IDLE);
+ op->flags &= ~FCOP_FLAGS_TERMIO;
nvme_fc_unmap_data(ctrl, rq, op);
nvme_complete_rq(rq);
nvme_fc_ctrl_put(ctrl);
}
-/*
- * This routine is used by the transport when it needs to find active
- * io on a queue that is to be terminated. The transport uses
- * blk_mq_tagset_busy_itr() to find the busy requests, which then invoke
- * this routine to kill them on a 1 by 1 basis.
- *
- * As FC allocates FC exchange for each io, the transport must contact
- * the LLDD to terminate the exchange, thus releasing the FC exchange.
- * After terminating the exchange the LLDD will call the transport's
- * normal io done path for the request, but it will have an aborted
- * status. The done path will return the io request back to the block
- * layer with an error status.
- */
-static bool
-nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
+static void nvme_fc_map_queues(struct blk_mq_tag_set *set)
{
- struct nvme_ctrl *nctrl = data;
- struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
- struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
+ int i;
- __nvme_fc_abort_op(ctrl, op);
- return true;
-}
+ for (i = 0; i < set->nr_maps; i++) {
+ struct blk_mq_queue_map *map = &set->map[i];
+ if (!map->nr_queues) {
+ WARN_ON(i == HCTX_TYPE_DEFAULT);
+ continue;
+ }
+
+ /* Call LLDD map queue functionality if defined */
+ if (ctrl->lport->ops->map_queues)
+ ctrl->lport->ops->map_queues(&ctrl->lport->localport,
+ map);
+ else
+ blk_mq_map_queues(map);
+ }
+}
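
The new ->map_queues hook lets the LLDD line blk-mq hardware contexts up with its
own interrupt affinity (hence the blk-mq-pci.h include added above). A hedged
sketch of what a PCI HBA driver's callback might look like; the example_hba layout
and the vector offset are assumptions:

struct example_hba {
	struct pci_dev *pdev;
	int irq_vector_offset;	/* first MSI-X vector used for I/O queues */
};

static void example_lldd_map_queues(struct nvme_fc_local_port *localport,
				    struct blk_mq_queue_map *map)
{
	struct example_hba *hba = localport->private;

	/* spread hctxs according to the device's MSI-X affinity masks */
	blk_mq_pci_map_queues(map, hba->pdev, hba->irq_vector_offset);
}
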
static const struct blk_mq_ops nvme_fc_mq_ops = {
.queue_rq = nvme_fc_queue_rq,
@@ -2435,6 +2877,7 @@ static const struct blk_mq_ops nvme_fc_mq_ops = {
.exit_request = nvme_fc_exit_request,
.init_hctx = nvme_fc_init_hctx,
.timeout = nvme_fc_timeout,
+ .map_queues = nvme_fc_map_queues,
};
static int
@@ -2459,34 +2902,16 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
nvme_fc_init_io_queues(ctrl);
- memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
- ctrl->tag_set.ops = &nvme_fc_mq_ops;
- ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
- ctrl->tag_set.reserved_tags = 1; /* fabric connect */
- ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
- ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- ctrl->tag_set.cmd_size =
- struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
- ctrl->lport->ops->fcprqst_priv_sz);
- ctrl->tag_set.driver_data = ctrl;
- ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
- ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
-
- ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+ ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+ &nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+ struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+ ctrl->lport->ops->fcprqst_priv_sz));
if (ret)
return ret;
- ctrl->ctrl.tagset = &ctrl->tag_set;
-
- ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
- if (IS_ERR(ctrl->ctrl.connect_q)) {
- ret = PTR_ERR(ctrl->ctrl.connect_q);
- goto out_free_tag_set;
- }
-
ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
if (ret)
- goto out_cleanup_blk_queue;
+ goto out_cleanup_tagset;
ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
if (ret)
@@ -2498,10 +2923,8 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
out_delete_hw_queues:
nvme_fc_delete_hw_io_queues(ctrl);
-out_cleanup_blk_queue:
- blk_cleanup_queue(ctrl->ctrl.connect_q);
-out_free_tag_set:
- blk_mq_free_tag_set(&ctrl->tag_set);
+out_cleanup_tagset:
+ nvme_remove_io_tag_set(&ctrl->ctrl);
nvme_fc_free_io_queues(ctrl);
/* force put free routine to ignore io queues */
@@ -2539,6 +2962,13 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
if (ctrl->ctrl.queue_count == 1)
return 0;
+ if (prior_ioq_cnt != nr_io_queues) {
+ dev_info(ctrl->ctrl.device,
+ "reconnect: revising io queue count from %d to %d\n",
+ prior_ioq_cnt, nr_io_queues);
+ blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
+ }
+
ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
if (ret)
goto out_free_io_queues;
@@ -2547,12 +2977,6 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
if (ret)
goto out_delete_hw_queues;
- if (prior_ioq_cnt != nr_io_queues)
- dev_info(ctrl->ctrl.device,
- "reconnect: revising io queue count from %d to %d\n",
- prior_ioq_cnt, nr_io_queues);
- blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
-
return 0;
out_delete_hw_queues:
@@ -2587,10 +3011,9 @@ nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
struct nvme_fc_rport *rport = ctrl->rport;
u32 cnt;
- if (ctrl->assoc_active)
+ if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags))
return 1;
- ctrl->assoc_active = true;
cnt = atomic_inc_return(&rport->act_ctrl_cnt);
if (cnt == 1)
nvme_fc_rport_active_on_lport(rport);
@@ -2605,7 +3028,7 @@ nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
struct nvme_fc_lport *lport = rport->lport;
u32 cnt;
- /* ctrl->assoc_active=false will be set independently */
+ /* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */
cnt = atomic_dec_return(&rport->act_ctrl_cnt);
if (cnt == 0) {
@@ -2625,6 +3048,8 @@ static int
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
{
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ struct nvmefc_ls_rcv_op *disls = NULL;
+ unsigned long flags;
int ret;
bool changed;
@@ -2642,6 +3067,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
ctrl->cnum, ctrl->lport->localport.port_name,
ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);
+ clear_bit(ASSOC_FAILED, &ctrl->flags);
+
/*
* Create the admin queue
*/
@@ -2670,16 +3097,17 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
*/
ret = nvme_enable_ctrl(&ctrl->ctrl);
- if (ret)
+ if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
goto out_disconnect_admin_queue;
- ctrl->ctrl.max_hw_sectors =
- (ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);
+ ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
+ ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<
+ (ilog2(SZ_4K) - 9);
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_start_admin_queue(&ctrl->ctrl);
- ret = nvme_init_identify(&ctrl->ctrl);
- if (ret)
+ ret = nvme_init_ctrl_finish(&ctrl->ctrl);
+ if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
goto out_disconnect_admin_queue;
/* sanity checks */
@@ -2688,10 +3116,17 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (ctrl->ctrl.icdoff) {
dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
ctrl->ctrl.icdoff);
+ ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto out_disconnect_admin_queue;
}
/* FC-NVME supports normal SGL Data Block Descriptors */
+ if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) {
+ dev_err(ctrl->ctrl.device,
+ "Mandatory sgls are not supported!\n");
+ ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out_disconnect_admin_queue;
+ }
if (opts->queue_size > ctrl->ctrl.maxcmd) {
/* warn if maxcmd is lower than queue_size */
@@ -2700,15 +3135,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
"to maxcmd\n",
opts->queue_size, ctrl->ctrl.maxcmd);
opts->queue_size = ctrl->ctrl.maxcmd;
- }
-
- if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
- /* warn if sqsize is lower than queue_size */
- dev_warn(ctrl->ctrl.device,
- "queue_size %zu > ctrl sqsize %u, reducing "
- "to sqsize\n",
- opts->queue_size, ctrl->ctrl.sqsize + 1);
- opts->queue_size = ctrl->ctrl.sqsize + 1;
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
}
ret = nvme_fc_init_aen_ops(ctrl);
@@ -2724,9 +3151,9 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
ret = nvme_fc_create_io_queues(ctrl);
else
ret = nvme_fc_recreate_io_queues(ctrl);
- if (ret)
- goto out_term_aen_ops;
}
+ if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
+ goto out_term_aen_ops;
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -2742,17 +3169,24 @@ out_term_aen_ops:
out_disconnect_admin_queue:
/* send a Disconnect(association) LS to fc-nvme target */
nvme_fc_xmt_disconnect_assoc(ctrl);
+ spin_lock_irqsave(&ctrl->lock, flags);
ctrl->association_id = 0;
+ disls = ctrl->rcv_disconn;
+ ctrl->rcv_disconn = NULL;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ if (disls)
+ nvme_fc_xmt_ls_rsp(disls);
out_delete_hw_queue:
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
nvme_fc_free_queue(&ctrl->queues[0]);
- ctrl->assoc_active = false;
+ clear_bit(ASSOC_ACTIVE, &ctrl->flags);
nvme_fc_ctlr_inactive_on_rport(ctrl);
return ret;
}
+
/*
* This routine stops operation of the controller on the host side.
* On the host os stack side: Admin and IO queues are stopped,
@@ -2762,57 +3196,18 @@ out_free_queue:
static void
nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{
+ struct nvmefc_ls_rcv_op *disls = NULL;
unsigned long flags;
- if (!ctrl->assoc_active)
+ if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags))
return;
- ctrl->assoc_active = false;
spin_lock_irqsave(&ctrl->lock, flags);
- ctrl->flags |= FCCTRL_TERMIO;
+ set_bit(FCCTRL_TERMIO, &ctrl->flags);
ctrl->iocnt = 0;
spin_unlock_irqrestore(&ctrl->lock, flags);
- /*
- * If io queues are present, stop them and terminate all outstanding
- * ios on them. As FC allocates FC exchange for each io, the
- * transport must contact the LLDD to terminate the exchange,
- * thus releasing the FC exchange. We use blk_mq_tagset_busy_itr()
- * to tell us what io's are busy and invoke a transport routine
- * to kill them with the LLDD. After terminating the exchange
- * the LLDD will call the transport's normal io done path, but it
- * will have an aborted status. The done path will return the
- * io requests back to the block layer as part of normal completions
- * (but with error status).
- */
- if (ctrl->ctrl.queue_count > 1) {
- nvme_stop_queues(&ctrl->ctrl);
- blk_mq_tagset_busy_iter(&ctrl->tag_set,
- nvme_fc_terminate_exchange, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
- }
-
- /*
- * Other transports, which don't have link-level contexts bound
- * to sqe's, would try to gracefully shutdown the controller by
- * writing the registers for shutdown and polling (call
- * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
- * just aborted and we will wait on those contexts, and given
- * there was no indication of how live the controlelr is on the
- * link, don't send more io to create more contexts for the
- * shutdown. Let the controller fail via keepalive failure if
- * its still present.
- */
-
- /*
- * clean up the admin queue. Same thing as above.
- * use blk_mq_tagset_busy_itr() and the transport routine to
- * terminate the exchanges.
- */
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
- blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
- nvme_fc_terminate_exchange, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
+ __nvme_fc_abort_outstanding_ios(ctrl, false);
/* kill the aens as they are a separate path */
nvme_fc_abort_aen_ops(ctrl);
@@ -2820,7 +3215,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
/* wait for all io that had to be aborted */
spin_lock_irq(&ctrl->lock);
wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
- ctrl->flags &= ~FCCTRL_TERMIO;
+ clear_bit(FCCTRL_TERMIO, &ctrl->flags);
spin_unlock_irq(&ctrl->lock);
nvme_fc_term_aen_ops(ctrl);
@@ -2834,7 +3229,17 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
if (ctrl->association_id)
nvme_fc_xmt_disconnect_assoc(ctrl);
+ spin_lock_irqsave(&ctrl->lock, flags);
ctrl->association_id = 0;
+ disls = ctrl->rcv_disconn;
+ ctrl->rcv_disconn = NULL;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ if (disls)
+ /*
+ * if a Disconnect Request was waiting for a response, send
+ * now that all ABTS's have been issued (and are complete).
+ */
+ nvme_fc_xmt_ls_rsp(disls);
if (ctrl->ctrl.tagset) {
nvme_fc_delete_hw_io_queues(ctrl);
@@ -2845,7 +3250,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
nvme_fc_free_queue(&ctrl->queues[0]);
/* re-enable the admin_q so anything new can fast fail */
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_start_admin_queue(&ctrl->ctrl);
/* resume the io queues so that things will fast fail */
nvme_start_queues(&ctrl->ctrl);
@@ -2858,7 +3263,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
- cancel_work_sync(&ctrl->err_work);
+ cancel_work_sync(&ctrl->ioerr_work);
cancel_delayed_work_sync(&ctrl->connect_work);
/*
* kill the association on the link side. this will block
@@ -2878,11 +3283,13 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
return;
- if (portptr->port_state == FC_OBJSTATE_ONLINE)
+ if (portptr->port_state == FC_OBJSTATE_ONLINE) {
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
ctrl->cnum, status);
- else if (time_after_eq(jiffies, rport->dev_loss_end))
+ if (status > 0 && (status & NVME_SC_DNR))
+ recon = false;
+ } else if (time_after_eq(jiffies, rport->dev_loss_end))
recon = false;
if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
@@ -2896,89 +3303,57 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
} else {
- if (portptr->port_state == FC_OBJSTATE_ONLINE)
- dev_warn(ctrl->ctrl.device,
- "NVME-FC{%d}: Max reconnect attempts (%d) "
- "reached.\n",
- ctrl->cnum, ctrl->ctrl.nr_reconnects);
- else
+ if (portptr->port_state == FC_OBJSTATE_ONLINE) {
+ if (status > 0 && (status & NVME_SC_DNR))
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: reconnect failure\n",
+ ctrl->cnum);
+ else
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: Max reconnect attempts "
+ "(%d) reached.\n",
+ ctrl->cnum, ctrl->ctrl.nr_reconnects);
+ } else
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: dev_loss_tmo (%d) expired "
"while waiting for remoteport connectivity.\n",
- ctrl->cnum, portptr->dev_loss_tmo);
+ ctrl->cnum, min_t(int, portptr->dev_loss_tmo,
+ (ctrl->ctrl.opts->max_reconnects *
+ ctrl->ctrl.opts->reconnect_delay)));
WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
}
}
static void
-__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
-{
- /*
- * if state is connecting - the error occurred as part of a
- * reconnect attempt. The create_association error paths will
- * clean up any outstanding io.
- *
- * if it's a different state - ensure all pending io is
- * terminated. Given this can delay while waiting for the
- * aborted io to return, we recheck adapter state below
- * before changing state.
- */
- if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
- nvme_stop_keep_alive(&ctrl->ctrl);
-
- /* will block will waiting for io to terminate */
- nvme_fc_delete_association(ctrl);
- }
-
- if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
- !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
- dev_err(ctrl->ctrl.device,
- "NVME-FC{%d}: error_recovery: Couldn't change state "
- "to CONNECTING\n", ctrl->cnum);
-}
-
-static void
nvme_fc_reset_ctrl_work(struct work_struct *work)
{
struct nvme_fc_ctrl *ctrl =
container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
- int ret;
-
- __nvme_fc_terminate_io(ctrl);
nvme_stop_ctrl(&ctrl->ctrl);
- if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
- ret = nvme_fc_create_association(ctrl);
- else
- ret = -ENOTCONN;
-
- if (ret)
- nvme_fc_reconnect_or_delete(ctrl, ret);
- else
- dev_info(ctrl->ctrl.device,
- "NVME-FC{%d}: controller reset complete\n",
- ctrl->cnum);
-}
-
-static void
-nvme_fc_connect_err_work(struct work_struct *work)
-{
- struct nvme_fc_ctrl *ctrl =
- container_of(work, struct nvme_fc_ctrl, err_work);
-
- __nvme_fc_terminate_io(ctrl);
+	/* will block while waiting for io to terminate */
+ nvme_fc_delete_association(ctrl);
- atomic_set(&ctrl->err_work_active, 0);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
+ dev_err(ctrl->ctrl.device,
+ "NVME-FC{%d}: error_recovery: Couldn't change state "
+ "to CONNECTING\n", ctrl->cnum);
- /*
- * Rescheduling the connection after recovering
- * from the io error is left to the reconnect work
- * item, which is what should have stalled waiting on
- * the io that had the error that scheduled this work.
- */
+ if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
+ if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
+ dev_err(ctrl->ctrl.device,
+ "NVME-FC{%d}: failed to schedule connect "
+ "after reset\n", ctrl->cnum);
+ } else {
+ flush_delayed_work(&ctrl->connect_work);
+ }
+ } else {
+ nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN);
+ }
}
+
static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.name = "fc",
.module = THIS_MODULE,
@@ -3054,7 +3429,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
{
struct nvme_fc_ctrl *ctrl;
unsigned long flags;
- int ret, idx;
+ int ret, idx, ctrl_loss_tmo;
if (!(rport->remoteport.port_role &
(FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
@@ -3074,15 +3449,23 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
goto out_fail;
}
- if (!try_module_get(lport->ops->module)) {
- ret = -EUNATCH;
+ idx = ida_alloc(&nvme_fc_ctrl_cnt, GFP_KERNEL);
+ if (idx < 0) {
+ ret = -ENOSPC;
goto out_free_ctrl;
}
- idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
- if (idx < 0) {
- ret = -ENOSPC;
- goto out_mod_put;
+ /*
+ * if ctrl_loss_tmo is being enforced and the default reconnect delay
+ * is being used, change to a shorter reconnect delay for FC.
+ */
+ if (opts->max_reconnects != -1 &&
+ opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY &&
+ opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) {
+ ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay;
+ opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO;
+ opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
+ opts->reconnect_delay);
}
ctrl->ctrl.opts = opts;
@@ -3097,8 +3480,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->dev = lport->dev;
ctrl->cnum = idx;
ctrl->ioq_live = false;
- ctrl->assoc_active = false;
- atomic_set(&ctrl->err_work_active, 0);
init_waitqueue_head(&ctrl->ioabort_wait);
get_device(ctrl->dev);
@@ -3106,7 +3487,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
- INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
+ INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
spin_lock_init(&ctrl->lock);
/* io queue count */
@@ -3127,35 +3508,12 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
nvme_fc_init_queue(ctrl, 0);
- memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
- ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
- ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
- ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
- ctrl->admin_tag_set.cmd_size =
- struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
- ctrl->lport->ops->fcprqst_priv_sz);
- ctrl->admin_tag_set.driver_data = ctrl;
- ctrl->admin_tag_set.nr_hw_queues = 1;
- ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
- ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
-
- ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+ ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+ &nvme_fc_admin_mq_ops, BLK_MQ_F_NO_SCHED,
+ struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+ ctrl->lport->ops->fcprqst_priv_sz));
if (ret)
goto out_free_queues;
- ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
-
- ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.fabrics_q)) {
- ret = PTR_ERR(ctrl->ctrl.fabrics_q);
- goto out_free_admin_tag_set;
- }
-
- ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.admin_q)) {
- ret = PTR_ERR(ctrl->ctrl.admin_q);
- goto out_cleanup_fabrics_q;
- }
/*
* Would have been nice to init io queues tag set as well.
@@ -3166,7 +3524,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
if (ret)
- goto out_cleanup_admin_q;
+ goto out_cleanup_tagset;
/* at this point, teardown path changes to ref counting on nvme ctrl */
@@ -3181,10 +3539,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
goto fail_ctrl;
}
- nvme_get_ctrl(&ctrl->ctrl);
-
if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
- nvme_put_ctrl(&ctrl->ctrl);
dev_err(ctrl->ctrl.device,
"NVME-FC{%d}: failed to schedule initial connect\n",
ctrl->cnum);
@@ -3195,14 +3550,14 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
- ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
+ ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl));
return &ctrl->ctrl;
fail_ctrl:
nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+ cancel_work_sync(&ctrl->ioerr_work);
cancel_work_sync(&ctrl->ctrl.reset_work);
- cancel_work_sync(&ctrl->err_work);
cancel_delayed_work_sync(&ctrl->connect_work);
ctrl->ctrl.opts = NULL;
@@ -3224,19 +3579,13 @@ fail_ctrl:
return ERR_PTR(-EIO);
-out_cleanup_admin_q:
- blk_cleanup_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
-out_free_admin_tag_set:
- blk_mq_free_tag_set(&ctrl->admin_tag_set);
+out_cleanup_tagset:
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
out_free_queues:
kfree(ctrl->queues);
out_free_ida:
put_device(ctrl->dev);
- ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
-out_mod_put:
- module_put(lport->ops->module);
+ ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_free_ctrl:
kfree(ctrl);
out_fail:
@@ -3333,12 +3682,14 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
spin_lock_irqsave(&nvme_fc_lock, flags);
list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
if (lport->localport.node_name != laddr.nn ||
- lport->localport.port_name != laddr.pn)
+ lport->localport.port_name != laddr.pn ||
+ lport->localport.port_state != FC_OBJSTATE_ONLINE)
continue;
list_for_each_entry(rport, &lport->endp_list, endp_list) {
if (rport->remoteport.node_name != raddr.nn ||
- rport->remoteport.port_name != raddr.pn)
+ rport->remoteport.port_name != raddr.pn ||
+ rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
continue;
/* if fail to get reference fall through. Will error */
@@ -3431,14 +3782,77 @@ process_local_list:
return count;
}
+
static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
+#ifdef CONFIG_BLK_CGROUP_FC_APPID
+/* Parse the cgroup id from a buf and return the length of cgrpid */
+static int fc_parse_cgrpid(const char *buf, u64 *id)
+{
+ char cgrp_id[16+1];
+ int cgrpid_len, j;
+
+ memset(cgrp_id, 0x0, sizeof(cgrp_id));
+ for (cgrpid_len = 0, j = 0; cgrpid_len < 17; cgrpid_len++) {
+ if (buf[cgrpid_len] != ':')
+ cgrp_id[cgrpid_len] = buf[cgrpid_len];
+ else {
+ j = 1;
+ break;
+ }
+ }
+ if (!j)
+ return -EINVAL;
+ if (kstrtou64(cgrp_id, 16, id) < 0)
+ return -EINVAL;
+ return cgrpid_len;
+}
+
+/*
+ * Parse and update the appid in the blkcg associated with the cgroupid.
+ */
+static ssize_t fc_appid_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ size_t orig_count = count;
+ u64 cgrp_id;
+ int appid_len = 0;
+ int cgrpid_len = 0;
+ char app_id[FC_APPID_LEN];
+ int ret = 0;
+
+ if (buf[count-1] == '\n')
+ count--;
+
+ if ((count > (16+1+FC_APPID_LEN)) || (!strchr(buf, ':')))
+ return -EINVAL;
+
+ cgrpid_len = fc_parse_cgrpid(buf, &cgrp_id);
+ if (cgrpid_len < 0)
+ return -EINVAL;
+ appid_len = count - cgrpid_len - 1;
+ if (appid_len > FC_APPID_LEN)
+ return -EINVAL;
+
+ memset(app_id, 0x0, sizeof(app_id));
+ memcpy(app_id, &buf[cgrpid_len+1], appid_len);
+ ret = blkcg_set_fc_appid(app_id, cgrp_id, sizeof(app_id));
+ if (ret < 0)
+ return ret;
+ return orig_count;
+}
+static DEVICE_ATTR(appid_store, 0200, NULL, fc_appid_store);
+#endif /* CONFIG_BLK_CGROUP_FC_APPID */
+
static struct attribute *nvme_fc_attrs[] = {
&dev_attr_nvme_discovery.attr,
+#ifdef CONFIG_BLK_CGROUP_FC_APPID
+ &dev_attr_appid_store.attr,
+#endif
NULL
};
-static struct attribute_group nvme_fc_attr_group = {
+static const struct attribute_group nvme_fc_attr_group = {
.attrs = nvme_fc_attrs,
};
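
The appid_store attribute added above takes a single write of the form "<cgroup id in hex>:<application id>"; fc_parse_cgrpid() splits the string at the colon and blkcg_set_fc_appid() records the application id for that cgroup. A minimal userspace sketch follows (not part of the patch; the /sys/class/fc/ctl path and the use of the cgroup directory's inode number as the cgroup id are assumptions):

/* Hypothetical helper, not from the patch: tag a cgroup's FC traffic with an
 * application identifier by writing "<cgrpid-hex>:<appid>" to appid_store.
 * On cgroup v2 the cgroup id is commonly the inode number of the cgroup dir.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *attr = "/sys/class/fc/ctl/appid_store"; /* assumed path */
	struct stat st;
	char buf[64];
	int fd, len;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <cgroup-dir> <app-id>\n", argv[0]);
		return 1;
	}
	if (stat(argv[1], &st) < 0)
		return 1;

	/* "<cgrpid>:<appid>", the format fc_parse_cgrpid()/fc_appid_store() expect */
	len = snprintf(buf, sizeof(buf), "%llx:%s",
		       (unsigned long long)st.st_ino, argv[2]);

	fd = open(attr, O_WRONLY);
	if (fd < 0 || write(fd, buf, len) != len) {
		perror("appid_store");
		return 1;
	}
	close(fd);
	return 0;
}
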
diff --git a/drivers/nvme/host/fc.h b/drivers/nvme/host/fc.h
new file mode 100644
index 000000000000..05ce566f2caf
--- /dev/null
+++ b/drivers/nvme/host/fc.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016, Avago Technologies
+ */
+
+#ifndef _NVME_FC_TRANSPORT_H
+#define _NVME_FC_TRANSPORT_H 1
+
+
+/*
+ * Common definitions between the nvme_fc (host) transport and
+ * nvmet_fc (target) transport implementation.
+ */
+
+/*
+ * ****************** FC-NVME LS HANDLING ******************
+ */
+
+union nvmefc_ls_requests {
+ struct fcnvme_ls_rqst_w0 w0;
+ struct fcnvme_ls_cr_assoc_rqst rq_cr_assoc;
+ struct fcnvme_ls_cr_conn_rqst rq_cr_conn;
+ struct fcnvme_ls_disconnect_assoc_rqst rq_dis_assoc;
+ struct fcnvme_ls_disconnect_conn_rqst rq_dis_conn;
+} __aligned(128); /* alignment for other things alloc'd with */
+
+union nvmefc_ls_responses {
+ struct fcnvme_ls_rjt rsp_rjt;
+ struct fcnvme_ls_cr_assoc_acc rsp_cr_assoc;
+ struct fcnvme_ls_cr_conn_acc rsp_cr_conn;
+ struct fcnvme_ls_disconnect_assoc_acc rsp_dis_assoc;
+ struct fcnvme_ls_disconnect_conn_acc rsp_dis_conn;
+} __aligned(128); /* alignment for other things alloc'd with */
+
+static inline void
+nvme_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
+{
+ struct fcnvme_ls_acc_hdr *acc = buf;
+
+ acc->w0.ls_cmd = ls_cmd;
+ acc->desc_list_len = desc_len;
+ acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
+ acc->rqst.desc_len =
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
+ acc->rqst.w0.ls_cmd = rqst_ls_cmd;
+}
+
+static inline int
+nvme_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
+ u8 reason, u8 explanation, u8 vendor)
+{
+ struct fcnvme_ls_rjt *rjt = buf;
+
+ nvme_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
+ ls_cmd);
+ rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
+ rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
+ rjt->rjt.reason_code = reason;
+ rjt->rjt.reason_explanation = explanation;
+ rjt->rjt.vendor = vendor;
+
+ return sizeof(struct fcnvme_ls_rjt);
+}
+
+/* Validation Error indexes into the string table below */
+enum {
+ VERR_NO_ERROR = 0,
+ VERR_CR_ASSOC_LEN = 1,
+ VERR_CR_ASSOC_RQST_LEN = 2,
+ VERR_CR_ASSOC_CMD = 3,
+ VERR_CR_ASSOC_CMD_LEN = 4,
+ VERR_ERSP_RATIO = 5,
+ VERR_ASSOC_ALLOC_FAIL = 6,
+ VERR_QUEUE_ALLOC_FAIL = 7,
+ VERR_CR_CONN_LEN = 8,
+ VERR_CR_CONN_RQST_LEN = 9,
+ VERR_ASSOC_ID = 10,
+ VERR_ASSOC_ID_LEN = 11,
+ VERR_NO_ASSOC = 12,
+ VERR_CONN_ID = 13,
+ VERR_CONN_ID_LEN = 14,
+ VERR_INVAL_CONN = 15,
+ VERR_CR_CONN_CMD = 16,
+ VERR_CR_CONN_CMD_LEN = 17,
+ VERR_DISCONN_LEN = 18,
+ VERR_DISCONN_RQST_LEN = 19,
+ VERR_DISCONN_CMD = 20,
+ VERR_DISCONN_CMD_LEN = 21,
+ VERR_DISCONN_SCOPE = 22,
+ VERR_RS_LEN = 23,
+ VERR_RS_RQST_LEN = 24,
+ VERR_RS_CMD = 25,
+ VERR_RS_CMD_LEN = 26,
+ VERR_RS_RCTL = 27,
+ VERR_RS_RO = 28,
+ VERR_LSACC = 29,
+ VERR_LSDESC_RQST = 30,
+ VERR_LSDESC_RQST_LEN = 31,
+ VERR_CR_ASSOC = 32,
+ VERR_CR_ASSOC_ACC_LEN = 33,
+ VERR_CR_CONN = 34,
+ VERR_CR_CONN_ACC_LEN = 35,
+ VERR_DISCONN = 36,
+ VERR_DISCONN_ACC_LEN = 37,
+};
+
+static char *validation_errors[] = {
+ "OK",
+ "Bad CR_ASSOC Length",
+ "Bad CR_ASSOC Rqst Length",
+ "Not CR_ASSOC Cmd",
+ "Bad CR_ASSOC Cmd Length",
+ "Bad Ersp Ratio",
+ "Association Allocation Failed",
+ "Queue Allocation Failed",
+ "Bad CR_CONN Length",
+ "Bad CR_CONN Rqst Length",
+ "Not Association ID",
+ "Bad Association ID Length",
+ "No Association",
+ "Not Connection ID",
+ "Bad Connection ID Length",
+ "Invalid Connection ID",
+ "Not CR_CONN Cmd",
+ "Bad CR_CONN Cmd Length",
+ "Bad DISCONN Length",
+ "Bad DISCONN Rqst Length",
+ "Not DISCONN Cmd",
+ "Bad DISCONN Cmd Length",
+ "Bad Disconnect Scope",
+ "Bad RS Length",
+ "Bad RS Rqst Length",
+ "Not RS Cmd",
+ "Bad RS Cmd Length",
+ "Bad RS R_CTL",
+ "Bad RS Relative Offset",
+ "Not LS_ACC",
+ "Not LSDESC_RQST",
+ "Bad LSDESC_RQST Length",
+ "Not CR_ASSOC Rqst",
+ "Bad CR_ASSOC ACC Length",
+ "Not CR_CONN Rqst",
+ "Bad CR_CONN ACC Length",
+ "Not Disconnect Rqst",
+ "Bad Disconnect ACC Length",
+};
+
+#define NVME_FC_LAST_LS_CMD_VALUE FCNVME_LS_DISCONNECT_CONN
+
+static char *nvmefc_ls_names[] = {
+ "Reserved (0)",
+ "RJT (1)",
+ "ACC (2)",
+ "Create Association",
+ "Create Connection",
+ "Disconnect Association",
+ "Disconnect Connection",
+};
+
+static inline void
+nvmefc_fmt_lsreq_discon_assoc(struct nvmefc_ls_req *lsreq,
+ struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst,
+ struct fcnvme_ls_disconnect_assoc_acc *discon_acc,
+ u64 association_id)
+{
+ lsreq->rqstaddr = discon_rqst;
+ lsreq->rqstlen = sizeof(*discon_rqst);
+ lsreq->rspaddr = discon_acc;
+ lsreq->rsplen = sizeof(*discon_acc);
+ lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;
+
+ discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT_ASSOC;
+ discon_rqst->desc_list_len = cpu_to_be32(
+ sizeof(struct fcnvme_lsdesc_assoc_id) +
+ sizeof(struct fcnvme_lsdesc_disconn_cmd));
+
+ discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
+ discon_rqst->associd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id));
+
+ discon_rqst->associd.association_id = cpu_to_be64(association_id);
+
+ discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
+ FCNVME_LSDESC_DISCONN_CMD);
+ discon_rqst->discon_cmd.desc_len =
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_disconn_cmd));
+}
+
+static inline int
+nvmefc_vldt_lsreq_discon_assoc(u32 rqstlen,
+ struct fcnvme_ls_disconnect_assoc_rqst *rqst)
+{
+ int ret = 0;
+
+ if (rqstlen < sizeof(struct fcnvme_ls_disconnect_assoc_rqst))
+ ret = VERR_DISCONN_LEN;
+ else if (rqst->desc_list_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_ls_disconnect_assoc_rqst)))
+ ret = VERR_DISCONN_RQST_LEN;
+ else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
+ ret = VERR_ASSOC_ID;
+ else if (rqst->associd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_assoc_id)))
+ ret = VERR_ASSOC_ID_LEN;
+ else if (rqst->discon_cmd.desc_tag !=
+ cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
+ ret = VERR_DISCONN_CMD;
+ else if (rqst->discon_cmd.desc_len !=
+ fcnvme_lsdesc_len(
+ sizeof(struct fcnvme_lsdesc_disconn_cmd)))
+ ret = VERR_DISCONN_CMD_LEN;
+ /*
+ * As the standard changed on the LS, check if old format and scope
+ * something other than Association (e.g. 0).
+ */
+ else if (rqst->discon_cmd.rsvd8[0])
+ ret = VERR_DISCONN_SCOPE;
+
+ return ret;
+}
+
+#endif /* _NVME_FC_TRANSPORT_H */
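
The header above collects LS helpers shared by the host and target FC transports. As a rough illustration (not from the patch) of how they fit together, a receiver of a Disconnect Association LS could validate the request and build a reject with them; the numeric reject reason below is a stand-in for the symbolic codes defined in <linux/nvme-fc.h>:

/*
 * Illustrative sketch only, not part of the patch: validate an incoming
 * Disconnect Association request with the helper above and, on failure,
 * log the VERR_* string and build an LS reject in the caller's response
 * buffer.  Reason 0x03 is "logical error"; symbolic reject codes live in
 * <linux/nvme-fc.h>.
 */
static int example_recv_disconnect_assoc(void *rspbuf, u16 rspbuflen,
		struct fcnvme_ls_disconnect_assoc_rqst *rqst, u32 rqstlen)
{
	int verr;

	verr = nvmefc_vldt_lsreq_discon_assoc(rqstlen, rqst);
	if (verr) {
		pr_warn("Disconnect LS rejected: %s\n", validation_errors[verr]);
		return nvme_fc_format_rjt(rspbuf, rspbuflen,
					  rqst->w0.ls_cmd, 0x03, 0, 0);
	}

	/* ...tear down the association, then format the ACC response... */
	return 0;
}
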
diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
index 2e6477ed420f..9e6e56c20ec9 100644
--- a/drivers/nvme/host/hwmon.c
+++ b/drivers/nvme/host/hwmon.c
@@ -12,7 +12,7 @@
struct nvme_hwmon_data {
struct nvme_ctrl *ctrl;
- struct nvme_smart_log log;
+ struct nvme_smart_log *log;
struct mutex read_lock;
};
@@ -59,19 +59,15 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data)
{
- int ret;
-
- ret = nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
- &data->log, sizeof(data->log), 0);
-
- return ret <= 0 ? ret : -EIO;
+ return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
+ NVME_CSI_NVM, data->log, sizeof(*data->log), 0);
}
static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
struct nvme_hwmon_data *data = dev_get_drvdata(dev);
- struct nvme_smart_log *log = &data->log;
+ struct nvme_smart_log *log = data->log;
int temp;
int err;
@@ -167,7 +163,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
case hwmon_temp_max:
case hwmon_temp_min:
if ((!channel && data->ctrl->wctemp) ||
- (channel && data->log.temp_sensor[channel - 1])) {
+ (channel && data->log->temp_sensor[channel - 1])) {
if (data->ctrl->quirks &
NVME_QUIRK_NO_TEMP_THRESH_CHANGE)
return 0444;
@@ -180,7 +176,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
break;
case hwmon_temp_input:
case hwmon_temp_label:
- if (!channel || data->log.temp_sensor[channel - 1])
+ if (!channel || data->log->temp_sensor[channel - 1])
return 0444;
break;
default:
@@ -225,16 +221,22 @@ static const struct hwmon_chip_info nvme_hwmon_chip_info = {
.info = nvme_hwmon_info,
};
-void nvme_hwmon_init(struct nvme_ctrl *ctrl)
+int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
- struct device *dev = ctrl->dev;
+ struct device *dev = ctrl->device;
struct nvme_hwmon_data *data;
struct device *hwmon;
int err;
- data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
- return;
+ return -ENOMEM;
+
+ data->log = kzalloc(sizeof(*data->log), GFP_KERNEL);
+ if (!data->log) {
+ err = -ENOMEM;
+ goto err_free_data;
+ }
data->ctrl = ctrl;
mutex_init(&data->read_lock);
@@ -242,15 +244,36 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl)
err = nvme_hwmon_get_smart_log(data);
if (err) {
dev_warn(dev, "Failed to read smart log (error %d)\n", err);
- devm_kfree(dev, data);
- return;
+ goto err_free_log;
}
- hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data,
- &nvme_hwmon_chip_info,
- NULL);
+ hwmon = hwmon_device_register_with_info(dev, "nvme",
+ data, &nvme_hwmon_chip_info,
+ NULL);
if (IS_ERR(hwmon)) {
dev_warn(dev, "Failed to instantiate hwmon device\n");
- devm_kfree(dev, data);
+ err = PTR_ERR(hwmon);
+ goto err_free_log;
+ }
+ ctrl->hwmon_device = hwmon;
+ return 0;
+
+err_free_log:
+ kfree(data->log);
+err_free_data:
+ kfree(data);
+ return err;
+}
+
+void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->hwmon_device) {
+ struct nvme_hwmon_data *data =
+ dev_get_drvdata(ctrl->hwmon_device);
+
+ hwmon_device_unregister(ctrl->hwmon_device);
+ ctrl->hwmon_device = NULL;
+ kfree(data->log);
+ kfree(data);
}
}
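
With the hwmon device now registered against the controller device and torn down explicitly in nvme_hwmon_exit(), the sensor remains a regular hwmon entry in sysfs. A small userspace sketch (not part of the patch; the hwmonX index is system dependent, so it is discovered by matching the "nvme" name, and temp1 is assumed to be the composite temperature channel):

/*
 * Print the NVMe composite temperature from the hwmon device registered
 * above.  tempN_input values are in millidegrees Celsius.
 */
#include <dirent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char path[300], name[32];
	struct dirent *de;
	DIR *dir = opendir("/sys/class/hwmon");
	FILE *f;
	long temp;

	if (!dir)
		return 1;
	while ((de = readdir(dir))) {
		if (de->d_name[0] == '.')
			continue;
		snprintf(path, sizeof(path), "/sys/class/hwmon/%s/name", de->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (!fgets(name, sizeof(name), f) || strncmp(name, "nvme", 4)) {
			fclose(f);
			continue;
		}
		fclose(f);
		snprintf(path, sizeof(path), "/sys/class/hwmon/%s/temp1_input", de->d_name);
		f = fopen(path, "r");
		if (f && fscanf(f, "%ld", &temp) == 1)
			printf("%s: composite temperature %ld.%03ld C\n",
			       de->d_name, temp / 1000, temp % 1000);
		if (f)
			fclose(f);
	}
	closedir(dir);
	return 0;
}
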
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
new file mode 100644
index 000000000000..81f5550b670d
--- /dev/null
+++ b/drivers/nvme/host/ioctl.c
@@ -0,0 +1,915 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2014, Intel Corporation.
+ * Copyright (c) 2017-2021 Christoph Hellwig.
+ */
+#include <linux/ptrace.h> /* for force_successful_syscall_return */
+#include <linux/nvme_ioctl.h>
+#include <linux/io_uring.h>
+#include "nvme.h"
+
+/*
+ * Convert integer values from ioctl structures to user pointers, silently
+ * ignoring the upper bits in the compat case to match behaviour of 32-bit
+ * kernels.
+ */
+static void __user *nvme_to_user_ptr(uintptr_t ptrval)
+{
+ if (in_compat_syscall())
+ ptrval = (compat_uptr_t)ptrval;
+ return (void __user *)ptrval;
+}
+
+static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
+ unsigned len, u32 seed)
+{
+ struct bio_integrity_payload *bip;
+ int ret = -ENOMEM;
+ void *buf;
+ struct bio *bio = req->bio;
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ goto out;
+
+ ret = -EFAULT;
+ if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len))
+ goto out_free_meta;
+
+ bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
+ if (IS_ERR(bip)) {
+ ret = PTR_ERR(bip);
+ goto out_free_meta;
+ }
+
+ bip->bip_iter.bi_size = len;
+ bip->bip_iter.bi_sector = seed;
+ ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
+ offset_in_page(buf));
+ if (ret != len) {
+ ret = -ENOMEM;
+ goto out_free_meta;
+ }
+
+ req->cmd_flags |= REQ_INTEGRITY;
+ return buf;
+out_free_meta:
+ kfree(buf);
+out:
+ return ERR_PTR(ret);
+}
+
+static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
+ void *meta, unsigned len, int ret)
+{
+ if (!ret && req_op(req) == REQ_OP_DRV_IN &&
+ copy_to_user(ubuf, meta, len))
+ ret = -EFAULT;
+ kfree(meta);
+ return ret;
+}
+
+static struct request *nvme_alloc_user_request(struct request_queue *q,
+ struct nvme_command *cmd, blk_opf_t rq_flags,
+ blk_mq_req_flags_t blk_flags)
+{
+ struct request *req;
+
+ req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
+ if (IS_ERR(req))
+ return req;
+ nvme_init_request(req, cmd);
+ nvme_req(req)->flags |= NVME_REQ_USERCMD;
+ return req;
+}
+
+static int nvme_map_user_request(struct request *req, u64 ubuffer,
+ unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
+ u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
+ bool vec)
+{
+ struct request_queue *q = req->q;
+ struct nvme_ns *ns = q->queuedata;
+ struct block_device *bdev = ns ? ns->disk->part0 : NULL;
+ struct bio *bio = NULL;
+ void *meta = NULL;
+ int ret;
+
+ if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
+ struct iov_iter iter;
+
+ /* fixedbufs is only for non-vectored io */
+ if (WARN_ON_ONCE(vec))
+ return -EINVAL;
+ ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
+ rq_data_dir(req), &iter, ioucmd);
+ if (ret < 0)
+ goto out;
+ ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
+ } else {
+ ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
+ bufflen, GFP_KERNEL, vec, 0, 0,
+ rq_data_dir(req));
+ }
+
+ if (ret)
+ goto out;
+ bio = req->bio;
+ if (bdev)
+ bio_set_dev(bio, bdev);
+
+ if (bdev && meta_buffer && meta_len) {
+ meta = nvme_add_user_metadata(req, meta_buffer, meta_len,
+ meta_seed);
+ if (IS_ERR(meta)) {
+ ret = PTR_ERR(meta);
+ goto out_unmap;
+ }
+ *metap = meta;
+ }
+
+ return ret;
+
+out_unmap:
+ if (bio)
+ blk_rq_unmap_user(bio);
+out:
+ blk_mq_free_request(req);
+ return ret;
+}
+
+static int nvme_submit_user_cmd(struct request_queue *q,
+ struct nvme_command *cmd, u64 ubuffer,
+ unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
+ u32 meta_seed, u64 *result, unsigned timeout, bool vec)
+{
+ struct nvme_ctrl *ctrl;
+ struct request *req;
+ void *meta = NULL;
+ struct bio *bio;
+ u32 effects;
+ int ret;
+
+ req = nvme_alloc_user_request(q, cmd, 0, 0);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ req->timeout = timeout;
+ if (ubuffer && bufflen) {
+ ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
+ meta_len, meta_seed, &meta, NULL, vec);
+ if (ret)
+ return ret;
+ }
+
+ bio = req->bio;
+ ctrl = nvme_req(req)->ctrl;
+
+ ret = nvme_execute_passthru_rq(req, &effects);
+
+ if (result)
+ *result = le64_to_cpu(nvme_req(req)->result.u64);
+ if (meta)
+ ret = nvme_finish_user_metadata(req, meta_buffer, meta,
+ meta_len, ret);
+ if (bio)
+ blk_rq_unmap_user(bio);
+ blk_mq_free_request(req);
+
+ if (effects)
+ nvme_passthru_end(ctrl, effects, cmd, ret);
+
+ return ret;
+}
+
+static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
+{
+ struct nvme_user_io io;
+ struct nvme_command c;
+ unsigned length, meta_len;
+ void __user *metadata;
+
+ if (copy_from_user(&io, uio, sizeof(io)))
+ return -EFAULT;
+ if (io.flags)
+ return -EINVAL;
+
+ switch (io.opcode) {
+ case nvme_cmd_write:
+ case nvme_cmd_read:
+ case nvme_cmd_compare:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ length = (io.nblocks + 1) << ns->lba_shift;
+
+ if ((io.control & NVME_RW_PRINFO_PRACT) &&
+ ns->ms == sizeof(struct t10_pi_tuple)) {
+ /*
+ * Protection information is stripped/inserted by the
+ * controller.
+ */
+ if (nvme_to_user_ptr(io.metadata))
+ return -EINVAL;
+ meta_len = 0;
+ metadata = NULL;
+ } else {
+ meta_len = (io.nblocks + 1) * ns->ms;
+ metadata = nvme_to_user_ptr(io.metadata);
+ }
+
+ if (ns->features & NVME_NS_EXT_LBAS) {
+ length += meta_len;
+ meta_len = 0;
+ } else if (meta_len) {
+ if ((io.metadata & 3) || !io.metadata)
+ return -EINVAL;
+ }
+
+ memset(&c, 0, sizeof(c));
+ c.rw.opcode = io.opcode;
+ c.rw.flags = io.flags;
+ c.rw.nsid = cpu_to_le32(ns->head->ns_id);
+ c.rw.slba = cpu_to_le64(io.slba);
+ c.rw.length = cpu_to_le16(io.nblocks);
+ c.rw.control = cpu_to_le16(io.control);
+ c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
+ c.rw.reftag = cpu_to_le32(io.reftag);
+ c.rw.apptag = cpu_to_le16(io.apptag);
+ c.rw.appmask = cpu_to_le16(io.appmask);
+
+ return nvme_submit_user_cmd(ns->queue, &c,
+ io.addr, length,
+ metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
+ false);
+}
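
For reference, a userspace sketch (not part of the patch) of the NVME_IOCTL_SUBMIT_IO path decoded by nvme_submit_io() above; /dev/nvme0n1 is an assumed node name, and the kernel derives the transfer length from nblocks and the namespace LBA size, so a 4 KiB buffer covers one block on both 512 B and 4 KiB formats:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	static unsigned char buf[4096];
	struct nvme_user_io io;
	int fd = open("/dev/nvme0n1", O_RDONLY);	/* assumed device node */

	if (fd < 0)
		return 1;

	memset(&io, 0, sizeof(io));
	io.opcode  = 0x02;		/* nvme_cmd_read */
	io.nblocks = 0;			/* zero-based: one logical block */
	io.slba    = 0;
	io.addr    = (uintptr_t)buf;

	if (ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io) < 0) {
		perror("NVME_IOCTL_SUBMIT_IO");
		return 1;
	}
	printf("LBA 0 starts with %02x %02x %02x %02x\n",
	       buf[0], buf[1], buf[2], buf[3]);
	return 0;
}
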
+
+static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
+ struct nvme_ns *ns, __u32 nsid)
+{
+ if (ns && nsid != ns->head->ns_id) {
+ dev_err(ctrl->device,
+			"%s: nsid (%u) in cmd does not match nsid (%u) "
+			"of namespace\n",
+ current->comm, nsid, ns->head->ns_id);
+ return false;
+ }
+
+ return true;
+}
+
+static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ struct nvme_passthru_cmd __user *ucmd)
+{
+ struct nvme_passthru_cmd cmd;
+ struct nvme_command c;
+ unsigned timeout = 0;
+ u64 result;
+ int status;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
+ return -EFAULT;
+ if (cmd.flags)
+ return -EINVAL;
+ if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = cmd.opcode;
+ c.common.flags = cmd.flags;
+ c.common.nsid = cpu_to_le32(cmd.nsid);
+ c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+ c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+ c.common.cdw10 = cpu_to_le32(cmd.cdw10);
+ c.common.cdw11 = cpu_to_le32(cmd.cdw11);
+ c.common.cdw12 = cpu_to_le32(cmd.cdw12);
+ c.common.cdw13 = cpu_to_le32(cmd.cdw13);
+ c.common.cdw14 = cpu_to_le32(cmd.cdw14);
+ c.common.cdw15 = cpu_to_le32(cmd.cdw15);
+
+ if (cmd.timeout_ms)
+ timeout = msecs_to_jiffies(cmd.timeout_ms);
+
+ status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
+ cmd.addr, cmd.data_len,
+ nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
+ 0, &result, timeout, false);
+
+ if (status >= 0) {
+ if (put_user(result, &ucmd->result))
+ return -EFAULT;
+ }
+
+ return status;
+}
+
+static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ struct nvme_passthru_cmd64 __user *ucmd, bool vec)
+{
+ struct nvme_passthru_cmd64 cmd;
+ struct nvme_command c;
+ unsigned timeout = 0;
+ int status;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
+ return -EFAULT;
+ if (cmd.flags)
+ return -EINVAL;
+ if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = cmd.opcode;
+ c.common.flags = cmd.flags;
+ c.common.nsid = cpu_to_le32(cmd.nsid);
+ c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+ c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+ c.common.cdw10 = cpu_to_le32(cmd.cdw10);
+ c.common.cdw11 = cpu_to_le32(cmd.cdw11);
+ c.common.cdw12 = cpu_to_le32(cmd.cdw12);
+ c.common.cdw13 = cpu_to_le32(cmd.cdw13);
+ c.common.cdw14 = cpu_to_le32(cmd.cdw14);
+ c.common.cdw15 = cpu_to_le32(cmd.cdw15);
+
+ if (cmd.timeout_ms)
+ timeout = msecs_to_jiffies(cmd.timeout_ms);
+
+ status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
+ cmd.addr, cmd.data_len,
+ nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
+ 0, &cmd.result, timeout, vec);
+
+ if (status >= 0) {
+ if (put_user(cmd.result, &ucmd->result))
+ return -EFAULT;
+ }
+
+ return status;
+}
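
Similarly, a hedged userspace sketch (not part of the patch) of the admin passthrough handled by nvme_user_cmd() above, issuing Identify Controller through NVME_IOCTL_ADMIN_CMD; /dev/nvme0 is an assumed controller node, and a positive ioctl return value is the NVMe status code while a negative one is an errno:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	static unsigned char id[4096];		/* Identify data structure */
	struct nvme_admin_cmd cmd;
	int fd = open("/dev/nvme0", O_RDWR);	/* assumed controller node */
	int err;

	if (fd < 0)
		return 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode   = 0x06;			/* Identify */
	cmd.cdw10    = 1;			/* CNS = 01h: Identify Controller */
	cmd.addr     = (uintptr_t)id;
	cmd.data_len = sizeof(id);

	err = ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
	if (err) {
		fprintf(stderr, "identify failed: %d\n", err);
		return 1;
	}
	printf("model: %.40s\n", (char *)id + 24);	/* MN is bytes 24..63 */
	return 0;
}
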
+
+struct nvme_uring_data {
+ __u64 metadata;
+ __u64 addr;
+ __u32 data_len;
+ __u32 metadata_len;
+ __u32 timeout_ms;
+};
+
+/*
+ * This overlays struct io_uring_cmd pdu.
+ * Expect build errors if this grows larger than that.
+ */
+struct nvme_uring_cmd_pdu {
+ union {
+ struct bio *bio;
+ struct request *req;
+ };
+ u32 meta_len;
+ u32 nvme_status;
+ union {
+ struct {
+ void *meta; /* kernel-resident buffer */
+ void __user *meta_buffer;
+ };
+ u64 result;
+ } u;
+};
+
+static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
+ struct io_uring_cmd *ioucmd)
+{
+ return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
+}
+
+static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd)
+{
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ struct request *req = pdu->req;
+ int status;
+ u64 result;
+
+ if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
+ status = -EINTR;
+ else
+ status = nvme_req(req)->status;
+
+ result = le64_to_cpu(nvme_req(req)->result.u64);
+
+ if (pdu->meta_len)
+ status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
+ pdu->u.meta, pdu->meta_len, status);
+ if (req->bio)
+ blk_rq_unmap_user(req->bio);
+ blk_mq_free_request(req);
+
+ io_uring_cmd_done(ioucmd, status, result);
+}
+
+static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
+{
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+
+ if (pdu->bio)
+ blk_rq_unmap_user(pdu->bio);
+
+ io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result);
+}
+
+static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+ blk_status_t err)
+{
+ struct io_uring_cmd *ioucmd = req->end_io_data;
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ void *cookie = READ_ONCE(ioucmd->cookie);
+
+ req->bio = pdu->bio;
+ if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
+ pdu->nvme_status = -EINTR;
+ else
+ pdu->nvme_status = nvme_req(req)->status;
+ pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
+
+ /*
+ * For iopoll, complete it directly.
+ * Otherwise, move the completion to task work.
+ */
+ if (cookie != NULL && blk_rq_is_poll(req))
+ nvme_uring_task_cb(ioucmd);
+ else
+ io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+
+ return RQ_END_IO_FREE;
+}
+
+static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
+ blk_status_t err)
+{
+ struct io_uring_cmd *ioucmd = req->end_io_data;
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ void *cookie = READ_ONCE(ioucmd->cookie);
+
+ req->bio = pdu->bio;
+ pdu->req = req;
+
+ /*
+ * For iopoll, complete it directly.
+ * Otherwise, move the completion to task work.
+ */
+ if (cookie != NULL && blk_rq_is_poll(req))
+ nvme_uring_task_meta_cb(ioucmd);
+ else
+ io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_meta_cb);
+
+ return RQ_END_IO_NONE;
+}
+
+static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
+{
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ const struct nvme_uring_cmd *cmd = ioucmd->cmd;
+ struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
+ struct nvme_uring_data d;
+ struct nvme_command c;
+ struct request *req;
+ blk_opf_t rq_flags = 0;
+ blk_mq_req_flags_t blk_flags = 0;
+ void *meta = NULL;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ c.common.opcode = READ_ONCE(cmd->opcode);
+ c.common.flags = READ_ONCE(cmd->flags);
+ if (c.common.flags)
+ return -EINVAL;
+
+ c.common.command_id = 0;
+ c.common.nsid = cpu_to_le32(cmd->nsid);
+ if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid)))
+ return -EINVAL;
+
+ c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2));
+ c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3));
+ c.common.metadata = 0;
+ c.common.dptr.prp1 = c.common.dptr.prp2 = 0;
+ c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10));
+ c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11));
+ c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12));
+ c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13));
+ c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
+ c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));
+
+ d.metadata = READ_ONCE(cmd->metadata);
+ d.addr = READ_ONCE(cmd->addr);
+ d.data_len = READ_ONCE(cmd->data_len);
+ d.metadata_len = READ_ONCE(cmd->metadata_len);
+ d.timeout_ms = READ_ONCE(cmd->timeout_ms);
+
+ if (issue_flags & IO_URING_F_NONBLOCK) {
+ rq_flags = REQ_NOWAIT;
+ blk_flags = BLK_MQ_REQ_NOWAIT;
+ }
+ if (issue_flags & IO_URING_F_IOPOLL)
+ rq_flags |= REQ_POLLED;
+
+retry:
+ req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;
+
+ if (d.addr && d.data_len) {
+ ret = nvme_map_user_request(req, d.addr,
+ d.data_len, nvme_to_user_ptr(d.metadata),
+ d.metadata_len, 0, &meta, ioucmd, vec);
+ if (ret)
+ return ret;
+ }
+
+ if (issue_flags & IO_URING_F_IOPOLL && rq_flags & REQ_POLLED) {
+ if (unlikely(!req->bio)) {
+ /* we can't poll this, so alloc regular req instead */
+ blk_mq_free_request(req);
+ rq_flags &= ~REQ_POLLED;
+ goto retry;
+ } else {
+ WRITE_ONCE(ioucmd->cookie, req->bio);
+ req->bio->bi_opf |= REQ_POLLED;
+ }
+ }
+ /* to free bio on completion, as req->bio will be null at that time */
+ pdu->bio = req->bio;
+ pdu->meta_len = d.metadata_len;
+ req->end_io_data = ioucmd;
+ if (pdu->meta_len) {
+ pdu->u.meta = meta;
+ pdu->u.meta_buffer = nvme_to_user_ptr(d.metadata);
+ req->end_io = nvme_uring_cmd_end_io_meta;
+ } else {
+ req->end_io = nvme_uring_cmd_end_io;
+ }
+ blk_execute_rq_nowait(req, false);
+ return -EIOCBQUEUED;
+}
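
A userspace sketch (not part of the patch) of the io_uring passthrough consumed by nvme_uring_cmd_io() above, using liburing; it assumes a recent uapi header exposing sqe->cmd/cmd_op and big CQEs, and /dev/ng0n1 as the per-namespace character device:

/*
 * Read one logical block of namespace 1 via IORING_OP_URING_CMD with
 * NVME_URING_CMD_IO.  Big SQEs/CQEs are mandatory, matching
 * nvme_uring_cmd_checks(); the command payload occupies the second half
 * of the 128-byte SQE.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <liburing.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	static char buf[4096];
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct nvme_uring_cmd *cmd;
	int fd = open("/dev/ng0n1", O_RDWR);	/* assumed char device */

	if (fd < 0)
		return 1;
	if (io_uring_queue_init(4, &ring,
				IORING_SETUP_SQE128 | IORING_SETUP_CQE32))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	if (!sqe)
		return 1;
	memset(sqe, 0, 2 * sizeof(*sqe));	/* SQE128: two 64-byte slots */
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = fd;
	sqe->cmd_op = NVME_URING_CMD_IO;

	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	cmd->opcode   = 0x02;			/* nvme_cmd_read */
	cmd->nsid     = 1;			/* assumed namespace id */
	cmd->addr     = (uintptr_t)buf;
	cmd->data_len = sizeof(buf);
	cmd->cdw10    = 0;			/* SLBA, lower 32 bits */
	cmd->cdw12    = 0;			/* zero-based NLB: one block */

	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	printf("status %d, result %llu\n", cqe->res,
	       (unsigned long long)cqe->big_cqe[0]);
	io_uring_cqe_seen(&ring, cqe);
	return 0;
}
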
+
+static bool is_ctrl_ioctl(unsigned int cmd)
+{
+ if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
+ return true;
+ if (is_sed_ioctl(cmd))
+ return true;
+ return false;
+}
+
+static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
+ void __user *argp)
+{
+ switch (cmd) {
+ case NVME_IOCTL_ADMIN_CMD:
+ return nvme_user_cmd(ctrl, NULL, argp);
+ case NVME_IOCTL_ADMIN64_CMD:
+ return nvme_user_cmd64(ctrl, NULL, argp, false);
+ default:
+ return sed_ioctl(ctrl->opal_dev, cmd, argp);
+ }
+}
+
+#ifdef COMPAT_FOR_U64_ALIGNMENT
+struct nvme_user_io32 {
+ __u8 opcode;
+ __u8 flags;
+ __u16 control;
+ __u16 nblocks;
+ __u16 rsvd;
+ __u64 metadata;
+ __u64 addr;
+ __u64 slba;
+ __u32 dsmgmt;
+ __u32 reftag;
+ __u16 apptag;
+ __u16 appmask;
+} __attribute__((__packed__));
+#define NVME_IOCTL_SUBMIT_IO32 _IOW('N', 0x42, struct nvme_user_io32)
+#endif /* COMPAT_FOR_U64_ALIGNMENT */
+
+static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
+ void __user *argp)
+{
+ switch (cmd) {
+ case NVME_IOCTL_ID:
+ force_successful_syscall_return();
+ return ns->head->ns_id;
+ case NVME_IOCTL_IO_CMD:
+ return nvme_user_cmd(ns->ctrl, ns, argp);
+ /*
+ * struct nvme_user_io can have different padding on some 32-bit ABIs.
+ * Just accept the compat version as all fields that are used are the
+ * same size and at the same offset.
+ */
+#ifdef COMPAT_FOR_U64_ALIGNMENT
+ case NVME_IOCTL_SUBMIT_IO32:
+#endif
+ case NVME_IOCTL_SUBMIT_IO:
+ return nvme_submit_io(ns, argp);
+ case NVME_IOCTL_IO64_CMD:
+ return nvme_user_cmd64(ns->ctrl, ns, argp, false);
+ case NVME_IOCTL_IO64_CMD_VEC:
+ return nvme_user_cmd64(ns->ctrl, ns, argp, true);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static int __nvme_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *arg)
+{
+ if (is_ctrl_ioctl(cmd))
+ return nvme_ctrl_ioctl(ns->ctrl, cmd, arg);
+ return nvme_ns_ioctl(ns, cmd, arg);
+}
+
+int nvme_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct nvme_ns *ns = bdev->bd_disk->private_data;
+
+ return __nvme_ioctl(ns, cmd, (void __user *)arg);
+}
+
+long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct nvme_ns *ns =
+ container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);
+
+ return __nvme_ioctl(ns, cmd, (void __user *)arg);
+}
+
+static int nvme_uring_cmd_checks(unsigned int issue_flags)
+{
+
+ /* NVMe passthrough requires big SQE/CQE support */
+ if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
+ (IO_URING_F_SQE128|IO_URING_F_CQE32))
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags)
+{
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ int ret;
+
+ BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu));
+
+ ret = nvme_uring_cmd_checks(issue_flags);
+ if (ret)
+ return ret;
+
+ switch (ioucmd->cmd_op) {
+ case NVME_URING_CMD_IO:
+ ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false);
+ break;
+ case NVME_URING_CMD_IO_VEC:
+ ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, true);
+ break;
+ default:
+ ret = -ENOTTY;
+ }
+
+ return ret;
+}
+
+int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
+{
+ struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev,
+ struct nvme_ns, cdev);
+
+ return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
+}
+
+int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+ struct io_comp_batch *iob,
+ unsigned int poll_flags)
+{
+ struct bio *bio;
+ int ret = 0;
+ struct nvme_ns *ns;
+ struct request_queue *q;
+
+ rcu_read_lock();
+ bio = READ_ONCE(ioucmd->cookie);
+ ns = container_of(file_inode(ioucmd->file)->i_cdev,
+ struct nvme_ns, cdev);
+ q = ns->queue;
+ if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio && bio->bi_bdev)
+ ret = bio_poll(bio, iob, poll_flags);
+ rcu_read_unlock();
+ return ret;
+}
+#ifdef CONFIG_NVME_MULTIPATH
+static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
+ void __user *argp, struct nvme_ns_head *head, int srcu_idx)
+ __releases(&head->srcu)
+{
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ int ret;
+
+ nvme_get_ctrl(ns->ctrl);
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp);
+
+ nvme_put_ctrl(ctrl);
+ return ret;
+}
+
+int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct nvme_ns_head *head = bdev->bd_disk->private_data;
+ void __user *argp = (void __user *)arg;
+ struct nvme_ns *ns;
+ int srcu_idx, ret = -EWOULDBLOCK;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ ns = nvme_find_path(head);
+ if (!ns)
+ goto out_unlock;
+
+ /*
+ * Handle ioctls that apply to the controller instead of the namespace
+	 * separately and drop the ns SRCU reference early. This avoids a
+ * deadlock when deleting namespaces using the passthrough interface.
+ */
+ if (is_ctrl_ioctl(cmd))
+ return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
+
+ ret = nvme_ns_ioctl(ns, cmd, argp);
+out_unlock:
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+
+long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct cdev *cdev = file_inode(file)->i_cdev;
+ struct nvme_ns_head *head =
+ container_of(cdev, struct nvme_ns_head, cdev);
+ void __user *argp = (void __user *)arg;
+ struct nvme_ns *ns;
+ int srcu_idx, ret = -EWOULDBLOCK;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ ns = nvme_find_path(head);
+ if (!ns)
+ goto out_unlock;
+
+ if (is_ctrl_ioctl(cmd))
+ return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
+
+ ret = nvme_ns_ioctl(ns, cmd, argp);
+out_unlock:
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+
+int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags)
+{
+ struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
+ struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
+ int srcu_idx = srcu_read_lock(&head->srcu);
+ struct nvme_ns *ns = nvme_find_path(head);
+ int ret = -EINVAL;
+
+ if (ns)
+ ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+
+int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+ struct io_comp_batch *iob,
+ unsigned int poll_flags)
+{
+ struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
+ struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
+ int srcu_idx = srcu_read_lock(&head->srcu);
+ struct nvme_ns *ns = nvme_find_path(head);
+ struct bio *bio;
+ int ret = 0;
+ struct request_queue *q;
+
+ if (ns) {
+ rcu_read_lock();
+ bio = READ_ONCE(ioucmd->cookie);
+ q = ns->queue;
+ if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio
+ && bio->bi_bdev)
+ ret = bio_poll(bio, iob, poll_flags);
+ rcu_read_unlock();
+ }
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+#endif /* CONFIG_NVME_MULTIPATH */
+
+int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
+{
+ struct nvme_ctrl *ctrl = ioucmd->file->private_data;
+ int ret;
+
+ /* IOPOLL not supported yet */
+ if (issue_flags & IO_URING_F_IOPOLL)
+ return -EOPNOTSUPP;
+
+ ret = nvme_uring_cmd_checks(issue_flags);
+ if (ret)
+ return ret;
+
+ switch (ioucmd->cmd_op) {
+ case NVME_URING_CMD_ADMIN:
+ ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, false);
+ break;
+ case NVME_URING_CMD_ADMIN_VEC:
+ ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, true);
+ break;
+ default:
+ ret = -ENOTTY;
+ }
+
+ return ret;
+}
+
+static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
+{
+ struct nvme_ns *ns;
+ int ret;
+
+ down_read(&ctrl->namespaces_rwsem);
+ if (list_empty(&ctrl->namespaces)) {
+ ret = -ENOTTY;
+ goto out_unlock;
+ }
+
+ ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
+ if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
+ dev_warn(ctrl->device,
+ "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ dev_warn(ctrl->device,
+ "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
+ kref_get(&ns->kref);
+ up_read(&ctrl->namespaces_rwsem);
+
+ ret = nvme_user_cmd(ctrl, ns, argp);
+ nvme_put_ns(ns);
+ return ret;
+
+out_unlock:
+ up_read(&ctrl->namespaces_rwsem);
+ return ret;
+}
+
+long nvme_dev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct nvme_ctrl *ctrl = file->private_data;
+ void __user *argp = (void __user *)arg;
+
+ switch (cmd) {
+ case NVME_IOCTL_ADMIN_CMD:
+ return nvme_user_cmd(ctrl, NULL, argp);
+ case NVME_IOCTL_ADMIN64_CMD:
+ return nvme_user_cmd64(ctrl, NULL, argp, false);
+ case NVME_IOCTL_IO_CMD:
+ return nvme_dev_user_cmd(ctrl, argp);
+ case NVME_IOCTL_RESET:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ dev_warn(ctrl->device, "resetting controller\n");
+ return nvme_reset_ctrl_sync(ctrl);
+ case NVME_IOCTL_SUBSYS_RESET:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ return nvme_reset_subsystem(ctrl);
+ case NVME_IOCTL_RESCAN:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ nvme_queue_scan(ctrl);
+ return 0;
+ default:
+ return -ENOTTY;
+ }
+}
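
The controller-level ioctls dispatched by nvme_dev_ioctl() above can be exercised with a few lines of userspace code (CAP_SYS_ADMIN required); /dev/nvme0 is an assumed node name:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	int fd = open("/dev/nvme0", O_RDWR);	/* assumed controller node */

	if (fd < 0)
		return 1;

	if (ioctl(fd, NVME_IOCTL_RESCAN) < 0)	/* re-run namespace scanning */
		perror("NVME_IOCTL_RESCAN");

	if (ioctl(fd, NVME_IOCTL_RESET) < 0)	/* synchronous controller reset */
		perror("NVME_IOCTL_RESET");

	return 0;
}
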
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
deleted file mode 100644
index ec46693f6b64..000000000000
--- a/drivers/nvme/host/lightnvm.c
+++ /dev/null
@@ -1,1272 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * nvme-lightnvm.c - LightNVM NVMe device
- *
- * Copyright (C) 2014-2015 IT University of Copenhagen
- * Initial release: Matias Bjorling <mb@lightnvm.io>
- */
-
-#include "nvme.h"
-
-#include <linux/nvme.h>
-#include <linux/bitops.h>
-#include <linux/lightnvm.h>
-#include <linux/vmalloc.h>
-#include <linux/sched/sysctl.h>
-#include <uapi/linux/lightnvm.h>
-
-enum nvme_nvm_admin_opcode {
- nvme_nvm_admin_identity = 0xe2,
- nvme_nvm_admin_get_bb_tbl = 0xf2,
- nvme_nvm_admin_set_bb_tbl = 0xf1,
-};
-
-enum nvme_nvm_log_page {
- NVME_NVM_LOG_REPORT_CHUNK = 0xca,
-};
-
-struct nvme_nvm_ph_rw {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __u64 rsvd2;
- __le64 metadata;
- __le64 prp1;
- __le64 prp2;
- __le64 spba;
- __le16 length;
- __le16 control;
- __le32 dsmgmt;
- __le64 resv;
-};
-
-struct nvme_nvm_erase_blk {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __u64 rsvd[2];
- __le64 prp1;
- __le64 prp2;
- __le64 spba;
- __le16 length;
- __le16 control;
- __le32 dsmgmt;
- __le64 resv;
-};
-
-struct nvme_nvm_identity {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __u64 rsvd[2];
- __le64 prp1;
- __le64 prp2;
- __u32 rsvd11[6];
-};
-
-struct nvme_nvm_getbbtbl {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __u64 rsvd[2];
- __le64 prp1;
- __le64 prp2;
- __le64 spba;
- __u32 rsvd4[4];
-};
-
-struct nvme_nvm_setbbtbl {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __le64 rsvd[2];
- __le64 prp1;
- __le64 prp2;
- __le64 spba;
- __le16 nlb;
- __u8 value;
- __u8 rsvd3;
- __u32 rsvd4[3];
-};
-
-struct nvme_nvm_command {
- union {
- struct nvme_common_command common;
- struct nvme_nvm_ph_rw ph_rw;
- struct nvme_nvm_erase_blk erase;
- struct nvme_nvm_identity identity;
- struct nvme_nvm_getbbtbl get_bb;
- struct nvme_nvm_setbbtbl set_bb;
- };
-};
-
-struct nvme_nvm_id12_grp {
- __u8 mtype;
- __u8 fmtype;
- __le16 res16;
- __u8 num_ch;
- __u8 num_lun;
- __u8 num_pln;
- __u8 rsvd1;
- __le16 num_chk;
- __le16 num_pg;
- __le16 fpg_sz;
- __le16 csecs;
- __le16 sos;
- __le16 rsvd2;
- __le32 trdt;
- __le32 trdm;
- __le32 tprt;
- __le32 tprm;
- __le32 tbet;
- __le32 tbem;
- __le32 mpos;
- __le32 mccap;
- __le16 cpar;
- __u8 reserved[906];
-} __packed;
-
-struct nvme_nvm_id12_addrf {
- __u8 ch_offset;
- __u8 ch_len;
- __u8 lun_offset;
- __u8 lun_len;
- __u8 pln_offset;
- __u8 pln_len;
- __u8 blk_offset;
- __u8 blk_len;
- __u8 pg_offset;
- __u8 pg_len;
- __u8 sec_offset;
- __u8 sec_len;
- __u8 res[4];
-} __packed;
-
-struct nvme_nvm_id12 {
- __u8 ver_id;
- __u8 vmnt;
- __u8 cgrps;
- __u8 res;
- __le32 cap;
- __le32 dom;
- struct nvme_nvm_id12_addrf ppaf;
- __u8 resv[228];
- struct nvme_nvm_id12_grp grp;
- __u8 resv2[2880];
-} __packed;
-
-struct nvme_nvm_bb_tbl {
- __u8 tblid[4];
- __le16 verid;
- __le16 revid;
- __le32 rvsd1;
- __le32 tblks;
- __le32 tfact;
- __le32 tgrown;
- __le32 tdresv;
- __le32 thresv;
- __le32 rsvd2[8];
- __u8 blk[0];
-};
-
-struct nvme_nvm_id20_addrf {
- __u8 grp_len;
- __u8 pu_len;
- __u8 chk_len;
- __u8 lba_len;
- __u8 resv[4];
-};
-
-struct nvme_nvm_id20 {
- __u8 mjr;
- __u8 mnr;
- __u8 resv[6];
-
- struct nvme_nvm_id20_addrf lbaf;
-
- __le32 mccap;
- __u8 resv2[12];
-
- __u8 wit;
- __u8 resv3[31];
-
- /* Geometry */
- __le16 num_grp;
- __le16 num_pu;
- __le32 num_chk;
- __le32 clba;
- __u8 resv4[52];
-
- /* Write data requirements */
- __le32 ws_min;
- __le32 ws_opt;
- __le32 mw_cunits;
- __le32 maxoc;
- __le32 maxocpu;
- __u8 resv5[44];
-
- /* Performance related metrics */
- __le32 trdt;
- __le32 trdm;
- __le32 twrt;
- __le32 twrm;
- __le32 tcrst;
- __le32 tcrsm;
- __u8 resv6[40];
-
- /* Reserved area */
- __u8 resv7[2816];
-
- /* Vendor specific */
- __u8 vs[1024];
-};
-
-struct nvme_nvm_chk_meta {
- __u8 state;
- __u8 type;
- __u8 wi;
- __u8 rsvd[5];
- __le64 slba;
- __le64 cnlb;
- __le64 wp;
-};
-
-/*
- * Check we didn't inadvertently grow the command struct
- */
-static inline void _nvme_nvm_check_size(void)
-{
- BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_grp) != 960);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_addrf) != 16);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_id12) != NVME_IDENTIFY_DATA_SIZE);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_id20_addrf) != 8);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_id20) != NVME_IDENTIFY_DATA_SIZE);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) != 32);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) !=
- sizeof(struct nvm_chk_meta));
-}
-
-static void nvme_nvm_set_addr_12(struct nvm_addrf_12 *dst,
- struct nvme_nvm_id12_addrf *src)
-{
- dst->ch_len = src->ch_len;
- dst->lun_len = src->lun_len;
- dst->blk_len = src->blk_len;
- dst->pg_len = src->pg_len;
- dst->pln_len = src->pln_len;
- dst->sec_len = src->sec_len;
-
- dst->ch_offset = src->ch_offset;
- dst->lun_offset = src->lun_offset;
- dst->blk_offset = src->blk_offset;
- dst->pg_offset = src->pg_offset;
- dst->pln_offset = src->pln_offset;
- dst->sec_offset = src->sec_offset;
-
- dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
- dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
- dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;
- dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
- dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
- dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
-}
-
-static int nvme_nvm_setup_12(struct nvme_nvm_id12 *id,
- struct nvm_geo *geo)
-{
- struct nvme_nvm_id12_grp *src;
- int sec_per_pg, sec_per_pl, pg_per_blk;
-
- if (id->cgrps != 1)
- return -EINVAL;
-
- src = &id->grp;
-
- if (src->mtype != 0) {
- pr_err("nvm: memory type not supported\n");
- return -EINVAL;
- }
-
- /* 1.2 spec. only reports a single version id - unfold */
- geo->major_ver_id = id->ver_id;
- geo->minor_ver_id = 2;
-
- /* Set compacted version for upper layers */
- geo->version = NVM_OCSSD_SPEC_12;
-
- geo->num_ch = src->num_ch;
- geo->num_lun = src->num_lun;
- geo->all_luns = geo->num_ch * geo->num_lun;
-
- geo->num_chk = le16_to_cpu(src->num_chk);
-
- geo->csecs = le16_to_cpu(src->csecs);
- geo->sos = le16_to_cpu(src->sos);
-
- pg_per_blk = le16_to_cpu(src->num_pg);
- sec_per_pg = le16_to_cpu(src->fpg_sz) / geo->csecs;
- sec_per_pl = sec_per_pg * src->num_pln;
- geo->clba = sec_per_pl * pg_per_blk;
-
- geo->all_chunks = geo->all_luns * geo->num_chk;
- geo->total_secs = geo->clba * geo->all_chunks;
-
- geo->ws_min = sec_per_pg;
- geo->ws_opt = sec_per_pg;
- geo->mw_cunits = geo->ws_opt << 3; /* default to MLC safe values */
-
- /* Do not impose values for maximum number of open blocks as it is
- * unspecified in 1.2. Users of 1.2 must be aware of this and eventually
- * specify these values through a quirk if restrictions apply.
- */
- geo->maxoc = geo->all_luns * geo->num_chk;
- geo->maxocpu = geo->num_chk;
-
- geo->mccap = le32_to_cpu(src->mccap);
-
- geo->trdt = le32_to_cpu(src->trdt);
- geo->trdm = le32_to_cpu(src->trdm);
- geo->tprt = le32_to_cpu(src->tprt);
- geo->tprm = le32_to_cpu(src->tprm);
- geo->tbet = le32_to_cpu(src->tbet);
- geo->tbem = le32_to_cpu(src->tbem);
-
- /* 1.2 compatibility */
- geo->vmnt = id->vmnt;
- geo->cap = le32_to_cpu(id->cap);
- geo->dom = le32_to_cpu(id->dom);
-
- geo->mtype = src->mtype;
- geo->fmtype = src->fmtype;
-
- geo->cpar = le16_to_cpu(src->cpar);
- geo->mpos = le32_to_cpu(src->mpos);
-
- geo->pln_mode = NVM_PLANE_SINGLE;
-
- if (geo->mpos & 0x020202) {
- geo->pln_mode = NVM_PLANE_DOUBLE;
- geo->ws_opt <<= 1;
- } else if (geo->mpos & 0x040404) {
- geo->pln_mode = NVM_PLANE_QUAD;
- geo->ws_opt <<= 2;
- }
-
- geo->num_pln = src->num_pln;
- geo->num_pg = le16_to_cpu(src->num_pg);
- geo->fpg_sz = le16_to_cpu(src->fpg_sz);
-
- nvme_nvm_set_addr_12((struct nvm_addrf_12 *)&geo->addrf, &id->ppaf);
-
- return 0;
-}
-
-static void nvme_nvm_set_addr_20(struct nvm_addrf *dst,
- struct nvme_nvm_id20_addrf *src)
-{
- dst->ch_len = src->grp_len;
- dst->lun_len = src->pu_len;
- dst->chk_len = src->chk_len;
- dst->sec_len = src->lba_len;
-
- dst->sec_offset = 0;
- dst->chk_offset = dst->sec_len;
- dst->lun_offset = dst->chk_offset + dst->chk_len;
- dst->ch_offset = dst->lun_offset + dst->lun_len;
-
- dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
- dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
- dst->chk_mask = ((1ULL << dst->chk_len) - 1) << dst->chk_offset;
- dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
-}
-
-static int nvme_nvm_setup_20(struct nvme_nvm_id20 *id,
- struct nvm_geo *geo)
-{
- geo->major_ver_id = id->mjr;
- geo->minor_ver_id = id->mnr;
-
- /* Set compacted version for upper layers */
- geo->version = NVM_OCSSD_SPEC_20;
-
- geo->num_ch = le16_to_cpu(id->num_grp);
- geo->num_lun = le16_to_cpu(id->num_pu);
- geo->all_luns = geo->num_ch * geo->num_lun;
-
- geo->num_chk = le32_to_cpu(id->num_chk);
- geo->clba = le32_to_cpu(id->clba);
-
- geo->all_chunks = geo->all_luns * geo->num_chk;
- geo->total_secs = geo->clba * geo->all_chunks;
-
- geo->ws_min = le32_to_cpu(id->ws_min);
- geo->ws_opt = le32_to_cpu(id->ws_opt);
- geo->mw_cunits = le32_to_cpu(id->mw_cunits);
- geo->maxoc = le32_to_cpu(id->maxoc);
- geo->maxocpu = le32_to_cpu(id->maxocpu);
-
- geo->trdt = le32_to_cpu(id->trdt);
- geo->trdm = le32_to_cpu(id->trdm);
- geo->tprt = le32_to_cpu(id->twrt);
- geo->tprm = le32_to_cpu(id->twrm);
- geo->tbet = le32_to_cpu(id->tcrst);
- geo->tbem = le32_to_cpu(id->tcrsm);
-
- nvme_nvm_set_addr_20(&geo->addrf, &id->lbaf);
-
- return 0;
-}
-
-static int nvme_nvm_identity(struct nvm_dev *nvmdev)
-{
- struct nvme_ns *ns = nvmdev->q->queuedata;
- struct nvme_nvm_id12 *id;
- struct nvme_nvm_command c = {};
- int ret;
-
- c.identity.opcode = nvme_nvm_admin_identity;
- c.identity.nsid = cpu_to_le32(ns->head->ns_id);
-
- id = kmalloc(sizeof(struct nvme_nvm_id12), GFP_KERNEL);
- if (!id)
- return -ENOMEM;
-
- ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
- id, sizeof(struct nvme_nvm_id12));
- if (ret) {
- ret = -EIO;
- goto out;
- }
-
- /*
- * The 1.2 and 2.0 specifications share the first byte in their geometry
- * command to make it possible to know what version a device implements.
- */
- switch (id->ver_id) {
- case 1:
- ret = nvme_nvm_setup_12(id, &nvmdev->geo);
- break;
- case 2:
- ret = nvme_nvm_setup_20((struct nvme_nvm_id20 *)id,
- &nvmdev->geo);
- break;
- default:
- dev_err(ns->ctrl->device, "OCSSD revision not supported (%d)\n",
- id->ver_id);
- ret = -EINVAL;
- }
-
-out:
- kfree(id);
- return ret;
-}
-
-static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
- u8 *blks)
-{
- struct request_queue *q = nvmdev->q;
- struct nvm_geo *geo = &nvmdev->geo;
- struct nvme_ns *ns = q->queuedata;
- struct nvme_ctrl *ctrl = ns->ctrl;
- struct nvme_nvm_command c = {};
- struct nvme_nvm_bb_tbl *bb_tbl;
- int nr_blks = geo->num_chk * geo->num_pln;
- int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
- int ret = 0;
-
- c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
- c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
- c.get_bb.spba = cpu_to_le64(ppa.ppa);
-
- bb_tbl = kzalloc(tblsz, GFP_KERNEL);
- if (!bb_tbl)
- return -ENOMEM;
-
- ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
- bb_tbl, tblsz);
- if (ret) {
- dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
- ret = -EIO;
- goto out;
- }
-
- if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
- bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
- dev_err(ctrl->device, "bbt format mismatch\n");
- ret = -EINVAL;
- goto out;
- }
-
- if (le16_to_cpu(bb_tbl->verid) != 1) {
- ret = -EINVAL;
- dev_err(ctrl->device, "bbt version not supported\n");
- goto out;
- }
-
- if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
- ret = -EINVAL;
- dev_err(ctrl->device,
- "bbt unsuspected blocks returned (%u!=%u)",
- le32_to_cpu(bb_tbl->tblks), nr_blks);
- goto out;
- }
-
- memcpy(blks, bb_tbl->blk, geo->num_chk * geo->num_pln);
-out:
- kfree(bb_tbl);
- return ret;
-}
-
-static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
- int nr_ppas, int type)
-{
- struct nvme_ns *ns = nvmdev->q->queuedata;
- struct nvme_nvm_command c = {};
- int ret = 0;
-
- c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
- c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
- c.set_bb.spba = cpu_to_le64(ppas->ppa);
- c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
- c.set_bb.value = type;
-
- ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
- NULL, 0);
- if (ret)
- dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
- ret);
- return ret;
-}
-
-/*
- * Expect the lba in device format
- */
-static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
- sector_t slba, int nchks,
- struct nvm_chk_meta *meta)
-{
- struct nvm_geo *geo = &ndev->geo;
- struct nvme_ns *ns = ndev->q->queuedata;
- struct nvme_ctrl *ctrl = ns->ctrl;
- struct nvme_nvm_chk_meta *dev_meta, *dev_meta_off;
- struct ppa_addr ppa;
- size_t left = nchks * sizeof(struct nvme_nvm_chk_meta);
- size_t log_pos, offset, len;
- int i, max_len;
- int ret = 0;
-
- /*
-	 * limit requests to a maximum of 256K to avoid issuing arbitrarily large
-	 * requests when the device does not specify a maximum transfer size.
- */
- max_len = min_t(unsigned int, ctrl->max_hw_sectors << 9, 256 * 1024);
-
- dev_meta = kmalloc(max_len, GFP_KERNEL);
- if (!dev_meta)
- return -ENOMEM;
-
- /* Normalize lba address space to obtain log offset */
- ppa.ppa = slba;
- ppa = dev_to_generic_addr(ndev, ppa);
-
- log_pos = ppa.m.chk;
- log_pos += ppa.m.pu * geo->num_chk;
- log_pos += ppa.m.grp * geo->num_lun * geo->num_chk;
-
- offset = log_pos * sizeof(struct nvme_nvm_chk_meta);
-
- while (left) {
- len = min_t(unsigned int, left, max_len);
-
- memset(dev_meta, 0, max_len);
- dev_meta_off = dev_meta;
-
- ret = nvme_get_log(ctrl, ns->head->ns_id,
- NVME_NVM_LOG_REPORT_CHUNK, 0, dev_meta, len,
- offset);
- if (ret) {
- dev_err(ctrl->device, "Get REPORT CHUNK log error\n");
- break;
- }
-
- for (i = 0; i < len; i += sizeof(struct nvme_nvm_chk_meta)) {
- meta->state = dev_meta_off->state;
- meta->type = dev_meta_off->type;
- meta->wi = dev_meta_off->wi;
- meta->slba = le64_to_cpu(dev_meta_off->slba);
- meta->cnlb = le64_to_cpu(dev_meta_off->cnlb);
- meta->wp = le64_to_cpu(dev_meta_off->wp);
-
- meta++;
- dev_meta_off++;
- }
-
- offset += len;
- left -= len;
- }
-
- kfree(dev_meta);
-
- return ret;
-}
-
-static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
- struct nvme_nvm_command *c)
-{
- c->ph_rw.opcode = rqd->opcode;
- c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
- c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
- c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
- c->ph_rw.control = cpu_to_le16(rqd->flags);
- c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
-}
-
-static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
-{
- struct nvm_rq *rqd = rq->end_io_data;
-
- rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
- rqd->error = nvme_req(rq)->status;
- nvm_end_io(rqd);
-
- kfree(nvme_req(rq)->cmd);
- blk_mq_free_request(rq);
-}
-
-static struct request *nvme_nvm_alloc_request(struct request_queue *q,
- struct nvm_rq *rqd,
- struct nvme_nvm_command *cmd)
-{
- struct nvme_ns *ns = q->queuedata;
- struct request *rq;
-
- nvme_nvm_rqtocmd(rqd, ns, cmd);
-
- rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
- if (IS_ERR(rq))
- return rq;
-
- rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
-
- if (rqd->bio)
- blk_rq_append_bio(rq, &rqd->bio);
- else
- rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
-
- return rq;
-}
-
-static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd,
- void *buf)
-{
- struct nvm_geo *geo = &dev->geo;
- struct request_queue *q = dev->q;
- struct nvme_nvm_command *cmd;
- struct request *rq;
- int ret;
-
- cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
- rq = nvme_nvm_alloc_request(q, rqd, cmd);
- if (IS_ERR(rq)) {
- ret = PTR_ERR(rq);
- goto err_free_cmd;
- }
-
- if (buf) {
- ret = blk_rq_map_kern(q, rq, buf, geo->csecs * rqd->nr_ppas,
- GFP_KERNEL);
- if (ret)
- goto err_free_cmd;
- }
-
- rq->end_io_data = rqd;
-
- blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
-
- return 0;
-
-err_free_cmd:
- kfree(cmd);
- return ret;
-}
-
-static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name,
- int size)
-{
- struct nvme_ns *ns = nvmdev->q->queuedata;
-
- return dma_pool_create(name, ns->ctrl->dev, size, PAGE_SIZE, 0);
-}
-
-static void nvme_nvm_destroy_dma_pool(void *pool)
-{
- struct dma_pool *dma_pool = pool;
-
- dma_pool_destroy(dma_pool);
-}
-
-static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
- gfp_t mem_flags, dma_addr_t *dma_handler)
-{
- return dma_pool_alloc(pool, mem_flags, dma_handler);
-}
-
-static void nvme_nvm_dev_dma_free(void *pool, void *addr,
- dma_addr_t dma_handler)
-{
- dma_pool_free(pool, addr, dma_handler);
-}
-
-static struct nvm_dev_ops nvme_nvm_dev_ops = {
- .identity = nvme_nvm_identity,
-
- .get_bb_tbl = nvme_nvm_get_bb_tbl,
- .set_bb_tbl = nvme_nvm_set_bb_tbl,
-
- .get_chk_meta = nvme_nvm_get_chk_meta,
-
- .submit_io = nvme_nvm_submit_io,
-
- .create_dma_pool = nvme_nvm_create_dma_pool,
- .destroy_dma_pool = nvme_nvm_destroy_dma_pool,
- .dev_dma_alloc = nvme_nvm_dev_dma_alloc,
- .dev_dma_free = nvme_nvm_dev_dma_free,
-};
-
-static int nvme_nvm_submit_user_cmd(struct request_queue *q,
- struct nvme_ns *ns,
- struct nvme_nvm_command *vcmd,
- void __user *ubuf, unsigned int bufflen,
- void __user *meta_buf, unsigned int meta_len,
- void __user *ppa_buf, unsigned int ppa_len,
- u32 *result, u64 *status, unsigned int timeout)
-{
- bool write = nvme_is_write((struct nvme_command *)vcmd);
- struct nvm_dev *dev = ns->ndev;
- struct gendisk *disk = ns->disk;
- struct request *rq;
- struct bio *bio = NULL;
- __le64 *ppa_list = NULL;
- dma_addr_t ppa_dma;
- __le64 *metadata = NULL;
- dma_addr_t metadata_dma;
- DECLARE_COMPLETION_ONSTACK(wait);
- int ret = 0;
-
- rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
- NVME_QID_ANY);
- if (IS_ERR(rq)) {
- ret = -ENOMEM;
- goto err_cmd;
- }
-
- rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;
-
- if (ppa_buf && ppa_len) {
- ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
- if (!ppa_list) {
- ret = -ENOMEM;
- goto err_rq;
- }
- if (copy_from_user(ppa_list, (void __user *)ppa_buf,
- sizeof(u64) * (ppa_len + 1))) {
- ret = -EFAULT;
- goto err_ppa;
- }
- vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
- } else {
- vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
- }
-
- if (ubuf && bufflen) {
- ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
- if (ret)
- goto err_ppa;
- bio = rq->bio;
-
- if (meta_buf && meta_len) {
- metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
- &metadata_dma);
- if (!metadata) {
- ret = -ENOMEM;
- goto err_map;
- }
-
- if (write) {
- if (copy_from_user(metadata,
- (void __user *)meta_buf,
- meta_len)) {
- ret = -EFAULT;
- goto err_meta;
- }
- }
- vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
- }
-
- bio->bi_disk = disk;
- }
-
- blk_execute_rq(q, NULL, rq, 0);
-
- if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
- ret = -EINTR;
- else if (nvme_req(rq)->status & 0x7ff)
- ret = -EIO;
- if (result)
- *result = nvme_req(rq)->status & 0x7ff;
- if (status)
- *status = le64_to_cpu(nvme_req(rq)->result.u64);
-
- if (metadata && !ret && !write) {
- if (copy_to_user(meta_buf, (void *)metadata, meta_len))
- ret = -EFAULT;
- }
-err_meta:
- if (meta_buf && meta_len)
- dma_pool_free(dev->dma_pool, metadata, metadata_dma);
-err_map:
- if (bio)
- blk_rq_unmap_user(bio);
-err_ppa:
- if (ppa_buf && ppa_len)
- dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
-err_rq:
- blk_mq_free_request(rq);
-err_cmd:
- return ret;
-}
-
-static int nvme_nvm_submit_vio(struct nvme_ns *ns,
- struct nvm_user_vio __user *uvio)
-{
- struct nvm_user_vio vio;
- struct nvme_nvm_command c;
- unsigned int length;
- int ret;
-
- if (copy_from_user(&vio, uvio, sizeof(vio)))
- return -EFAULT;
- if (vio.flags)
- return -EINVAL;
-
- memset(&c, 0, sizeof(c));
- c.ph_rw.opcode = vio.opcode;
- c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
- c.ph_rw.control = cpu_to_le16(vio.control);
- c.ph_rw.length = cpu_to_le16(vio.nppas);
-
- length = (vio.nppas + 1) << ns->lba_shift;
-
- ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
- (void __user *)(uintptr_t)vio.addr, length,
- (void __user *)(uintptr_t)vio.metadata,
- vio.metadata_len,
- (void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
- &vio.result, &vio.status, 0);
-
- if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
- return -EFAULT;
-
- return ret;
-}
-
-static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
- struct nvm_passthru_vio __user *uvcmd)
-{
- struct nvm_passthru_vio vcmd;
- struct nvme_nvm_command c;
- struct request_queue *q;
- unsigned int timeout = 0;
- int ret;
-
- if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
- return -EFAULT;
- if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
- return -EACCES;
- if (vcmd.flags)
- return -EINVAL;
-
- memset(&c, 0, sizeof(c));
- c.common.opcode = vcmd.opcode;
- c.common.nsid = cpu_to_le32(ns->head->ns_id);
- c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
- c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
- /* cdw11-12 */
- c.ph_rw.length = cpu_to_le16(vcmd.nppas);
- c.ph_rw.control = cpu_to_le16(vcmd.control);
- c.common.cdw13 = cpu_to_le32(vcmd.cdw13);
- c.common.cdw14 = cpu_to_le32(vcmd.cdw14);
- c.common.cdw15 = cpu_to_le32(vcmd.cdw15);
-
- if (vcmd.timeout_ms)
- timeout = msecs_to_jiffies(vcmd.timeout_ms);
-
- q = admin ? ns->ctrl->admin_q : ns->queue;
-
- ret = nvme_nvm_submit_user_cmd(q, ns,
- (struct nvme_nvm_command *)&c,
- (void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
- (void __user *)(uintptr_t)vcmd.metadata,
- vcmd.metadata_len,
- (void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
- &vcmd.result, &vcmd.status, timeout);
-
- if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
- return -EFAULT;
-
- return ret;
-}
-
-int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case NVME_NVM_IOCTL_ADMIN_VIO:
- return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
- case NVME_NVM_IOCTL_IO_VIO:
- return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
- case NVME_NVM_IOCTL_SUBMIT_VIO:
- return nvme_nvm_submit_vio(ns, (void __user *)arg);
- default:
- return -ENOTTY;
- }
-}
-
-int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
-{
- struct request_queue *q = ns->queue;
- struct nvm_dev *dev;
- struct nvm_geo *geo;
-
- _nvme_nvm_check_size();
-
- dev = nvm_alloc_dev(node);
- if (!dev)
- return -ENOMEM;
-
- /* Note that csecs and sos will be overridden if it is a 1.2 drive. */
- geo = &dev->geo;
- geo->csecs = 1 << ns->lba_shift;
- geo->sos = ns->ms;
- geo->ext = ns->ext;
- geo->mdts = ns->ctrl->max_hw_sectors;
-
- dev->q = q;
- memcpy(dev->name, disk_name, DISK_NAME_LEN);
- dev->ops = &nvme_nvm_dev_ops;
- dev->private_data = ns;
- ns->ndev = dev;
-
- return nvm_register(dev);
-}
-
-void nvme_nvm_unregister(struct nvme_ns *ns)
-{
- nvm_unregister(ns->ndev);
-}
-
-static ssize_t nvm_dev_attr_show(struct device *dev,
- struct device_attribute *dattr, char *page)
-{
- struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
- struct nvm_dev *ndev = ns->ndev;
- struct nvm_geo *geo = &ndev->geo;
- struct attribute *attr;
-
- if (!ndev)
- return 0;
-
- attr = &dattr->attr;
-
- if (strcmp(attr->name, "version") == 0) {
- if (geo->major_ver_id == 1)
- return scnprintf(page, PAGE_SIZE, "%u\n",
- geo->major_ver_id);
- else
- return scnprintf(page, PAGE_SIZE, "%u.%u\n",
- geo->major_ver_id,
- geo->minor_ver_id);
- } else if (strcmp(attr->name, "capabilities") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->cap);
- } else if (strcmp(attr->name, "read_typ") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdt);
- } else if (strcmp(attr->name, "read_max") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdm);
- } else {
- return scnprintf(page,
- PAGE_SIZE,
- "Unhandled attr(%s) in `%s`\n",
- attr->name, __func__);
- }
-}
-
-static ssize_t nvm_dev_attr_show_ppaf(struct nvm_addrf_12 *ppaf, char *page)
-{
- return scnprintf(page, PAGE_SIZE,
- "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- ppaf->ch_offset, ppaf->ch_len,
- ppaf->lun_offset, ppaf->lun_len,
- ppaf->pln_offset, ppaf->pln_len,
- ppaf->blk_offset, ppaf->blk_len,
- ppaf->pg_offset, ppaf->pg_len,
- ppaf->sec_offset, ppaf->sec_len);
-}
-
-static ssize_t nvm_dev_attr_show_12(struct device *dev,
- struct device_attribute *dattr, char *page)
-{
- struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
- struct nvm_dev *ndev = ns->ndev;
- struct nvm_geo *geo = &ndev->geo;
- struct attribute *attr;
-
- if (!ndev)
- return 0;
-
- attr = &dattr->attr;
-
- if (strcmp(attr->name, "vendor_opcode") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->vmnt);
- } else if (strcmp(attr->name, "device_mode") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->dom);
- /* kept for compatibility */
- } else if (strcmp(attr->name, "media_manager") == 0) {
- return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
- } else if (strcmp(attr->name, "ppa_format") == 0) {
- return nvm_dev_attr_show_ppaf((void *)&geo->addrf, page);
- } else if (strcmp(attr->name, "media_type") == 0) { /* u8 */
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->mtype);
- } else if (strcmp(attr->name, "flash_media_type") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->fmtype);
- } else if (strcmp(attr->name, "num_channels") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch);
- } else if (strcmp(attr->name, "num_luns") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun);
- } else if (strcmp(attr->name, "num_planes") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pln);
- } else if (strcmp(attr->name, "num_blocks") == 0) { /* u16 */
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk);
- } else if (strcmp(attr->name, "num_pages") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pg);
- } else if (strcmp(attr->name, "page_size") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->fpg_sz);
- } else if (strcmp(attr->name, "hw_sector_size") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->csecs);
- } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->sos);
- } else if (strcmp(attr->name, "prog_typ") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt);
- } else if (strcmp(attr->name, "prog_max") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm);
- } else if (strcmp(attr->name, "erase_typ") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet);
- } else if (strcmp(attr->name, "erase_max") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem);
- } else if (strcmp(attr->name, "multiplane_modes") == 0) {
- return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mpos);
- } else if (strcmp(attr->name, "media_capabilities") == 0) {
- return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mccap);
- } else if (strcmp(attr->name, "max_phys_secs") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", NVM_MAX_VLBA);
- } else {
- return scnprintf(page, PAGE_SIZE,
- "Unhandled attr(%s) in `%s`\n",
- attr->name, __func__);
- }
-}
-
-static ssize_t nvm_dev_attr_show_20(struct device *dev,
- struct device_attribute *dattr, char *page)
-{
- struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
- struct nvm_dev *ndev = ns->ndev;
- struct nvm_geo *geo = &ndev->geo;
- struct attribute *attr;
-
- if (!ndev)
- return 0;
-
- attr = &dattr->attr;
-
- if (strcmp(attr->name, "groups") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch);
- } else if (strcmp(attr->name, "punits") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun);
- } else if (strcmp(attr->name, "chunks") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk);
- } else if (strcmp(attr->name, "clba") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->clba);
- } else if (strcmp(attr->name, "ws_min") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_min);
- } else if (strcmp(attr->name, "ws_opt") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_opt);
- } else if (strcmp(attr->name, "maxoc") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxoc);
- } else if (strcmp(attr->name, "maxocpu") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxocpu);
- } else if (strcmp(attr->name, "mw_cunits") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->mw_cunits);
- } else if (strcmp(attr->name, "write_typ") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt);
- } else if (strcmp(attr->name, "write_max") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm);
- } else if (strcmp(attr->name, "reset_typ") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet);
- } else if (strcmp(attr->name, "reset_max") == 0) {
- return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem);
- } else {
- return scnprintf(page, PAGE_SIZE,
- "Unhandled attr(%s) in `%s`\n",
- attr->name, __func__);
- }
-}
-
-#define NVM_DEV_ATTR_RO(_name) \
- DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
-#define NVM_DEV_ATTR_12_RO(_name) \
- DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_12, NULL)
-#define NVM_DEV_ATTR_20_RO(_name) \
- DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_20, NULL)
-
-/* general attributes */
-static NVM_DEV_ATTR_RO(version);
-static NVM_DEV_ATTR_RO(capabilities);
-
-static NVM_DEV_ATTR_RO(read_typ);
-static NVM_DEV_ATTR_RO(read_max);
-
-/* 1.2 values */
-static NVM_DEV_ATTR_12_RO(vendor_opcode);
-static NVM_DEV_ATTR_12_RO(device_mode);
-static NVM_DEV_ATTR_12_RO(ppa_format);
-static NVM_DEV_ATTR_12_RO(media_manager);
-static NVM_DEV_ATTR_12_RO(media_type);
-static NVM_DEV_ATTR_12_RO(flash_media_type);
-static NVM_DEV_ATTR_12_RO(num_channels);
-static NVM_DEV_ATTR_12_RO(num_luns);
-static NVM_DEV_ATTR_12_RO(num_planes);
-static NVM_DEV_ATTR_12_RO(num_blocks);
-static NVM_DEV_ATTR_12_RO(num_pages);
-static NVM_DEV_ATTR_12_RO(page_size);
-static NVM_DEV_ATTR_12_RO(hw_sector_size);
-static NVM_DEV_ATTR_12_RO(oob_sector_size);
-static NVM_DEV_ATTR_12_RO(prog_typ);
-static NVM_DEV_ATTR_12_RO(prog_max);
-static NVM_DEV_ATTR_12_RO(erase_typ);
-static NVM_DEV_ATTR_12_RO(erase_max);
-static NVM_DEV_ATTR_12_RO(multiplane_modes);
-static NVM_DEV_ATTR_12_RO(media_capabilities);
-static NVM_DEV_ATTR_12_RO(max_phys_secs);
-
-/* 2.0 values */
-static NVM_DEV_ATTR_20_RO(groups);
-static NVM_DEV_ATTR_20_RO(punits);
-static NVM_DEV_ATTR_20_RO(chunks);
-static NVM_DEV_ATTR_20_RO(clba);
-static NVM_DEV_ATTR_20_RO(ws_min);
-static NVM_DEV_ATTR_20_RO(ws_opt);
-static NVM_DEV_ATTR_20_RO(maxoc);
-static NVM_DEV_ATTR_20_RO(maxocpu);
-static NVM_DEV_ATTR_20_RO(mw_cunits);
-static NVM_DEV_ATTR_20_RO(write_typ);
-static NVM_DEV_ATTR_20_RO(write_max);
-static NVM_DEV_ATTR_20_RO(reset_typ);
-static NVM_DEV_ATTR_20_RO(reset_max);
-
-static struct attribute *nvm_dev_attrs[] = {
- /* version agnostic attrs */
- &dev_attr_version.attr,
- &dev_attr_capabilities.attr,
- &dev_attr_read_typ.attr,
- &dev_attr_read_max.attr,
-
- /* 1.2 attrs */
- &dev_attr_vendor_opcode.attr,
- &dev_attr_device_mode.attr,
- &dev_attr_media_manager.attr,
- &dev_attr_ppa_format.attr,
- &dev_attr_media_type.attr,
- &dev_attr_flash_media_type.attr,
- &dev_attr_num_channels.attr,
- &dev_attr_num_luns.attr,
- &dev_attr_num_planes.attr,
- &dev_attr_num_blocks.attr,
- &dev_attr_num_pages.attr,
- &dev_attr_page_size.attr,
- &dev_attr_hw_sector_size.attr,
- &dev_attr_oob_sector_size.attr,
- &dev_attr_prog_typ.attr,
- &dev_attr_prog_max.attr,
- &dev_attr_erase_typ.attr,
- &dev_attr_erase_max.attr,
- &dev_attr_multiplane_modes.attr,
- &dev_attr_media_capabilities.attr,
- &dev_attr_max_phys_secs.attr,
-
- /* 2.0 attrs */
- &dev_attr_groups.attr,
- &dev_attr_punits.attr,
- &dev_attr_chunks.attr,
- &dev_attr_clba.attr,
- &dev_attr_ws_min.attr,
- &dev_attr_ws_opt.attr,
- &dev_attr_maxoc.attr,
- &dev_attr_maxocpu.attr,
- &dev_attr_mw_cunits.attr,
-
- &dev_attr_write_typ.attr,
- &dev_attr_write_max.attr,
- &dev_attr_reset_typ.attr,
- &dev_attr_reset_max.attr,
-
- NULL,
-};
-
-static umode_t nvm_dev_attrs_visible(struct kobject *kobj,
- struct attribute *attr, int index)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct gendisk *disk = dev_to_disk(dev);
- struct nvme_ns *ns = disk->private_data;
- struct nvm_dev *ndev = ns->ndev;
- struct device_attribute *dev_attr =
- container_of(attr, typeof(*dev_attr), attr);
-
- if (!ndev)
- return 0;
-
- if (dev_attr->show == nvm_dev_attr_show)
- return attr->mode;
-
- switch (ndev->geo.major_ver_id) {
- case 1:
- if (dev_attr->show == nvm_dev_attr_show_12)
- return attr->mode;
- break;
- case 2:
- if (dev_attr->show == nvm_dev_attr_show_20)
- return attr->mode;
- break;
- }
-
- return 0;
-}
-
-const struct attribute_group nvme_nvm_attr_group = {
- .name = "lightnvm",
- .attrs = nvm_dev_attrs,
- .is_visible = nvm_dev_attrs_visible,
-};
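The removed nvme_nvm_get_chk_meta() above pages through the report-chunk log in transfers capped at min(MDTS, 256K). A minimal sketch of that bounded-paging pattern, with a hypothetical read_log() callback standing in for nvme_get_log():

static int read_log_paged(void *dst, size_t total, size_t max_len, u64 offset,
			  int (*read_log)(void *buf, size_t len, u64 off))
{
	size_t left = total;

	while (left) {
		size_t len = min(left, max_len);
		int ret = read_log(dst, len, offset);

		if (ret)
			return ret;	/* abort on the first failed transfer */
		dst += len;		/* void pointer arithmetic, as in kernel C */
		offset += len;
		left -= len;
	}
	return 0;
}

The removed code additionally converts each returned nvme_nvm_chk_meta entry to host endianness before handing it back to lightnvm.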
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index a11900cf3a36..93e2138a8b42 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -3,15 +3,53 @@
* Copyright (c) 2017-2018 Christoph Hellwig.
*/
+#include <linux/backing-dev.h>
#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
#include <trace/events/block.h>
#include "nvme.h"
-static bool multipath = true;
+bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
"turn on native support for multiple controllers per subsystem");
+static const char *nvme_iopolicy_names[] = {
+ [NVME_IOPOLICY_NUMA] = "numa",
+ [NVME_IOPOLICY_RR] = "round-robin",
+};
+
+static int iopolicy = NVME_IOPOLICY_NUMA;
+
+static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
+{
+ if (!val)
+ return -EINVAL;
+ if (!strncmp(val, "numa", 4))
+ iopolicy = NVME_IOPOLICY_NUMA;
+ else if (!strncmp(val, "round-robin", 11))
+ iopolicy = NVME_IOPOLICY_RR;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
+{
+ return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]);
+}
+
+module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
+ &iopolicy, 0644);
+MODULE_PARM_DESC(iopolicy,
+ "Default multipath I/O policy; 'numa' (default) or 'round-robin'");
+
+void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
+{
+ subsys->iopolicy = iopolicy;
+}
+
void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
struct nvme_ns_head *h;
@@ -42,75 +80,37 @@ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
blk_freeze_queue_start(h->disk->queue);
}
-/*
- * If multipathing is enabled we need to always use the subsystem instance
- * number for numbering our devices to avoid conflicts between subsystems that
- * have multiple controllers and thus use the multipath-aware subsystem node
- * and those that have a single controller and use the controller node
- * directly.
- */
-void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
- struct nvme_ctrl *ctrl, int *flags)
-{
- if (!multipath) {
- sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
- } else if (ns->head->disk) {
- sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
- ctrl->instance, ns->head->instance);
- *flags = GENHD_FL_HIDDEN;
- } else {
- sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
- ns->head->instance);
- }
-}
-
void nvme_failover_req(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
- u16 status = nvme_req(req)->status;
+ u16 status = nvme_req(req)->status & 0x7ff;
unsigned long flags;
+ struct bio *bio;
- spin_lock_irqsave(&ns->head->requeue_lock, flags);
- blk_steal_bios(&ns->head->requeue_list, req);
- spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
- blk_mq_end_request(req, 0);
+ nvme_mpath_clear_current_path(ns);
- switch (status & 0x7ff) {
- case NVME_SC_ANA_TRANSITION:
- case NVME_SC_ANA_INACCESSIBLE:
- case NVME_SC_ANA_PERSISTENT_LOSS:
- /*
- * If we got back an ANA error we know the controller is alive,
-	 * but not ready to serve this namespace. The spec suggests
- * we should update our general state here, but due to the fact
- * that the admin and I/O queues are not serialized that is
- * fundamentally racy. So instead just clear the current path,
-	 * mark the path as pending and kick off a re-read of the ANA
- * log page ASAP.
- */
- nvme_mpath_clear_current_path(ns);
- if (ns->ctrl->ana_log_buf) {
- set_bit(NVME_NS_ANA_PENDING, &ns->flags);
- queue_work(nvme_wq, &ns->ctrl->ana_work);
+ /*
+ * If we got back an ANA error, we know the controller is alive but not
+	 * ready to serve this namespace. Kick off a re-read of the ANA
+ * information page, and just try any other available path for now.
+ */
+ if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
+ set_bit(NVME_NS_ANA_PENDING, &ns->flags);
+ queue_work(nvme_wq, &ns->ctrl->ana_work);
+ }
+
+ spin_lock_irqsave(&ns->head->requeue_lock, flags);
+ for (bio = req->bio; bio; bio = bio->bi_next) {
+ bio_set_dev(bio, ns->head->disk->part0);
+ if (bio->bi_opf & REQ_POLLED) {
+ bio->bi_opf &= ~REQ_POLLED;
+ bio->bi_cookie = BLK_QC_T_NONE;
}
- break;
- case NVME_SC_HOST_PATH_ERROR:
- case NVME_SC_HOST_ABORTED_CMD:
- /*
- * Temporary transport disruption in talking to the controller.
- * Try to send on a new path.
- */
- nvme_mpath_clear_current_path(ns);
- break;
- default:
- /*
- * Reset the controller for any non-ANA error as we don't know
- * what caused the error.
- */
- nvme_reset_ctrl(ns->ctrl);
- break;
}
+ blk_steal_bios(&ns->head->requeue_list, req);
+ spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
+ blk_mq_end_request(req, 0);
kblockd_schedule_work(&ns->head->requeue_work);
}
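The reworked nvme_failover_req() above masks the completion status with 0x7ff before classifying it. Assuming the NVMe status field layout after the phase bit has been shifted out (SC in bits 7:0, SCT in bits 10:8, CRD/More/DNR above that), the masking can be illustrated with a small helper mirroring the nvme_is_path_error() check added later in this diff:

static bool is_path_related(u16 status)
{
	/* e.g. status == 0x4302: DNR | SCT 3h (path related) | SC 02h */
	status &= 0x7ff;			/* 0x302: drop DNR/More/CRD, keep SCT + SC */
	return (status & 0x700) == 0x300;	/* SCT 3h means "path related status" */
}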
@@ -120,8 +120,11 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- if (ns->head->disk)
- kblockd_schedule_work(&ns->head->requeue_work);
+ if (!ns->head->disk)
+ continue;
+ kblockd_schedule_work(&ns->head->requeue_work);
+ if (ctrl->state == NVME_CTRL_LIVE)
+ disk_uevent(ns->head->disk, KOBJ_CHANGE);
}
up_read(&ctrl->namespaces_rwsem);
}
@@ -158,20 +161,44 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
- mutex_lock(&ctrl->scan_lock);
down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
- if (nvme_mpath_clear_current_path(ns))
- kblockd_schedule_work(&ns->head->requeue_work);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ nvme_mpath_clear_current_path(ns);
+ kblockd_schedule_work(&ns->head->requeue_work);
+ }
up_read(&ctrl->namespaces_rwsem);
- mutex_unlock(&ctrl->scan_lock);
+}
+
+void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
+{
+ struct nvme_ns_head *head = ns->head;
+ sector_t capacity = get_capacity(head->disk);
+ int node;
+
+ list_for_each_entry_rcu(ns, &head->list, siblings) {
+ if (capacity != get_capacity(ns->disk))
+ clear_bit(NVME_NS_READY, &ns->flags);
+ }
+
+ for_each_node(node)
+ rcu_assign_pointer(head->current_path[node], NULL);
+ kblockd_schedule_work(&head->requeue_work);
}
static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
- return ns->ctrl->state != NVME_CTRL_LIVE ||
- test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
- test_bit(NVME_NS_REMOVING, &ns->flags);
+ /*
+ * We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
+ * still be able to complete assuming that the controller is connected.
+ * Otherwise it will fail immediately and return to the requeue list.
+ */
+ if (ns->ctrl->state != NVME_CTRL_LIVE &&
+ ns->ctrl->state != NVME_CTRL_DELETING)
+ return true;
+ if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
+ !test_bit(NVME_NS_READY, &ns->flags))
+ return true;
+ return false;
}
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
@@ -226,7 +253,7 @@ static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
int node, struct nvme_ns *old)
{
- struct nvme_ns *ns, *found, *fallback = NULL;
+ struct nvme_ns *ns, *found = NULL;
if (list_is_singular(&head->list)) {
if (nvme_path_is_disabled(old))
@@ -235,7 +262,7 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
}
for (ns = nvme_next_ns(head, old);
- ns != old;
+ ns && ns != old;
ns = nvme_next_ns(head, ns)) {
if (nvme_path_is_disabled(ns))
continue;
@@ -245,12 +272,22 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
goto out;
}
if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
- fallback = ns;
+ found = ns;
}
- if (!fallback)
+ /*
+ * The loop above skips the current path for round-robin semantics.
+ * Fall back to the current path if either:
+ * - no other optimized path found and current is optimized,
+ * - no other usable path found and current is usable.
+ */
+ if (!nvme_path_is_disabled(old) &&
+ (old->ana_state == NVME_ANA_OPTIMIZED ||
+ (!found && old->ana_state == NVME_ANA_NONOPTIMIZED)))
+ return old;
+
+ if (!found)
return NULL;
- found = fallback;
out:
rcu_assign_pointer(head->current_path[node], found);
return found;
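The fallback added to nvme_round_robin_path() above boils down to one rule; a sketch with hypothetical names (not a kernel helper), under the assumption that found is either another non-optimized sibling or NULL, since an optimized sibling would have been taken directly:

static struct nvme_ns *rr_fallback(struct nvme_ns *old, struct nvme_ns *found,
				   bool old_usable)
{
	/* keep the current path if it is usable and no better sibling exists */
	if (old_usable &&
	    (old->ana_state == NVME_ANA_OPTIMIZED ||
	     (!found && old->ana_state == NVME_ANA_NONOPTIMIZED)))
		return old;
	return found;	/* another non-optimized sibling, or NULL for "no path" */
}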
@@ -268,10 +305,13 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
struct nvme_ns *ns;
ns = srcu_dereference(head->current_path[node], &head->srcu);
- if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR && ns)
- ns = nvme_round_robin_path(head, node, ns);
- if (unlikely(!ns || !nvme_path_is_optimized(ns)))
- ns = __nvme_find_path(head, node);
+ if (unlikely(!ns))
+ return __nvme_find_path(head, node);
+
+ if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
+ return nvme_round_robin_path(head, node, ns);
+ if (unlikely(!nvme_path_is_optimized(ns)))
+ return __nvme_find_path(head, node);
return ns;
}
@@ -280,6 +320,8 @@ static bool nvme_available_path(struct nvme_ns_head *head)
struct nvme_ns *ns;
list_for_each_entry_rcu(ns, &head->list, siblings) {
+ if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
+ continue;
switch (ns->ctrl->state) {
case NVME_CTRL_LIVE:
case NVME_CTRL_RESETTING:
@@ -293,32 +335,28 @@ static bool nvme_available_path(struct nvme_ns_head *head)
return false;
}
-static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
- struct bio *bio)
+static void nvme_ns_head_submit_bio(struct bio *bio)
{
- struct nvme_ns_head *head = q->queuedata;
+ struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
struct device *dev = disk_to_dev(head->disk);
struct nvme_ns *ns;
- blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
/*
- * The namespace might be going away and the bio might
- * be moved to a different queue via blk_steal_bios(),
- * so we need to use the bio_split pool from the original
- * queue to allocate the bvecs from.
+ * The namespace might be going away and the bio might be moved to a
+ * different queue via blk_steal_bios(), so we need to use the bio_split
+ * pool from the original queue to allocate the bvecs from.
*/
- blk_queue_split(q, &bio);
+ bio = bio_split_to_limits(bio);
srcu_idx = srcu_read_lock(&head->srcu);
ns = nvme_find_path(head);
if (likely(ns)) {
- bio->bi_disk = ns->disk;
+ bio_set_dev(bio, ns->disk->part0);
bio->bi_opf |= REQ_NVME_MPATH;
- trace_block_bio_remap(bio->bi_disk->queue, bio,
- disk_devt(ns->head->disk),
+ trace_block_bio_remap(bio, disk_devt(ns->head->disk),
bio->bi_iter.bi_sector);
- ret = direct_make_request(bio);
+ submit_bio_noacct(bio);
} else if (nvme_available_path(head)) {
dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
@@ -328,11 +366,94 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
} else {
dev_warn_ratelimited(dev, "no available path - failing I/O\n");
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
+ bio_io_error(bio);
}
srcu_read_unlock(&head->srcu, srcu_idx);
+}
+
+static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
+{
+ if (!nvme_tryget_ns_head(bdev->bd_disk->private_data))
+ return -ENXIO;
+ return 0;
+}
+
+static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
+{
+ nvme_put_ns_head(disk->private_data);
+}
+
+#ifdef CONFIG_BLK_DEV_ZONED
+static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
+{
+ struct nvme_ns_head *head = disk->private_data;
+ struct nvme_ns *ns;
+ int srcu_idx, ret = -EWOULDBLOCK;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ ns = nvme_find_path(head);
+ if (ns)
+ ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data);
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+#else
+#define nvme_ns_head_report_zones NULL
+#endif /* CONFIG_BLK_DEV_ZONED */
+
+const struct block_device_operations nvme_ns_head_ops = {
+ .owner = THIS_MODULE,
+ .submit_bio = nvme_ns_head_submit_bio,
+ .open = nvme_ns_head_open,
+ .release = nvme_ns_head_release,
+ .ioctl = nvme_ns_head_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
+ .getgeo = nvme_getgeo,
+ .report_zones = nvme_ns_head_report_zones,
+ .pr_ops = &nvme_pr_ops,
+};
+
+static inline struct nvme_ns_head *cdev_to_ns_head(struct cdev *cdev)
+{
+ return container_of(cdev, struct nvme_ns_head, cdev);
+}
+
+static int nvme_ns_head_chr_open(struct inode *inode, struct file *file)
+{
+ if (!nvme_tryget_ns_head(cdev_to_ns_head(inode->i_cdev)))
+ return -ENXIO;
+ return 0;
+}
+
+static int nvme_ns_head_chr_release(struct inode *inode, struct file *file)
+{
+ nvme_put_ns_head(cdev_to_ns_head(inode->i_cdev));
+ return 0;
+}
+
+static const struct file_operations nvme_ns_head_chr_fops = {
+ .owner = THIS_MODULE,
+ .open = nvme_ns_head_chr_open,
+ .release = nvme_ns_head_chr_release,
+ .unlocked_ioctl = nvme_ns_head_chr_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .uring_cmd = nvme_ns_head_chr_uring_cmd,
+ .uring_cmd_iopoll = nvme_ns_head_chr_uring_cmd_iopoll,
+};
+
+static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
+{
+ int ret;
+
+ head->cdev_device.parent = &head->subsys->dev;
+ ret = dev_set_name(&head->cdev_device, "ng%dn%d",
+ head->subsys->instance, head->instance);
+ if (ret)
+ return ret;
+ ret = nvme_cdev_add(&head->cdev, &head->cdev_device,
+ &nvme_ns_head_chr_fops, THIS_MODULE);
return ret;
}
@@ -350,18 +471,12 @@ static void nvme_requeue_work(struct work_struct *work)
next = bio->bi_next;
bio->bi_next = NULL;
- /*
- * Reset disk to the mpath node and resubmit to select a new
- * path.
- */
- bio->bi_disk = head->disk;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
}
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
- struct request_queue *q;
bool vwc = false;
mutex_init(&head->lock);
@@ -371,57 +486,69 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
/*
	 * Add a multipath node if the subsystem supports multiple controllers.
- * We also do this for private namespaces as the namespace sharing data could
- * change after a rescan.
+ * We also do this for private namespaces as the namespace sharing flag
+ * could change after a rescan.
*/
- if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
+ if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
+ !nvme_is_unique_nsid(ctrl, head) || !multipath)
return 0;
- q = blk_alloc_queue_node(GFP_KERNEL, ctrl->numa_node);
- if (!q)
- goto out;
- q->queuedata = head;
- blk_queue_make_request(q, nvme_ns_head_make_request);
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- /* set to a default value for 512 until disk is validated */
- blk_queue_logical_block_size(q, 512);
- blk_set_stacking_limits(&q->limits);
-
-	/* we need to propagate up the VWC settings */
- if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
- vwc = true;
- blk_queue_write_cache(q, vwc, vwc);
-
- head->disk = alloc_disk(0);
+ head->disk = blk_alloc_disk(ctrl->numa_node);
if (!head->disk)
- goto out_cleanup_queue;
+ return -ENOMEM;
head->disk->fops = &nvme_ns_head_ops;
head->disk->private_data = head;
- head->disk->queue = q;
- head->disk->flags = GENHD_FL_EXT_DEVT;
sprintf(head->disk->disk_name, "nvme%dn%d",
ctrl->subsys->instance, head->instance);
- return 0;
-out_cleanup_queue:
- blk_cleanup_queue(q);
-out:
- return -ENOMEM;
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
+ blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);
+ /*
+ * This assumes all controllers that refer to a namespace either
+ * support poll queues or not. That is not a strict guarantee,
+	 * but if the assumption is wrong the effect is only suboptimal
+	 * performance, not a correctness problem.
+ */
+ if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL &&
+ ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues)
+ blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue);
+
+ /* set to a default value of 512 until the disk is validated */
+ blk_queue_logical_block_size(head->disk->queue, 512);
+ blk_set_stacking_limits(&head->disk->queue->limits);
+ blk_queue_dma_alignment(head->disk->queue, 3);
+
+	/* we need to propagate up the VWC settings */
+ if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+ vwc = true;
+ blk_queue_write_cache(head->disk->queue, vwc, vwc);
+ return 0;
}
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
struct nvme_ns_head *head = ns->head;
-
- lockdep_assert_held(&ns->head->lock);
+ int rc;
if (!head->disk)
return;
- if (!(head->disk->flags & GENHD_FL_UP))
- device_add_disk(&head->subsys->dev, head->disk,
- nvme_ns_id_attr_groups);
+ /*
+ * test_and_set_bit() is used because it is protecting against two nvme
+ * paths simultaneously calling device_add_disk() on the same namespace
+ * head.
+ */
+ if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
+ rc = device_add_disk(&head->subsys->dev, head->disk,
+ nvme_ns_id_attr_groups);
+ if (rc) {
+			clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
+ return;
+ }
+ nvme_add_ns_head_cdev(head);
+ }
+ mutex_lock(&head->lock);
if (nvme_path_is_optimized(ns)) {
int node, srcu_idx;
@@ -430,9 +557,10 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
__nvme_find_path(head, node);
srcu_read_unlock(&head->srcu, srcu_idx);
}
+ mutex_unlock(&head->lock);
- synchronize_srcu(&ns->head->srcu);
- kblockd_schedule_work(&ns->head->requeue_work);
+ synchronize_srcu(&head->srcu);
+ kblockd_schedule_work(&head->requeue_work);
}
static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
@@ -454,7 +582,7 @@ static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
return -EINVAL;
nr_nsids = le32_to_cpu(desc->nnsids);
- nsid_buf_size = nr_nsids * sizeof(__le32);
+ nsid_buf_size = flex_array_size(desc, nsids, nr_nsids);
if (WARN_ON_ONCE(desc->grpid == 0))
return -EINVAL;
@@ -487,14 +615,21 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
struct nvme_ns *ns)
{
- mutex_lock(&ns->head->lock);
ns->ana_grpid = le32_to_cpu(desc->grpid);
ns->ana_state = desc->state;
clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
-
- if (nvme_state_is_live(ns->ana_state))
+ /*
+ * nvme_mpath_set_live() will trigger I/O to the multipath path device
+ * and in turn to this path device. However we cannot accept this I/O
+ * if the controller is not live. This may deadlock if called from
+ * nvme_mpath_init_identify() and the ctrl will never complete
+ * initialization, preventing I/O from completing. For this case we
+ * will reprocess the ANA log page in nvme_mpath_update() once the
+ * controller is ready.
+ */
+ if (nvme_state_is_live(ns->ana_state) &&
+ ns->ctrl->state == NVME_CTRL_LIVE)
nvme_mpath_set_live(ns);
- mutex_unlock(&ns->head->lock);
}
static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
@@ -514,18 +649,21 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
if (!nr_nsids)
return 0;
- down_write(&ctrl->namespaces_rwsem);
+ down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- unsigned nsid = le32_to_cpu(desc->nsids[n]);
-
+ unsigned nsid;
+again:
+ nsid = le32_to_cpu(desc->nsids[n]);
if (ns->head->ns_id < nsid)
continue;
if (ns->head->ns_id == nsid)
nvme_update_ns_ana_state(desc, ns);
if (++n == nr_nsids)
break;
+ if (ns->head->ns_id > nsid)
+ goto again;
}
- up_write(&ctrl->namespaces_rwsem);
+ up_read(&ctrl->namespaces_rwsem);
return 0;
}
@@ -535,7 +673,7 @@ static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
int error;
mutex_lock(&ctrl->ana_lock);
- error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0,
+ error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, NVME_CSI_NVM,
ctrl->ana_log_buf, ctrl->ana_log_size, 0);
if (error) {
dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
@@ -571,9 +709,24 @@ static void nvme_ana_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
+ if (ctrl->state != NVME_CTRL_LIVE)
+ return;
+
nvme_read_ana_log(ctrl);
}
+void nvme_mpath_update(struct nvme_ctrl *ctrl)
+{
+ u32 nr_change_groups = 0;
+
+ if (!ctrl->ana_log_buf)
+ return;
+
+ mutex_lock(&ctrl->ana_lock);
+ nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
+ mutex_unlock(&ctrl->ana_lock);
+}
+
static void nvme_anatt_timeout(struct timer_list *t)
{
struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);
@@ -594,19 +747,14 @@ void nvme_mpath_stop(struct nvme_ctrl *ctrl)
struct device_attribute subsys_attr_##_name = \
__ATTR(_name, _mode, _show, _store)
-static const char *nvme_iopolicy_names[] = {
- [NVME_IOPOLICY_NUMA] = "numa",
- [NVME_IOPOLICY_RR] = "round-robin",
-};
-
static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvme_subsystem *subsys =
container_of(dev, struct nvme_subsystem, dev);
- return sprintf(buf, "%s\n",
- nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
+ return sysfs_emit(buf, "%s\n",
+ nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
}
static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
@@ -631,7 +779,7 @@ SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
+ return sysfs_emit(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);
@@ -640,102 +788,139 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
{
struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
- return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
+ return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);
-static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl,
+static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
struct nvme_ana_group_desc *desc, void *data)
{
- struct nvme_ns *ns = data;
+ struct nvme_ana_group_desc *dst = data;
- if (ns->ana_grpid == le32_to_cpu(desc->grpid)) {
- nvme_update_ns_ana_state(desc, ns);
- return -ENXIO; /* just break out of the loop */
- }
+ if (desc->grpid != dst->grpid)
+ return 0;
- return 0;
+ *dst = *desc;
+ return -ENXIO; /* just break out of the loop */
}
-void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
+void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
if (nvme_ctrl_use_ana(ns->ctrl)) {
+ struct nvme_ana_group_desc desc = {
+ .grpid = anagrpid,
+ .state = 0,
+ };
+
mutex_lock(&ns->ctrl->ana_lock);
- ns->ana_grpid = le32_to_cpu(id->anagrpid);
- nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state);
+ ns->ana_grpid = le32_to_cpu(anagrpid);
+ nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
mutex_unlock(&ns->ctrl->ana_lock);
+ if (desc.state) {
+ /* found the group desc: update */
+ nvme_update_ns_ana_state(&desc, ns);
+ } else {
+ /* group desc not found: trigger a re-read */
+ set_bit(NVME_NS_ANA_PENDING, &ns->flags);
+ queue_work(nvme_wq, &ns->ctrl->ana_work);
+ }
} else {
- mutex_lock(&ns->head->lock);
- ns->ana_state = NVME_ANA_OPTIMIZED;
+ ns->ana_state = NVME_ANA_OPTIMIZED;
nvme_mpath_set_live(ns);
- mutex_unlock(&ns->head->lock);
}
+
+ if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
+ ns->head->disk->queue);
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
+ ns->head->disk->nr_zones = ns->disk->nr_zones;
+#endif
}
-void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
if (!head->disk)
return;
- if (head->disk->flags & GENHD_FL_UP)
+ kblockd_schedule_work(&head->requeue_work);
+ if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
+ nvme_cdev_del(&head->cdev, &head->cdev_device);
del_gendisk(head->disk);
- blk_set_queue_dying(head->disk->queue);
+ }
+}
+
+void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+{
+ if (!head->disk)
+ return;
+ blk_mark_disk_dead(head->disk);
/* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work);
flush_work(&head->requeue_work);
- blk_cleanup_queue(head->disk->queue);
put_disk(head->disk);
}
-int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
- int error;
+ mutex_init(&ctrl->ana_lock);
+ timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
+ INIT_WORK(&ctrl->ana_work, nvme_ana_work);
+}
+
+int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+{
+ size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
+ size_t ana_log_size;
+ int error = 0;
/* check if multipath is enabled and we have the capability */
- if (!multipath || !ctrl->subsys || !(ctrl->subsys->cmic & (1 << 3)))
+ if (!multipath || !ctrl->subsys ||
+ !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
return 0;
+ if (!ctrl->max_namespaces ||
+ ctrl->max_namespaces > le32_to_cpu(id->nn)) {
+ dev_err(ctrl->device,
+ "Invalid MNAN value %u\n", ctrl->max_namespaces);
+ return -EINVAL;
+ }
+
ctrl->anacap = id->anacap;
ctrl->anatt = id->anatt;
ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);
- mutex_init(&ctrl->ana_lock);
- timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
- ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
- ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
- ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
-
- if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
+ ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
+ ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
+ ctrl->max_namespaces * sizeof(__le32);
+ if (ana_log_size > max_transfer_size) {
dev_err(ctrl->device,
- "ANA log page size (%zd) larger than MDTS (%d).\n",
- ctrl->ana_log_size,
- ctrl->max_hw_sectors << SECTOR_SHIFT);
+ "ANA log page size (%zd) larger than MDTS (%zd).\n",
+ ana_log_size, max_transfer_size);
dev_err(ctrl->device, "disabling ANA support.\n");
- return 0;
+ goto out_uninit;
}
-
- INIT_WORK(&ctrl->ana_work, nvme_ana_work);
- kfree(ctrl->ana_log_buf);
- ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
- if (!ctrl->ana_log_buf) {
- error = -ENOMEM;
- goto out;
+ if (ana_log_size > ctrl->ana_log_size) {
+ nvme_mpath_stop(ctrl);
+ nvme_mpath_uninit(ctrl);
+ ctrl->ana_log_buf = kvmalloc(ana_log_size, GFP_KERNEL);
+ if (!ctrl->ana_log_buf)
+ return -ENOMEM;
}
-
+ ctrl->ana_log_size = ana_log_size;
error = nvme_read_ana_log(ctrl);
if (error)
- goto out_free_ana_log_buf;
+ goto out_uninit;
return 0;
-out_free_ana_log_buf:
- kfree(ctrl->ana_log_buf);
- ctrl->ana_log_buf = NULL;
-out:
+
+out_uninit:
+ nvme_mpath_uninit(ctrl);
return error;
}
void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
- kfree(ctrl->ana_log_buf);
+ kvfree(ctrl->ana_log_buf);
ctrl->ana_log_buf = NULL;
+ ctrl->ana_log_size = 0;
}
-
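The ANA log buffer sized in nvme_mpath_init_identify() above follows directly from the log page layout; a sketch of the same computation (the struct sizes are whatever the NVMe headers define):

static size_t nvme_ana_log_buf_size(u32 nanagrpid, u32 max_namespaces)
{
	return sizeof(struct nvme_ana_rsp_hdr) +			/* fixed header */
	       nanagrpid * sizeof(struct nvme_ana_group_desc) +	/* one descriptor per group */
	       max_namespaces * sizeof(__le32);				/* worst-case nsid list */
}

Since this can exceed a page, the buffer is now allocated with kvmalloc() and released with kvfree(), and it is only reallocated when the required size grows.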
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 1024fec7914c..a29877217ee6 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -11,11 +11,11 @@
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
-#include <linux/lightnvm.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
+#include <linux/t10-pi.h>
#include <trace/events/block.h>
@@ -23,26 +23,30 @@ extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)
extern unsigned int admin_timeout;
-#define ADMIN_TIMEOUT (admin_timeout * HZ)
+#define NVME_ADMIN_TIMEOUT (admin_timeout * HZ)
#define NVME_DEFAULT_KATO 5
-#define NVME_KATO_GRACE 10
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define NVME_INLINE_SG_CNT 0
+#define NVME_INLINE_METADATA_SG_CNT 0
#else
#define NVME_INLINE_SG_CNT 2
+#define NVME_INLINE_METADATA_SG_CNT 1
#endif
+/*
+ * Default to a 4K page size, with the intention to update this
+ * path in the future to accommodate architectures with differing
+ * kernel and IO page sizes.
+ */
+#define NVME_CTRL_PAGE_SHIFT 12
+#define NVME_CTRL_PAGE_SIZE (1 << NVME_CTRL_PAGE_SHIFT)
+
extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;
-enum {
- NVME_NS_LBA = 0,
- NVME_NS_LIGHTNVM = 1,
-};
-
/*
* List of workarounds for devices that required behavior not specified in
* the standard.
@@ -83,11 +87,6 @@ enum nvme_quirks {
NVME_QUIRK_NO_DEEPEST_PS = (1 << 5),
/*
- * Supports the LighNVM command set if indicated in vs[1].
-	 * Supports the LightNVM command set if indicated in vs[1].
- NVME_QUIRK_LIGHTNVM = (1 << 6),
-
- /*
* Set MEDIUM priority on SQ creation
*/
NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7),
@@ -126,6 +125,30 @@ enum nvme_quirks {
* Don't change the value of the temperature threshold feature
*/
NVME_QUIRK_NO_TEMP_THRESH_CHANGE = (1 << 14),
+
+ /*
+ * The controller doesn't handle the Identify Namespace
+ * Identification Descriptor list subcommand despite claiming
+ * NVMe 1.3 compliance.
+ */
+ NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15),
+
+ /*
+ * The controller does not properly handle DMA addresses over
+ * 48 bits.
+ */
+ NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16),
+
+ /*
+ * The controller requires the command_id value be limited, so skip
+ * encoding the generation sequence number.
+ */
+ NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
+
+ /*
+ * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
+ */
+ NVME_QUIRK_BOGUS_NID = (1 << 18),
};
/*
@@ -135,6 +158,7 @@ enum nvme_quirks {
struct nvme_request {
struct nvme_command *cmd;
union nvme_result result;
+ u8 genctr;
u8 retries;
u8 flags;
u16 status;
@@ -158,9 +182,10 @@ static inline struct nvme_request *nvme_req(struct request *req)
static inline u16 nvme_req_qid(struct request *req)
{
- if (!req->rq_disk)
+ if (!req->q->queuedata)
return 0;
- return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(req)) + 1;
+
+ return req->mq_hctx->queue_num + 1;
}
/* The below value is the specific amount of delay needed before checking
@@ -170,12 +195,32 @@ static inline u16 nvme_req_qid(struct request *req)
*/
#define NVME_QUIRK_DELAY_AMOUNT 2300
+/*
+ * enum nvme_ctrl_state: Controller state
+ *
+ * @NVME_CTRL_NEW: New controller just allocated, initial state
+ * @NVME_CTRL_LIVE: Controller is connected and I/O capable
+ * @NVME_CTRL_RESETTING: Controller is resetting (or scheduled reset)
+ * @NVME_CTRL_CONNECTING: Controller is disconnected, now connecting the
+ * transport
+ * @NVME_CTRL_DELETING: Controller is deleting (or scheduled deletion)
+ * @NVME_CTRL_DELETING_NOIO: Controller is deleting and I/O is not
+ *			disabled/failed immediately. This state comes
+ *			after all async event processing has taken place
+ *			and before namespace removal, while the controller
+ *			deletion is still in progress
+ * @NVME_CTRL_DEAD: Controller is non-present/unresponsive during
+ * shutdown or removal. In this case we forcibly
+ * kill all inflight I/O as they have no chance to
+ * complete
+ */
enum nvme_ctrl_state {
NVME_CTRL_NEW,
NVME_CTRL_LIVE,
NVME_CTRL_RESETTING,
NVME_CTRL_CONNECTING,
NVME_CTRL_DELETING,
+ NVME_CTRL_DELETING_NOIO,
NVME_CTRL_DEAD,
};
@@ -188,6 +233,12 @@ struct nvme_fault_inject {
#endif
};
+enum nvme_ctrl_flags {
+ NVME_CTRL_FAILFAST_EXPIRED = 0,
+ NVME_CTRL_ADMIN_Q_STOPPED = 1,
+ NVME_CTRL_STARTED_ONCE = 2,
+};
+
struct nvme_ctrl {
bool comp_seen;
enum nvme_ctrl_state state;
@@ -207,6 +258,9 @@ struct nvme_ctrl {
struct rw_semaphore namespaces_rwsem;
struct device ctrl_device;
struct device *device; /* char device */
+#ifdef CONFIG_NVME_HWMON
+ struct device *hwmon_device;
+#endif
struct cdev cdev;
struct work_struct reset_work;
struct work_struct delete_work;
@@ -225,14 +279,19 @@ struct nvme_ctrl {
u32 queue_count;
u64 cap;
- u32 page_size;
u32 max_hw_sectors;
u32 max_segments;
+ u32 max_integrity_segments;
+ u32 max_discard_sectors;
+ u32 max_discard_segments;
+ u32 max_zeroes_sectors;
+#ifdef CONFIG_BLK_DEV_ZONED
+ u32 max_zone_append;
+#endif
u16 crdt[3];
u16 oncs;
+ u32 dmrsl;
u16 oacs;
- u16 nssa;
- u16 nr_streams;
u16 sqsize;
u32 max_namespaces;
atomic_t abort_limit;
@@ -253,9 +312,11 @@ struct nvme_ctrl {
unsigned long quirks;
struct nvme_id_power_state psd[32];
struct nvme_effects_log *effects;
+ struct xarray cels;
struct work_struct scan_work;
struct work_struct async_event_work;
struct delayed_work ka_work;
+ struct delayed_work failfast_work;
struct nvme_command ka_cmd;
struct work_struct fw_act_work;
unsigned long events;
@@ -273,6 +334,15 @@ struct nvme_ctrl {
struct work_struct ana_work;
#endif
+#ifdef CONFIG_NVME_AUTH
+ struct work_struct dhchap_auth_work;
+ struct list_head dhchap_auth_list;
+ struct mutex dhchap_auth_mutex;
+ struct nvme_dhchap_key *host_key;
+ struct nvme_dhchap_key *ctrl_key;
+ u16 transaction;
+#endif
+
/* Power saving configuration */
u64 ps_max_latency_us;
bool apst_enabled;
@@ -289,12 +359,16 @@ struct nvme_ctrl {
u16 icdoff;
u16 maxcmd;
int nr_reconnects;
+ unsigned long flags;
struct nvmf_ctrl_options *opts;
struct page *discard_page;
unsigned long discard_page_busy;
struct nvme_fault_inject fault_inject;
+
+ enum nvme_ctrl_type cntrltype;
+ enum nvme_dctype dctype;
};
enum nvme_iopolicy {
@@ -319,6 +393,7 @@ struct nvme_subsystem {
char model[40];
char firmware_rev[8];
u8 cmic;
+ enum nvme_subsys_type subtype;
u16 vendor_id;
u16 awupf; /* 0's based awupf value. */
struct ida ns_ida;
@@ -334,6 +409,7 @@ struct nvme_ns_ids {
u8 eui64[8];
u8 nguid[16];
uuid_t uuid;
+ u8 csi;
};
/*
@@ -351,17 +427,35 @@ struct nvme_ns_head {
struct nvme_ns_ids ids;
struct list_head entry;
struct kref ref;
+ bool shared;
int instance;
-#ifdef CONFIG_NVME_MULTIPATH
+ struct nvme_effects_log *effects;
+
+ struct cdev cdev;
+ struct device cdev_device;
+
struct gendisk *disk;
+#ifdef CONFIG_NVME_MULTIPATH
struct bio_list requeue_list;
spinlock_t requeue_lock;
struct work_struct requeue_work;
struct mutex lock;
+ unsigned long flags;
+#define NVME_NSHEAD_DISK_LIVE 0
struct nvme_ns __rcu *current_path[];
#endif
};
+static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
+{
+ return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk;
+}
+
+enum nvme_ns_features {
+ NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
+ NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
+};
+
struct nvme_ns {
struct list_head list;
@@ -373,42 +467,129 @@ struct nvme_ns {
u32 ana_grpid;
#endif
struct list_head siblings;
- struct nvm_dev *ndev;
struct kref kref;
struct nvme_ns_head *head;
int lba_shift;
u16 ms;
+ u16 pi_size;
u16 sgs;
u32 sws;
- bool ext;
u8 pi_type;
+ u8 guard_type;
+#ifdef CONFIG_BLK_DEV_ZONED
+ u64 zsze;
+#endif
+ unsigned long features;
unsigned long flags;
#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD 1
#define NVME_NS_ANA_PENDING 2
- u16 noiob;
+#define NVME_NS_FORCE_RO 3
+#define NVME_NS_READY 4
+#define NVME_NS_STOPPED 5
+
+ struct cdev cdev;
+ struct device cdev_device;
struct nvme_fault_inject fault_inject;
};
+/* NVMe ns supports metadata actions by the controller (generate/strip) */
+static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
+{
+ return ns->pi_type && ns->ms == ns->pi_size;
+}
+
struct nvme_ctrl_ops {
const char *name;
struct module *module;
unsigned int flags;
#define NVME_F_FABRICS (1 << 0)
#define NVME_F_METADATA_SUPPORTED (1 << 1)
-#define NVME_F_PCI_P2PDMA (1 << 2)
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
void (*free_ctrl)(struct nvme_ctrl *ctrl);
void (*submit_async_event)(struct nvme_ctrl *ctrl);
void (*delete_ctrl)(struct nvme_ctrl *ctrl);
+ void (*stop_ctrl)(struct nvme_ctrl *ctrl);
int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
+ void (*print_device_info)(struct nvme_ctrl *ctrl);
+ bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
};
+/*
+ * nvme command_id is constructed as such:
+ * | xxxx | xxxxxxxxxxxx |
+ * gen request tag
+ */
+#define nvme_genctr_mask(gen) (gen & 0xf)
+#define nvme_cid_install_genctr(gen) (nvme_genctr_mask(gen) << 12)
+#define nvme_genctr_from_cid(cid) ((cid & 0xf000) >> 12)
+#define nvme_tag_from_cid(cid) (cid & 0xfff)
+
+static inline u16 nvme_cid(struct request *rq)
+{
+ return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
+}
+
+static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
+ u16 command_id)
+{
+ u8 genctr = nvme_genctr_from_cid(command_id);
+ u16 tag = nvme_tag_from_cid(command_id);
+ struct request *rq;
+
+ rq = blk_mq_tag_to_rq(tags, tag);
+ if (unlikely(!rq)) {
+ pr_err("could not locate request for tag %#x\n",
+ tag);
+ return NULL;
+ }
+ if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
+ dev_err(nvme_req(rq)->ctrl->device,
+ "request %#x genctr mismatch (got %#x expected %#x)\n",
+ tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
+ return NULL;
+ }
+ return rq;
+}
+
+static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
+ u16 command_id)
+{
+ return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
+}
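
For illustration only, a minimal user-space sketch of the 4-bit generation counter / 12-bit tag packing described in the comment above; the helper name cid_pack and the sample values are stand-ins, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the | 4-bit genctr | 12-bit blk-mq tag | layout. */
static uint16_t cid_pack(unsigned int genctr, unsigned int tag)
{
	return (uint16_t)(((genctr & 0xf) << 12) | (tag & 0xfff));
}

int main(void)
{
	uint16_t cid = cid_pack(5, 0x02a);	/* genctr 5, blk-mq tag 0x2a */

	printf("cid=%#x genctr=%u tag=%#x\n", cid,
	       (unsigned int)((cid >> 12) & 0xf),
	       (unsigned int)(cid & 0xfff));
	/*
	 * A completion carrying a stale genctr (e.g. 4) fails the mismatch
	 * check in nvme_find_rq() and is dropped instead of completing a
	 * reused tag.
	 */
	return 0;
}
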
+
+/*
+ * Return the length of the string without the space padding
+ */
+static inline int nvme_strlen(char *s, int len)
+{
+ while (s[len - 1] == ' ')
+ len--;
+ return len;
+}
+
+static inline void nvme_print_device_info(struct nvme_ctrl *ctrl)
+{
+ struct nvme_subsystem *subsys = ctrl->subsys;
+
+ if (ctrl->ops->print_device_info) {
+ ctrl->ops->print_device_info(ctrl);
+ return;
+ }
+
+ dev_err(ctrl->device,
+ "VID:%04x model:%.*s firmware:%.*s\n", subsys->vendor_id,
+ nvme_strlen(subsys->model, sizeof(subsys->model)),
+ subsys->model, nvme_strlen(subsys->firmware_rev,
+ sizeof(subsys->firmware_rev)),
+ subsys->firmware_rev);
+}
+
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
const char *dev_name);
@@ -425,11 +606,23 @@ static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
static inline void nvme_should_fail(struct request *req) {}
#endif
+bool nvme_wait_reset(struct nvme_ctrl *ctrl);
+int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
+
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
+ int ret;
+
if (!ctrl->subsystem)
return -ENOTTY;
- return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+ if (!nvme_wait_reset(ctrl))
+ return -EBUSY;
+
+ ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+ if (ret)
+ return ret;
+
+ return nvme_try_sched_reset(ctrl);
}
/*
@@ -448,16 +641,54 @@ static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
return lba << (ns->lba_shift - SECTOR_SHIFT);
}
-static inline void nvme_end_request(struct request *req, __le16 status,
+/*
+ * Convert byte length to nvme's 0-based num dwords
+ */
+static inline u32 nvme_bytes_to_numd(size_t len)
+{
+ return (len >> 2) - 1;
+}
+
+static inline bool nvme_is_ana_error(u16 status)
+{
+ switch (status & 0x7ff) {
+ case NVME_SC_ANA_TRANSITION:
+ case NVME_SC_ANA_INACCESSIBLE:
+ case NVME_SC_ANA_PERSISTENT_LOSS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool nvme_is_path_error(u16 status)
+{
+ /* check for a status code type of 'path related status' */
+ return (status & 0x700) == 0x300;
+}
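
As a side note, the helper above only inspects bits 8..10 of the 16-bit status, where type 0x3 is the path-related status code type. A standalone sketch; the sample value 0x303 (ANA transition) is used purely for illustration:

#include <stdbool.h>
#include <stdio.h>

static bool is_path_error(unsigned short status)
{
	/* status code type lives in bits 8..10; type 0x3 is path-related */
	return (status & 0x700) == 0x300;
}

int main(void)
{
	printf("0x303 path error? %d\n", is_path_error(0x303));	/* 1 */
	printf("0x002 path error? %d\n", is_path_error(0x002));	/* 0 */
	return 0;
}
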
+
+/*
+ * Fill in the status and result information from the CQE, and then figure out
+ * if blk-mq will need to use IPI magic to complete the request, and if yes do
+ * so. If not let the caller complete the request without an indirect function
+ * call.
+ */
+static inline bool nvme_try_complete_req(struct request *req, __le16 status,
union nvme_result result)
{
struct nvme_request *rq = nvme_req(req);
+ struct nvme_ctrl *ctrl = rq->ctrl;
+
+ if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
+ rq->genctr++;
rq->status = le16_to_cpu(status) >> 1;
rq->result = result;
/* inject error when permitted by fault injection framework */
nvme_should_fail(req);
- blk_mq_complete_request(req);
+ if (unlikely(blk_should_fake_timeout(req->q)))
+ return true;
+ return blk_mq_complete_request_remote(req);
}
static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
@@ -472,14 +703,31 @@ static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
- return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
+ return !qid &&
+ nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
}
void nvme_complete_rq(struct request *req);
-bool nvme_cancel_request(struct request *req, void *data, bool reserved);
+void nvme_complete_batch_req(struct request *req);
+
+static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
+ void (*fn)(struct request *rq))
+{
+ struct request *req;
+
+ rq_list_for_each(&iob->req_list, req) {
+ fn(req);
+ nvme_complete_batch_req(req);
+ }
+ blk_mq_end_request_batch(iob);
+}
+
+blk_status_t nvme_host_path_error(struct request *req);
+bool nvme_cancel_request(struct request *req, void *data);
+void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
+void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state);
-bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
@@ -488,8 +736,15 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
-void nvme_put_ctrl(struct nvme_ctrl *ctrl);
-int nvme_init_identify(struct nvme_ctrl *ctrl);
+int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);
+int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int flags,
+ unsigned int cmd_size);
+void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
+int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int flags,
+ unsigned int cmd_size);
+void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
@@ -501,25 +756,65 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
+void nvme_stop_admin_queue(struct nvme_ctrl *ctrl);
+void nvme_start_admin_queue(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
+void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
-void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
+int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);
+static inline enum req_op nvme_req_op(struct nvme_command *cmd)
+{
+ return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
+}
+
#define NVME_QID_ANY -1
-struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
+void nvme_init_request(struct request *req, struct nvme_command *cmd);
void nvme_cleanup_cmd(struct request *req);
-blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
- struct nvme_command *cmd);
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
+blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
+ struct request *req);
+bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+ bool queue_live);
+
+static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+ bool queue_live)
+{
+ if (likely(ctrl->state == NVME_CTRL_LIVE))
+ return true;
+ if (ctrl->ops->flags & NVME_F_FABRICS &&
+ ctrl->state == NVME_CTRL_DELETING)
+ return queue_live;
+ return __nvme_check_ready(ctrl, rq, queue_live);
+}
+
+/*
+ * NSID shall be unique for all shared namespaces, or if at least one of the
+ * following conditions is met:
+ * 1. Namespace Management is supported by the controller
+ * 2. ANA is supported by the controller
+ * 3. NVM Sets are supported by the controller
+ *
+ * Otherwise, private namespaces are not required to report a unique NSID.
+ */
+static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
+ struct nvme_ns_head *head)
+{
+ return head->shared ||
+ (ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) ||
+ (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) ||
+ (ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
+}
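
A tiny truth check for the rule above, with the controller capabilities reduced to hypothetical booleans (the kernel reads them from OACS, CMIC and CTRATT as shown in the helper):

#include <stdbool.h>
#include <stdio.h>

static bool is_unique_nsid(bool shared, bool ns_mgmt, bool ana, bool nvm_sets)
{
	return shared || ns_mgmt || ana || nvm_sets;
}

int main(void)
{
	/* private ns, none of the features: NSID may repeat in the subsystem */
	printf("%d\n", is_unique_nsid(false, false, false, false));	/* 0 */
	/* shared namespace: NSID must be unique */
	printf("%d\n", is_unique_nsid(true, false, false, false));	/* 1 */
	return 0;
}
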
+
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- unsigned timeout, int qid, int at_head,
- blk_mq_req_flags_t flags, bool poll);
+ int qid, int at_head,
+ blk_mq_req_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
u32 *result);
@@ -530,15 +825,40 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
-int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
-
-int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
+void nvme_queue_scan(struct nvme_ctrl *ctrl);
+int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
void *log, size_t size, u64 offset);
+bool nvme_tryget_ns_head(struct nvme_ns_head *head);
+void nvme_put_ns_head(struct nvme_ns_head *head);
+int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
+ const struct file_operations *fops, struct module *owner);
+void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device);
+int nvme_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg);
+long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg);
+long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+long nvme_dev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+ struct io_comp_batch *iob, unsigned int poll_flags);
+int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+ struct io_comp_batch *iob, unsigned int poll_flags);
+int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags);
+int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags);
+int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
+int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
extern const struct attribute_group *nvme_ns_id_attr_groups[];
+extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;
+struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
@@ -548,57 +868,41 @@ static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
-void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
- struct nvme_ctrl *ctrl, int *flags);
+void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
-void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
+void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
-int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
+int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
+void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
+void nvme_mpath_update(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
+void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
-struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
+void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);
-static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
-{
- struct nvme_ns_head *head = ns->head;
-
- if (head->disk && list_empty(&head->list))
- kblockd_schedule_work(&head->requeue_work);
-}
-
-static inline void nvme_trace_bio_complete(struct request *req,
- blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
if (req->cmd_flags & REQ_NVME_MPATH)
- trace_block_bio_complete(ns->head->disk->queue,
- req->bio, status);
+ trace_block_bio_complete(ns->head->disk->queue, req->bio);
}
+extern bool multipath;
extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;
#else
+#define multipath false
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
return false;
}
-/*
- * Without the multipath code enabled, multiple controller per subsystems are
- * visible as devices and thus we cannot use the subsystem instance.
- */
-static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
- struct nvme_ctrl *ctrl, int *flags)
-{
- sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
-}
-
static inline void nvme_failover_req(struct request *req)
{
}
@@ -610,8 +914,7 @@ static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
{
return 0;
}
-static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
- struct nvme_id_ns *id)
+static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
@@ -621,24 +924,32 @@ static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
return false;
}
+static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
+{
+}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
-static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
+{
+}
+static inline void nvme_trace_bio_complete(struct request *req)
{
}
-static inline void nvme_trace_bio_complete(struct request *req,
- blk_status_t status)
+static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
}
-static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
+static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
struct nvme_id_ctrl *id)
{
- if (ctrl->subsys->cmic & (1 << 3))
+ if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
return 0;
}
+static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
+{
+}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
@@ -654,27 +965,34 @@ static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
+static inline void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
+{
+}
#endif /* CONFIG_NVME_MULTIPATH */
-#ifdef CONFIG_NVM
-int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
-void nvme_nvm_unregister(struct nvme_ns *ns);
-extern const struct attribute_group nvme_nvm_attr_group;
-int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
+int nvme_revalidate_zones(struct nvme_ns *ns);
+int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
+#ifdef CONFIG_BLK_DEV_ZONED
+int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
+blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
+ struct nvme_command *cmnd,
+ enum nvme_zone_mgmt_action action);
#else
-static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
- int node)
+static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
+ struct request *req, struct nvme_command *cmnd,
+ enum nvme_zone_mgmt_action action)
{
- return 0;
+ return BLK_STS_NOTSUPP;
}
-static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
-static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
- unsigned long arg)
+static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
- return -ENOTTY;
+ dev_warn(ns->ctrl->device,
+ "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
+ return -EPROTONOSUPPORT;
}
-#endif /* CONFIG_NVM */
+#endif
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
@@ -682,9 +1000,76 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
}
#ifdef CONFIG_NVME_HWMON
-void nvme_hwmon_init(struct nvme_ctrl *ctrl);
+int nvme_hwmon_init(struct nvme_ctrl *ctrl);
+void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
-static inline void nvme_hwmon_init(struct nvme_ctrl *ctrl) { }
+static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
+{
+ return 0;
+}
+
+static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
+{
+}
#endif
+static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
+{
+ return ctrl->sgls & ((1 << 0) | (1 << 1));
+}
+
+#ifdef CONFIG_NVME_AUTH
+void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
+void nvme_auth_stop(struct nvme_ctrl *ctrl);
+int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
+int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
+void nvme_auth_reset(struct nvme_ctrl *ctrl);
+void nvme_auth_free(struct nvme_ctrl *ctrl);
+#else
+static inline void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) {};
+static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {};
+static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
+{
+ return -EPROTONOSUPPORT;
+}
+static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
+{
+ return NVME_SC_AUTH_REQUIRED;
+}
+static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
+#endif
+
+u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ u8 opcode);
+int nvme_execute_passthru_rq(struct request *rq, u32 *effects);
+void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+ struct nvme_command *cmd, int status);
+struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
+struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
+void nvme_put_ns(struct nvme_ns *ns);
+
+static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
+{
+ return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
+}
+
+#ifdef CONFIG_NVME_VERBOSE_ERRORS
+const unsigned char *nvme_get_error_status_str(u16 status);
+const unsigned char *nvme_get_opcode_str(u8 opcode);
+const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
+#else /* CONFIG_NVME_VERBOSE_ERRORS */
+static inline const unsigned char *nvme_get_error_status_str(u16 status)
+{
+ return "I/O Error";
+}
+static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
+{
+ return "I/O Cmd";
+}
+static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+{
+ return "Admin Cmd";
+}
+#endif /* CONFIG_NVME_VERBOSE_ERRORS */
+
#endif /* _NVME_H */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d3f23d6254e4..02b5578773a1 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -4,15 +4,18 @@
* Copyright (c) 2011-2014, Intel Corporation.
*/
+#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
+#include <linux/blk-integrity.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -22,6 +25,7 @@
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>
@@ -41,7 +45,7 @@
#define NVME_MAX_SEGS 127
static int use_threaded_interrupts;
-module_param(use_threaded_interrupts, int, 0);
+module_param(use_threaded_interrupts, int, 0444);
static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0444);
@@ -58,26 +62,48 @@ MODULE_PARM_DESC(sgl_threshold,
"Use SGLs when average request segment size is larger or equal to "
"this size. Use 0 to disable SGLs.");
+#define NVME_PCI_MIN_QUEUE_SIZE 2
+#define NVME_PCI_MAX_QUEUE_SIZE 4095
static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = {
.set = io_queue_depth_set,
- .get = param_get_int,
+ .get = param_get_uint,
};
-static int io_queue_depth = 1024;
+static unsigned int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
-MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
+MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should be >= 2 and < 4096");
+
+static int io_queue_count_set(const char *val, const struct kernel_param *kp)
+{
+ unsigned int n;
+ int ret;
+
+ ret = kstrtouint(val, 10, &n);
+ if (ret != 0 || n > num_possible_cpus())
+ return -EINVAL;
+ return param_set_uint(val, kp);
+}
+
+static const struct kernel_param_ops io_queue_count_ops = {
+ .set = io_queue_count_set,
+ .get = param_get_uint,
+};
static unsigned int write_queues;
-module_param(write_queues, uint, 0644);
+module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644);
MODULE_PARM_DESC(write_queues,
"Number of queues to use for writes. If not set, reads and writes "
"will share a queue set.");
static unsigned int poll_queues;
-module_param(poll_queues, uint, 0644);
+module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");
+static bool noacpi;
+module_param(noacpi, bool, 0444);
+MODULE_PARM_DESC(noacpi, "disable acpi bios quirks");
+
struct nvme_dev;
struct nvme_queue;
@@ -99,7 +125,7 @@ struct nvme_dev {
unsigned max_qid;
unsigned io_queues[HCTX_MAX_TYPES];
unsigned int num_vecs;
- int q_depth;
+ u32 q_depth;
int io_sqes;
u32 db_stride;
void __iomem *bar;
@@ -113,6 +139,7 @@ struct nvme_dev {
u32 cmbloc;
struct nvme_ctrl ctrl;
u32 last_ps;
+ bool hmb;
mempool_t *iod_mempool;
@@ -128,17 +155,17 @@ struct nvme_dev {
dma_addr_t host_mem_descs_dma;
struct nvme_host_mem_buf_desc *host_mem_descs;
void **host_mem_desc_bufs;
+ unsigned int nr_allocated_queues;
+ unsigned int nr_write_queues;
+ unsigned int nr_poll_queues;
+
+ bool attrs_added;
};
static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{
- int n = 0, ret;
-
- ret = kstrtoint(val, 10, &n);
- if (ret != 0 || n < 2)
- return -EINVAL;
-
- return param_set_int(val, kp);
+ return param_set_uint_minmax(val, kp, NVME_PCI_MIN_QUEUE_SIZE,
+ NVME_PCI_MAX_QUEUE_SIZE);
}
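
A minimal sketch of the same bounds check done in user space, assuming the 2..4095 window defined by the constants above; in the kernel, param_set_uint_minmax() handles the parsing and range rejection in one call:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MIN_QD	2
#define MAX_QD	4095

static int parse_queue_depth(const char *val, unsigned int *out)
{
	char *end;
	unsigned long n = strtoul(val, &end, 10);

	if (*end != '\0' || n < MIN_QD || n > MAX_QD)
		return -EINVAL;		/* mirrors the min/max rejection */
	*out = (unsigned int)n;
	return 0;
}

int main(void)
{
	unsigned int qd;

	printf("1024 -> %d\n", parse_queue_depth("1024", &qd));	/* 0 */
	printf("4096 -> %d\n", parse_queue_depth("4096", &qd));	/* -EINVAL */
	return 0;
}

On a running system this corresponds to e.g. nvme.io_queue_depth=1024 being accepted while 4096 is rejected by the same check.
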
static inline unsigned int sq_idx(unsigned int qid, u32 stride)
@@ -166,11 +193,11 @@ struct nvme_queue {
void *sq_cmds;
/* only used for poll queues: */
spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
- volatile struct nvme_completion *cqes;
+ struct nvme_completion *cqes;
dma_addr_t sq_dma_addr;
dma_addr_t cq_dma_addr;
u32 __iomem *q_db;
- u16 q_depth;
+ u32 q_depth;
u16 cq_vector;
u16 sq_tail;
u16 last_sq_tail;
@@ -198,39 +225,35 @@ struct nvme_queue {
*/
struct nvme_iod {
struct nvme_request req;
- struct nvme_queue *nvmeq;
+ struct nvme_command cmd;
bool use_sgl;
- int aborted;
- int npages; /* In the PRP list. 0 means small pool in use */
- int nents; /* Used in scatterlist */
- dma_addr_t first_dma;
+ bool aborted;
+ s8 nr_allocations; /* PRP list pool allocations. 0 means small
+ pool in use */
unsigned int dma_len; /* length of single DMA segment mapping */
+ dma_addr_t first_dma;
dma_addr_t meta_dma;
- struct scatterlist *sg;
+ struct sg_table sgt;
};
-static unsigned int max_io_queues(void)
-{
- return num_possible_cpus() + write_queues + poll_queues;
-}
-
-static unsigned int max_queue_count(void)
+static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
{
- /* IO queues + admin queue */
- return 1 + max_io_queues();
-}
-
-static inline unsigned int nvme_dbbuf_size(u32 stride)
-{
- return (max_queue_count() * 8 * stride);
+ return dev->nr_allocated_queues * 8 * dev->db_stride;
}
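
Worked numbers for the size helper above, assuming for illustration a device with 1 admin + 32 I/O queues allocated and a doorbell stride of 1 (CAP.DSTRD == 0); both values are hypothetical:

#include <stdio.h>

int main(void)
{
	unsigned int nr_allocated_queues = 33;	/* admin + 32 I/O (example) */
	unsigned int db_stride = 1;		/* CAP.DSTRD == 0 (example) */

	/* two u32 shadow doorbell slots (SQ and CQ) per queue, scaled by stride */
	printf("dbbuf size = %u bytes\n",
	       nr_allocated_queues * 8 * db_stride);	/* 264 */
	return 0;
}
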
static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
- unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);
+ unsigned int mem_size = nvme_dbbuf_size(dev);
- if (dev->dbbuf_dbs)
+ if (dev->dbbuf_dbs) {
+ /*
+ * Clear the dbbuf memory so the driver doesn't observe stale
+ * values from the previous instantiation.
+ */
+ memset(dev->dbbuf_dbs, 0, mem_size);
+ memset(dev->dbbuf_eis, 0, mem_size);
return 0;
+ }
dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
&dev->dbbuf_dbs_dma_addr,
@@ -252,7 +275,7 @@ static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{
- unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);
+ unsigned int mem_size = nvme_dbbuf_size(dev);
if (dev->dbbuf_dbs) {
dma_free_coherent(dev->dev, mem_size,
@@ -278,14 +301,25 @@ static void nvme_dbbuf_init(struct nvme_dev *dev,
nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}
+static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
+{
+ if (!nvmeq->qid)
+ return;
+
+ nvmeq->dbbuf_sq_db = NULL;
+ nvmeq->dbbuf_cq_db = NULL;
+ nvmeq->dbbuf_sq_ei = NULL;
+ nvmeq->dbbuf_cq_ei = NULL;
+}
+
static void nvme_dbbuf_set(struct nvme_dev *dev)
{
- struct nvme_command c;
+ struct nvme_command c = { };
+ unsigned int i;
if (!dev->dbbuf_dbs)
return;
- memset(&c, 0, sizeof(c));
c.dbbuf.opcode = nvme_admin_dbbuf;
c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);
@@ -294,6 +328,9 @@ static void nvme_dbbuf_set(struct nvme_dev *dev)
dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
/* Free memory and continue on */
nvme_dbbuf_dma_free(dev);
+
+ for (i = 1; i <= dev->online_queues; i++)
+ nvme_dbbuf_free(&dev->queues[i]);
}
}
@@ -338,10 +375,10 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
* as it only leads to a small amount of wasted memory for the lifetime of
* the I/O.
*/
-static int nvme_npages(unsigned size, struct nvme_dev *dev)
+static int nvme_pci_npages_prp(void)
{
- unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
- dev->ctrl.page_size);
+ unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE,
+ NVME_CTRL_PAGE_SIZE);
return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}
@@ -349,22 +386,18 @@ static int nvme_npages(unsigned size, struct nvme_dev *dev)
* Calculates the number of pages needed for the SGL segments. For example a 4k
* page can accommodate 256 SGL descriptors.
*/
-static int nvme_pci_npages_sgl(unsigned int num_seg)
+static int nvme_pci_npages_sgl(void)
{
- return DIV_ROUND_UP(num_seg * sizeof(struct nvme_sgl_desc), PAGE_SIZE);
+ return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
+ PAGE_SIZE);
}
-static unsigned int nvme_pci_iod_alloc_size(struct nvme_dev *dev,
- unsigned int size, unsigned int nseg, bool use_sgl)
+static size_t nvme_pci_iod_alloc_size(void)
{
- size_t alloc_size;
-
- if (use_sgl)
- alloc_size = sizeof(__le64 *) * nvme_pci_npages_sgl(nseg);
- else
- alloc_size = sizeof(__le64 *) * nvme_npages(size, dev);
+ size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl());
- return alloc_size + sizeof(struct scatterlist) * nseg;
+ return sizeof(__le64 *) * npages +
+ sizeof(struct scatterlist) * NVME_MAX_SEGS;
}
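
For a feel of the SGL side of the worst-case bound, a small calculation sketch; the 4 KiB PAGE_SIZE and the 16-byte size of struct nvme_sgl_desc are assumptions here, while NVME_MAX_SEGS is the 127 defined earlier in this file:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int max_segs = 127;		/* NVME_MAX_SEGS */
	unsigned int sgl_desc_size = 16;	/* assumed descriptor size */
	unsigned int page_size = 4096;		/* assumed PAGE_SIZE */

	/* 127 descriptors * 16 bytes = 2032 bytes -> fits in one page */
	printf("SGL pages needed: %u\n",
	       DIV_ROUND_UP(max_segs * sgl_desc_size, page_size));
	return 0;
}
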
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -391,18 +424,15 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
return 0;
}
-static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
- unsigned int hctx_idx, unsigned int numa_node)
+static int nvme_pci_init_request(struct blk_mq_tag_set *set,
+ struct request *req, unsigned int hctx_idx,
+ unsigned int numa_node)
{
struct nvme_dev *dev = set->driver_data;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
- struct nvme_queue *nvmeq = &dev->queues[queue_idx];
-
- BUG_ON(!nvmeq);
- iod->nvmeq = nvmeq;
nvme_req(req)->ctrl = &dev->ctrl;
+ nvme_req(req)->cmd = &iod->cmd;
return 0;
}
@@ -415,7 +445,7 @@ static int queue_irq_offset(struct nvme_dev *dev)
return 0;
}
-static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
+static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
struct nvme_dev *dev = set->driver_data;
int i, qoff, offset;
@@ -442,8 +472,6 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
qoff += map->nr_queues;
offset += map->nr_queues;
}
-
- return 0;
}
/*
@@ -466,22 +494,13 @@ static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
nvmeq->last_sq_tail = nvmeq->sq_tail;
}
-/**
- * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
- * @nvmeq: The queue to use
- * @cmd: The command to send
- * @write_sq: whether to write to the SQ doorbell
- */
-static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
- bool write_sq)
+static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq,
+ struct nvme_command *cmd)
{
- spin_lock(&nvmeq->sq_lock);
memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
- cmd, sizeof(*cmd));
+ absolute_pointer(cmd), sizeof(*cmd));
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
- nvme_write_sq_db(nvmeq, write_sq);
- spin_unlock(&nvmeq->sq_lock);
}
static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
@@ -497,74 +516,80 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
static void **nvme_pci_iod_list(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
+ return (void **)(iod->sgt.sgl + blk_rq_nr_phys_segments(req));
}
static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
int nseg = blk_rq_nr_phys_segments(req);
unsigned int avg_seg_size;
- if (nseg == 0)
- return false;
-
avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
- if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
+ if (!nvme_ctrl_sgl_supported(&dev->ctrl))
return false;
- if (!iod->nvmeq->qid)
+ if (!nvmeq->qid)
return false;
if (!sgl_threshold || avg_seg_size < sgl_threshold)
return false;
return true;
}
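
A user-space sketch of the same decision; the 32 KiB threshold below is only a sample value for the sgl_threshold module parameter, and the payload sizes are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static bool use_sgls(unsigned int payload_bytes, unsigned int nseg,
		     unsigned int sgl_threshold, bool sgl_supported, int qid)
{
	unsigned int avg_seg_size = DIV_ROUND_UP(payload_bytes, nseg);

	if (!sgl_supported)
		return false;
	if (!qid)		/* never on the admin queue */
		return false;
	if (!sgl_threshold || avg_seg_size < sgl_threshold)
		return false;
	return true;
}

int main(void)
{
	/* 128 KiB in 32 scattered segments: 4 KiB average -> PRPs */
	printf("%d\n", use_sgls(128 * 1024, 32, 32 * 1024, true, 1));	/* 0 */
	/* 128 KiB in 2 segments: 64 KiB average -> SGLs */
	printf("%d\n", use_sgls(128 * 1024, 2, 32 * 1024, true, 1));	/* 1 */
	return 0;
}
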
-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
{
+ const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
- dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
+ dma_addr_t dma_addr = iod->first_dma;
int i;
- if (iod->dma_len) {
- dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
- rq_dma_dir(req));
- return;
- }
-
- WARN_ON_ONCE(!iod->nents);
+ for (i = 0; i < iod->nr_allocations; i++) {
+ __le64 *prp_list = nvme_pci_iod_list(req)[i];
+ dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
- if (is_pci_p2pdma_page(sg_page(iod->sg)))
- pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
- rq_dma_dir(req));
- else
- dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+ dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
+ dma_addr = next_dma_addr;
+ }
+}
+static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
+{
+ const int last_sg = SGES_PER_PAGE - 1;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ dma_addr_t dma_addr = iod->first_dma;
+ int i;
- if (iod->npages == 0)
- dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
- dma_addr);
+ for (i = 0; i < iod->nr_allocations; i++) {
+ struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
+ dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
- for (i = 0; i < iod->npages; i++) {
- void *addr = nvme_pci_iod_list(req)[i];
+ dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
+ dma_addr = next_dma_addr;
+ }
+}
- if (iod->use_sgl) {
- struct nvme_sgl_desc *sg_list = addr;
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- next_dma_addr =
- le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
- } else {
- __le64 *prp_list = addr;
+ if (iod->dma_len) {
+ dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
+ rq_dma_dir(req));
+ return;
+ }
- next_dma_addr = le64_to_cpu(prp_list[last_prp]);
- }
+ WARN_ON_ONCE(!iod->sgt.nents);
- dma_pool_free(dev->prp_page_pool, addr, dma_addr);
- dma_addr = next_dma_addr;
- }
+ dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
- mempool_free(iod->sg, dev->iod_mempool);
+ if (iod->nr_allocations == 0)
+ dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
+ iod->first_dma);
+ else if (iod->use_sgl)
+ nvme_free_sgls(dev, req);
+ else
+ nvme_free_prps(dev, req);
+ mempool_free(iod->sgt.sgl, dev->iod_mempool);
}
static void nvme_print_sgl(struct scatterlist *sgl, int nents)
@@ -587,69 +612,67 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct dma_pool *pool;
int length = blk_rq_payload_bytes(req);
- struct scatterlist *sg = iod->sg;
+ struct scatterlist *sg = iod->sgt.sgl;
int dma_len = sg_dma_len(sg);
u64 dma_addr = sg_dma_address(sg);
- u32 page_size = dev->ctrl.page_size;
- int offset = dma_addr & (page_size - 1);
+ int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
__le64 *prp_list;
void **list = nvme_pci_iod_list(req);
dma_addr_t prp_dma;
int nprps, i;
- length -= (page_size - offset);
+ length -= (NVME_CTRL_PAGE_SIZE - offset);
if (length <= 0) {
iod->first_dma = 0;
goto done;
}
- dma_len -= (page_size - offset);
+ dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
if (dma_len) {
- dma_addr += (page_size - offset);
+ dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
} else {
sg = sg_next(sg);
dma_addr = sg_dma_address(sg);
dma_len = sg_dma_len(sg);
}
- if (length <= page_size) {
+ if (length <= NVME_CTRL_PAGE_SIZE) {
iod->first_dma = dma_addr;
goto done;
}
- nprps = DIV_ROUND_UP(length, page_size);
+ nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
if (nprps <= (256 / 8)) {
pool = dev->prp_small_pool;
- iod->npages = 0;
+ iod->nr_allocations = 0;
} else {
pool = dev->prp_page_pool;
- iod->npages = 1;
+ iod->nr_allocations = 1;
}
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list) {
- iod->first_dma = dma_addr;
- iod->npages = -1;
+ iod->nr_allocations = -1;
return BLK_STS_RESOURCE;
}
list[0] = prp_list;
iod->first_dma = prp_dma;
i = 0;
for (;;) {
- if (i == page_size >> 3) {
+ if (i == NVME_CTRL_PAGE_SIZE >> 3) {
__le64 *old_prp_list = prp_list;
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list)
- return BLK_STS_RESOURCE;
- list[iod->npages++] = prp_list;
+ goto free_prps;
+ list[iod->nr_allocations++] = prp_list;
prp_list[0] = old_prp_list[i - 1];
old_prp_list[i - 1] = cpu_to_le64(prp_dma);
i = 1;
}
prp_list[i++] = cpu_to_le64(dma_addr);
- dma_len -= page_size;
- dma_addr += page_size;
- length -= page_size;
+ dma_len -= NVME_CTRL_PAGE_SIZE;
+ dma_addr += NVME_CTRL_PAGE_SIZE;
+ length -= NVME_CTRL_PAGE_SIZE;
if (length <= 0)
break;
if (dma_len > 0)
@@ -660,17 +683,17 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
dma_addr = sg_dma_address(sg);
dma_len = sg_dma_len(sg);
}
-
done:
- cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
-
return BLK_STS_OK;
-
- bad_sgl:
- WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
+free_prps:
+ nvme_free_prps(dev, req);
+ return BLK_STS_RESOURCE;
+bad_sgl:
+ WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
"Invalid SGL for payload:%d nents:%d\n",
- blk_rq_payload_bytes(req), iod->nents);
+ blk_rq_payload_bytes(req), iod->sgt.nents);
return BLK_STS_IOERR;
}
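
Roughly how the PRP entry count falls out of the walk above for a small request, sketched under assumed values (4 KiB controller page, page-aligned buffer):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int page = 4096;	/* NVME_CTRL_PAGE_SIZE (assumed) */
	unsigned int len = 16 * 1024;	/* 16 KiB, page-aligned start */
	unsigned int offset = 0;

	/* prp1 covers the first (possibly partial) page ... */
	unsigned int remaining = len - (page - offset);
	/* ... the rest needs one PRP entry per controller page */
	unsigned int nprps = DIV_ROUND_UP(remaining, page);

	/* 3 entries <= 256/8, so the small (256-byte) PRP pool is used */
	printf("extra PRP entries: %u\n", nprps);
	return 0;
}
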
@@ -696,12 +719,13 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
}
static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
- struct request *req, struct nvme_rw_command *cmd, int entries)
+ struct request *req, struct nvme_rw_command *cmd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct dma_pool *pool;
struct nvme_sgl_desc *sg_list;
- struct scatterlist *sg = iod->sg;
+ struct scatterlist *sg = iod->sgt.sgl;
+ unsigned int entries = iod->sgt.nents;
dma_addr_t sgl_dma;
int i = 0;
@@ -715,15 +739,15 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
pool = dev->prp_small_pool;
- iod->npages = 0;
+ iod->nr_allocations = 0;
} else {
pool = dev->prp_page_pool;
- iod->npages = 1;
+ iod->nr_allocations = 1;
}
sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
if (!sg_list) {
- iod->npages = -1;
+ iod->nr_allocations = -1;
return BLK_STS_RESOURCE;
}
@@ -739,10 +763,10 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
if (!sg_list)
- return BLK_STS_RESOURCE;
+ goto free_sgls;
i = 0;
- nvme_pci_iod_list(req)[iod->npages++] = sg_list;
+ nvme_pci_iod_list(req)[iod->nr_allocations++] = sg_list;
sg_list[i++] = *link;
nvme_pci_sgl_set_seg(link, sgl_dma, entries);
}
@@ -752,6 +776,9 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
} while (--entries > 0);
return BLK_STS_OK;
+free_sgls:
+ nvme_free_sgls(dev, req);
+ return BLK_STS_RESOURCE;
}
static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
@@ -759,8 +786,8 @@ static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
struct bio_vec *bv)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- unsigned int offset = bv->bv_offset & (dev->ctrl.page_size - 1);
- unsigned int first_prp_len = dev->ctrl.page_size - offset;
+ unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
+ unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;
iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
if (dma_mapping_error(dev->dev, iod->first_dma))
@@ -770,7 +797,7 @@ static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
if (bv->bv_len > first_prp_len)
cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
- return 0;
+ return BLK_STS_OK;
}
static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
@@ -788,7 +815,7 @@ static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
- return 0;
+ return BLK_STS_OK;
}
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
@@ -796,49 +823,54 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
blk_status_t ret = BLK_STS_RESOURCE;
- int nr_mapped;
+ int rc;
if (blk_rq_nr_phys_segments(req) == 1) {
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct bio_vec bv = req_bvec(req);
if (!is_pci_p2pdma_page(bv.bv_page)) {
- if (bv.bv_offset + bv.bv_len <= dev->ctrl.page_size * 2)
+ if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
return nvme_setup_prp_simple(dev, req,
&cmnd->rw, &bv);
- if (iod->nvmeq->qid &&
- dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
+ if (nvmeq->qid && sgl_threshold &&
+ nvme_ctrl_sgl_supported(&dev->ctrl))
return nvme_setup_sgl_simple(dev, req,
&cmnd->rw, &bv);
}
}
iod->dma_len = 0;
- iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
- if (!iod->sg)
+ iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
+ if (!iod->sgt.sgl)
return BLK_STS_RESOURCE;
- sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
- iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
- if (!iod->nents)
- goto out;
+ sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
+ iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
+ if (!iod->sgt.orig_nents)
+ goto out_free_sg;
- if (is_pci_p2pdma_page(sg_page(iod->sg)))
- nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
- iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN);
- else
- nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
- rq_dma_dir(req), DMA_ATTR_NO_WARN);
- if (!nr_mapped)
- goto out;
+ rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
+ DMA_ATTR_NO_WARN);
+ if (rc) {
+ if (rc == -EREMOTEIO)
+ ret = BLK_STS_TARGET;
+ goto out_free_sg;
+ }
iod->use_sgl = nvme_pci_use_sgls(dev, req);
if (iod->use_sgl)
- ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
+ ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
else
ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
-out:
if (ret != BLK_STS_OK)
- nvme_unmap_data(dev, req);
+ goto out_unmap_sg;
+ return BLK_STS_OK;
+
+out_unmap_sg:
+ dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
+out_free_sg:
+ mempool_free(iod->sgt.sgl, dev->iod_mempool);
return ret;
}
@@ -852,52 +884,35 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
if (dma_mapping_error(dev->dev, iod->meta_dma))
return BLK_STS_IOERR;
cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
- return 0;
+ return BLK_STS_OK;
}
-/*
- * NOTE: ns is NULL when called on the admin queue.
- */
-static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
+static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
{
- struct nvme_ns *ns = hctx->queue->queuedata;
- struct nvme_queue *nvmeq = hctx->driver_data;
- struct nvme_dev *dev = nvmeq->dev;
- struct request *req = bd->rq;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_command cmnd;
blk_status_t ret;
- iod->aborted = 0;
- iod->npages = -1;
- iod->nents = 0;
+ iod->aborted = false;
+ iod->nr_allocations = -1;
+ iod->sgt.nents = 0;
- /*
- * We should not need to do this, but we're still using this to
- * ensure we can drain requests on a dying queue.
- */
- if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
- return BLK_STS_IOERR;
-
- ret = nvme_setup_cmd(ns, req, &cmnd);
+ ret = nvme_setup_cmd(req->q->queuedata, req);
if (ret)
return ret;
if (blk_rq_nr_phys_segments(req)) {
- ret = nvme_map_data(dev, req, &cmnd);
+ ret = nvme_map_data(dev, req, &iod->cmd);
if (ret)
goto out_free_cmd;
}
if (blk_integrity_rq(req)) {
- ret = nvme_map_metadata(dev, req, &cmnd);
+ ret = nvme_map_metadata(dev, req, &iod->cmd);
if (ret)
goto out_unmap_data;
}
blk_mq_start_request(req);
- nvme_submit_cmd(nvmeq, &cmnd, bd->last);
return BLK_STS_OK;
out_unmap_data:
nvme_unmap_data(dev, req);
@@ -906,24 +921,129 @@ out_free_cmd:
return ret;
}
-static void nvme_pci_complete_rq(struct request *req)
+/*
+ * NOTE: ns is NULL when called on the admin queue.
+ */
+static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
+ struct nvme_queue *nvmeq = hctx->driver_data;
+ struct nvme_dev *dev = nvmeq->dev;
+ struct request *req = bd->rq;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_dev *dev = iod->nvmeq->dev;
+ blk_status_t ret;
+
+ /*
+ * We should not need to do this, but we're still using this to
+ * ensure we can drain requests on a dying queue.
+ */
+ if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
+ return BLK_STS_IOERR;
+
+ if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
+ return nvme_fail_nonready_command(&dev->ctrl, req);
+
+ ret = nvme_prep_rq(dev, req);
+ if (unlikely(ret))
+ return ret;
+ spin_lock(&nvmeq->sq_lock);
+ nvme_sq_copy_cmd(nvmeq, &iod->cmd);
+ nvme_write_sq_db(nvmeq, bd->last);
+ spin_unlock(&nvmeq->sq_lock);
+ return BLK_STS_OK;
+}
+
+static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
+{
+ spin_lock(&nvmeq->sq_lock);
+ while (!rq_list_empty(*rqlist)) {
+ struct request *req = rq_list_pop(rqlist);
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ nvme_sq_copy_cmd(nvmeq, &iod->cmd);
+ }
+ nvme_write_sq_db(nvmeq, true);
+ spin_unlock(&nvmeq->sq_lock);
+}
+
+static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
+{
+ /*
+ * We should not need to do this, but we're still using this to
+ * ensure we can drain requests on a dying queue.
+ */
+ if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
+ return false;
+ if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
+ return false;
+
+ req->mq_hctx->tags->rqs[req->tag] = req;
+ return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
+}
+
+static void nvme_queue_rqs(struct request **rqlist)
+{
+ struct request *req, *next, *prev = NULL;
+ struct request *requeue_list = NULL;
+
+ rq_list_for_each_safe(rqlist, req, next) {
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+
+ if (!nvme_prep_rq_batch(nvmeq, req)) {
+ /* detach 'req' and add to remainder list */
+ rq_list_move(rqlist, &requeue_list, req, prev);
+
+ req = prev;
+ if (!req)
+ continue;
+ }
+
+ if (!next || req->mq_hctx != next->mq_hctx) {
+ /* detach rest of list, and submit */
+ req->rq_next = NULL;
+ nvme_submit_cmds(nvmeq, rqlist);
+ *rqlist = next;
+ prev = NULL;
+ } else
+ prev = req;
+ }
+
+ *rqlist = requeue_list;
+}
+
+static __always_inline void nvme_pci_unmap_rq(struct request *req)
+{
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ struct nvme_dev *dev = nvmeq->dev;
+
+ if (blk_integrity_rq(req)) {
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- if (blk_integrity_rq(req))
dma_unmap_page(dev->dev, iod->meta_dma,
rq_integrity_vec(req)->bv_len, rq_data_dir(req));
+ }
+
if (blk_rq_nr_phys_segments(req))
nvme_unmap_data(dev, req);
+}
+
+static void nvme_pci_complete_rq(struct request *req)
+{
+ nvme_pci_unmap_rq(req);
nvme_complete_rq(req);
}
+static void nvme_pci_complete_batch(struct io_comp_batch *iob)
+{
+ nvme_complete_batch(iob, nvme_pci_unmap_rq);
+}
+
/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
{
- return (le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) ==
- nvmeq->cq_phase;
+ struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];
+
+ return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;
}
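
To see why the single phase bit in the check above is enough, a small simulation of a 4-entry completion queue wrapping once; the values are purely illustrative:

#include <stdio.h>

int main(void)
{
	unsigned int q_depth = 4, cq_head = 0, cq_phase = 1;
	/*
	 * Phase bit written by the controller for 6 completions: the first
	 * pass through the ring writes 1, after wrapping it writes 0.
	 */
	unsigned int status_phase[6] = { 1, 1, 1, 1, 0, 0 };

	for (int i = 0; i < 6; i++) {
		if (status_phase[i] != cq_phase)
			break;			/* entry not yet valid */
		printf("consumed CQE at head %u (phase %u)\n",
		       cq_head, cq_phase);
		if (++cq_head == q_depth) {	/* wrap: flip expected phase */
			cq_head = 0;
			cq_phase ^= 1;
		}
	}
	return 0;
}
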
static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
@@ -942,68 +1062,69 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
}
-static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
+static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
+ struct io_comp_batch *iob, u16 idx)
{
- volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
+ struct nvme_completion *cqe = &nvmeq->cqes[idx];
+ __u16 command_id = READ_ONCE(cqe->command_id);
struct request *req;
- if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
- dev_warn(nvmeq->dev->ctrl.device,
- "invalid id %d completed on queue %d\n",
- cqe->command_id, le16_to_cpu(cqe->sq_id));
- return;
- }
-
/*
* AEN requests are special as they don't time out and can
* survive any kind of queue freeze and often don't respond to
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
+ if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
nvme_complete_async_event(&nvmeq->dev->ctrl,
cqe->status, &cqe->result);
return;
}
- req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
- trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
- nvme_end_request(req, cqe->status, cqe->result);
-}
-
-static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
-{
- while (start != end) {
- nvme_handle_cqe(nvmeq, start);
- if (++start == nvmeq->q_depth)
- start = 0;
+ req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
+ if (unlikely(!req)) {
+ dev_warn(nvmeq->dev->ctrl.device,
+ "invalid id %d completed on queue %d\n",
+ command_id, le16_to_cpu(cqe->sq_id));
+ return;
}
+
+ trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
+ if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
+ !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
+ nvme_pci_complete_batch))
+ nvme_pci_complete_rq(req);
}
static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
- if (nvmeq->cq_head == nvmeq->q_depth - 1) {
+ u32 tmp = nvmeq->cq_head + 1;
+
+ if (tmp == nvmeq->q_depth) {
nvmeq->cq_head = 0;
- nvmeq->cq_phase = !nvmeq->cq_phase;
+ nvmeq->cq_phase ^= 1;
} else {
- nvmeq->cq_head++;
+ nvmeq->cq_head = tmp;
}
}
-static inline int nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
- u16 *end, unsigned int tag)
+static inline int nvme_poll_cq(struct nvme_queue *nvmeq,
+ struct io_comp_batch *iob)
{
int found = 0;
- *start = nvmeq->cq_head;
while (nvme_cqe_pending(nvmeq)) {
- if (tag == -1U || nvmeq->cqes[nvmeq->cq_head].command_id == tag)
- found++;
+ found++;
+ /*
+ * load-load control dependency between phase and the rest of
+ * the cqe requires a full read memory barrier
+ */
+ dma_rmb();
+ nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head);
nvme_update_cq_head(nvmeq);
}
- *end = nvmeq->cq_head;
- if (*start != *end)
+ if (found)
nvme_ring_cq_doorbell(nvmeq);
return found;
}
@@ -1011,74 +1132,50 @@ static inline int nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
static irqreturn_t nvme_irq(int irq, void *data)
{
struct nvme_queue *nvmeq = data;
- irqreturn_t ret = IRQ_NONE;
- u16 start, end;
-
- /*
- * The rmb/wmb pair ensures we see all updates from a previous run of
- * the irq handler, even if that was on another CPU.
- */
- rmb();
- nvme_process_cq(nvmeq, &start, &end, -1);
- wmb();
+ DEFINE_IO_COMP_BATCH(iob);
- if (start != end) {
- nvme_complete_cqes(nvmeq, start, end);
+ if (nvme_poll_cq(nvmeq, &iob)) {
+ if (!rq_list_empty(iob.req_list))
+ nvme_pci_complete_batch(&iob);
return IRQ_HANDLED;
}
-
- return ret;
+ return IRQ_NONE;
}
static irqreturn_t nvme_irq_check(int irq, void *data)
{
struct nvme_queue *nvmeq = data;
+
if (nvme_cqe_pending(nvmeq))
return IRQ_WAKE_THREAD;
return IRQ_NONE;
}
/*
- * Poll for completions any queue, including those not dedicated to polling.
+ * Poll for completions for any interrupt driven queue
* Can be called from any context.
*/
-static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)
+static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
{
struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
- u16 start, end;
- int found;
- /*
- * For a poll queue we need to protect against the polling thread
- * using the CQ lock. For normal interrupt driven threads we have
- * to disable the interrupt to avoid racing with it.
- */
- if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) {
- spin_lock(&nvmeq->cq_poll_lock);
- found = nvme_process_cq(nvmeq, &start, &end, tag);
- spin_unlock(&nvmeq->cq_poll_lock);
- } else {
- disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
- found = nvme_process_cq(nvmeq, &start, &end, tag);
- enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
- }
+ WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));
- nvme_complete_cqes(nvmeq, start, end);
- return found;
+ disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
+ nvme_poll_cq(nvmeq, NULL);
+ enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
}
-static int nvme_poll(struct blk_mq_hw_ctx *hctx)
+static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct nvme_queue *nvmeq = hctx->driver_data;
- u16 start, end;
bool found;
if (!nvme_cqe_pending(nvmeq))
return 0;
spin_lock(&nvmeq->cq_poll_lock);
- found = nvme_process_cq(nvmeq, &start, &end, -1);
- nvme_complete_cqes(nvmeq, start, end);
+ found = nvme_poll_cq(nvmeq, iob);
spin_unlock(&nvmeq->cq_poll_lock);
return found;
@@ -1088,19 +1185,21 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{
struct nvme_dev *dev = to_nvme_dev(ctrl);
struct nvme_queue *nvmeq = &dev->queues[0];
- struct nvme_command c;
+ struct nvme_command c = { };
- memset(&c, 0, sizeof(c));
c.common.opcode = nvme_admin_async_event;
c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
- nvme_submit_cmd(nvmeq, &c, true);
+
+ spin_lock(&nvmeq->sq_lock);
+ nvme_sq_copy_cmd(nvmeq, &c);
+ nvme_write_sq_db(nvmeq, true);
+ spin_unlock(&nvmeq->sq_lock);
}
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
- struct nvme_command c;
+ struct nvme_command c = { };
- memset(&c, 0, sizeof(c));
c.delete_queue.opcode = opcode;
c.delete_queue.qid = cpu_to_le16(id);
@@ -1110,7 +1209,7 @@ static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
struct nvme_queue *nvmeq, s16 vector)
{
- struct nvme_command c;
+ struct nvme_command c = { };
int flags = NVME_QUEUE_PHYS_CONTIG;
if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
@@ -1120,7 +1219,6 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
* Note: we (ab)use the fact that the prp fields survive if no data
* is attached to the request.
*/
- memset(&c, 0, sizeof(c));
c.create_cq.opcode = nvme_admin_create_cq;
c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
c.create_cq.cqid = cpu_to_le16(qid);
@@ -1135,7 +1233,7 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
struct nvme_queue *nvmeq)
{
struct nvme_ctrl *ctrl = &dev->ctrl;
- struct nvme_command c;
+ struct nvme_command c = { };
int flags = NVME_QUEUE_PHYS_CONTIG;
/*
@@ -1150,7 +1248,6 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
* Note: we (ab)use the fact that the prp fields survive if no data
* is attached to the request.
*/
- memset(&c, 0, sizeof(c));
c.create_sq.opcode = nvme_admin_create_sq;
c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
c.create_sq.sqid = cpu_to_le16(qid);
@@ -1171,20 +1268,19 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}
-static void abort_endio(struct request *req, blk_status_t error)
+static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_queue *nvmeq = iod->nvmeq;
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
dev_warn(nvmeq->dev->ctrl.device,
"Abort status: 0x%x", nvme_req(req)->status);
atomic_inc(&nvmeq->dev->ctrl.abort_limit);
blk_mq_free_request(req);
+ return RQ_END_IO_NONE;
}
static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
{
-
/* If true, indicates loss of adapter communication, possibly by a
* NVMe Subsystem reset.
*/
@@ -1224,15 +1320,23 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
dev_warn(dev->ctrl.device,
"controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
csts, result);
+
+ if (csts != ~0)
+ return;
+
+ dev_warn(dev->ctrl.device,
+ "Does your device have a faulty power saving mode enabled?\n");
+ dev_warn(dev->ctrl.device,
+ "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n");
}
-static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
+static enum blk_eh_timer_return nvme_timeout(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_queue *nvmeq = iod->nvmeq;
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct nvme_dev *dev = nvmeq->dev;
struct request *abort_req;
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
u32 csts = readl(dev->bar + NVME_REG_CSTS);
/* If PCI error recovery process is happening, we cannot reset or
@@ -1255,7 +1359,12 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
/*
* Did we miss an interrupt?
*/
- if (nvme_poll_irqdisable(nvmeq, req->tag)) {
+ if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
+ nvme_poll(req->mq_hctx, NULL);
+ else
+ nvme_poll_irqdisable(nvmeq);
+
+ if (blk_mq_request_completed(req)) {
dev_warn(dev->ctrl.device,
"I/O %d QID %d timeout, completion polled\n",
req->tag, nvmeq->qid);
@@ -1271,13 +1380,13 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
switch (dev->ctrl.state) {
case NVME_CTRL_CONNECTING:
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
- /* fall through */
+ fallthrough;
case NVME_CTRL_DELETING:
dev_warn_ratelimited(dev->ctrl.device,
"I/O %d QID %d timeout, disable controller\n",
req->tag, nvmeq->qid);
- nvme_dev_disable(dev, true);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
+ nvme_dev_disable(dev, true);
return BLK_EH_DONE;
case NVME_CTRL_RESETTING:
return BLK_EH_RESET_TIMER;
@@ -1286,18 +1395,18 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
}
/*
- * Shutdown the controller immediately and schedule a reset if the
- * command was already aborted once before and still hasn't been
- * returned to the driver, or if this is the admin queue.
+ * Shutdown the controller immediately and schedule a reset if the
+ * command was already aborted once before and still hasn't been
+ * returned to the driver, or if this is the admin queue.
*/
if (!nvmeq->qid || iod->aborted) {
dev_warn(dev->ctrl.device,
"I/O %d QID %d timeout, reset controller\n",
req->tag, nvmeq->qid);
+ nvme_req(req)->flags |= NVME_REQ_CANCELLED;
nvme_dev_disable(dev, false);
nvme_reset_ctrl(&dev->ctrl);
- nvme_req(req)->flags |= NVME_REQ_CANCELLED;
return BLK_EH_DONE;
}
@@ -1305,27 +1414,29 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
atomic_inc(&dev->ctrl.abort_limit);
return BLK_EH_RESET_TIMER;
}
- iod->aborted = 1;
+ iod->aborted = true;
- memset(&cmd, 0, sizeof(cmd));
cmd.abort.opcode = nvme_admin_abort_cmd;
- cmd.abort.cid = req->tag;
+ cmd.abort.cid = nvme_cid(req);
cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
dev_warn(nvmeq->dev->ctrl.device,
- "I/O %d QID %d timeout, aborting\n",
- req->tag, nvmeq->qid);
+ "I/O %d (%s) QID %d timeout, aborting\n",
+ req->tag,
+ nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode),
+ nvmeq->qid);
- abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
- BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
+ abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
+ BLK_MQ_REQ_NOWAIT);
if (IS_ERR(abort_req)) {
atomic_inc(&dev->ctrl.abort_limit);
return BLK_EH_RESET_TIMER;
}
+ nvme_init_request(abort_req, &cmd);
- abort_req->timeout = ADMIN_TIMEOUT;
+ abort_req->end_io = abort_endio;
abort_req->end_io_data = NULL;
- blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
+ blk_execute_rq_nowait(abort_req, false);
/*
* The aborted req will be completed on receiving the abort req.
@@ -1375,7 +1486,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
nvmeq->dev->online_queues--;
if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
- blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);
+ nvme_stop_admin_queue(&nvmeq->dev->ctrl);
if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq);
return 0;
@@ -1398,23 +1509,23 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
else
nvme_disable_ctrl(&dev->ctrl);
- nvme_poll_irqdisable(nvmeq, -1);
+ nvme_poll_irqdisable(nvmeq);
}
/*
* Called only on a device that has been disabled and after all other threads
- * that can check this device's completion queues have synced. This is the
- * last chance for the driver to see a natural completion before
- * nvme_cancel_request() terminates all incomplete requests.
+ * that can check this device's completion queues have synced, except
+ * nvme_poll(). This is the last chance for the driver to see a natural
+ * completion before nvme_cancel_request() terminates all incomplete requests.
*/
static void nvme_reap_pending_cqes(struct nvme_dev *dev)
{
- u16 start, end;
int i;
for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
- nvme_process_cq(&dev->queues[i], &start, &end, -1);
- nvme_complete_cqes(&dev->queues[i], start, end);
+ spin_lock(&dev->queues[i].cq_poll_lock);
+ nvme_poll_cq(&dev->queues[i], NULL);
+ spin_unlock(&dev->queues[i].cq_poll_lock);
}
}
@@ -1423,11 +1534,12 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
{
int q_depth = dev->q_depth;
unsigned q_size_aligned = roundup(q_depth * entry_size,
- dev->ctrl.page_size);
+ NVME_CTRL_PAGE_SIZE);
if (q_size_aligned * nr_io_queues > dev->cmb_size) {
u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
- mem_per_q = round_down(mem_per_q, dev->ctrl.page_size);
+
+ mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE);
q_depth = div_u64(mem_per_q, entry_size);
/*
@@ -1532,6 +1644,28 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
wmb(); /* ensure the first interrupt sees the initialization */
}
+/*
+ * Try getting shutdown_lock while setting up IO queues.
+ */
+static int nvme_setup_io_queues_trylock(struct nvme_dev *dev)
+{
+ /*
+ * Give up if the lock is being held by nvme_dev_disable.
+ */
+ if (!mutex_trylock(&dev->shutdown_lock))
+ return -ENODEV;
+
+ /*
+ * Controller is in wrong state, fail early.
+ */
+ if (dev->ctrl.state != NVME_CTRL_CONNECTING) {
+ mutex_unlock(&dev->shutdown_lock);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
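The helper above exists because queue setup must never sleep on shutdown_lock while nvme_dev_disable() is tearing the device down; failing fast with -ENODEV is preferable to a deadlock-prone wait. A minimal userspace sketch of the same give-up-instead-of-waiting pattern, using pthreads (all names here are illustrative stand-ins, not driver symbols):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t shutdown_lock = PTHREAD_MUTEX_INITIALIZER;
static int ctrl_connecting = 1;         /* stand-in for NVME_CTRL_CONNECTING */

/* Give up instead of waiting if the teardown path already holds the lock. */
static int setup_trylock(void)
{
        if (pthread_mutex_trylock(&shutdown_lock) != 0)
                return -ENODEV;         /* teardown in progress, bail out */
        if (!ctrl_connecting) {
                pthread_mutex_unlock(&shutdown_lock);
                return -ENODEV;         /* wrong state, fail early */
        }
        return 0;                       /* caller proceeds and unlocks when done */
}

int main(void)
{
        if (setup_trylock() == 0) {
                printf("setup may proceed\n");
                pthread_mutex_unlock(&shutdown_lock);
        } else {
                printf("teardown in progress, giving up\n");
        }
        return 0;
}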
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
{
struct nvme_dev *dev = nvmeq->dev;
@@ -1560,8 +1694,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
goto release_cq;
nvmeq->cq_vector = vector;
- nvme_init_queue(nvmeq, qid);
+ result = nvme_setup_io_queues_trylock(dev);
+ if (result)
+ return result;
+ nvme_init_queue(nvmeq, qid);
if (!polled) {
result = queue_request_irq(nvmeq);
if (result < 0)
@@ -1569,10 +1706,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
}
set_bit(NVMEQ_ENABLED, &nvmeq->flags);
+ mutex_unlock(&dev->shutdown_lock);
return result;
release_sq:
dev->online_queues--;
+ mutex_unlock(&dev->shutdown_lock);
adapter_delete_sq(dev, qid);
release_cq:
adapter_delete_cq(dev, qid);
@@ -1583,16 +1722,17 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
.queue_rq = nvme_queue_rq,
.complete = nvme_pci_complete_rq,
.init_hctx = nvme_admin_init_hctx,
- .init_request = nvme_init_request,
+ .init_request = nvme_pci_init_request,
.timeout = nvme_timeout,
};
static const struct blk_mq_ops nvme_mq_ops = {
.queue_rq = nvme_queue_rq,
+ .queue_rqs = nvme_queue_rqs,
.complete = nvme_pci_complete_rq,
.commit_rqs = nvme_commit_rqs,
.init_hctx = nvme_init_hctx,
- .init_request = nvme_init_request,
+ .init_request = nvme_pci_init_request,
.map_queues = nvme_pci_map_queues,
.timeout = nvme_timeout,
.poll = nvme_poll,
@@ -1606,42 +1746,41 @@ static void nvme_dev_remove_admin(struct nvme_dev *dev)
* user requests may be waiting on a stopped queue. Start the
* queue to flush these to completion.
*/
- blk_mq_unquiesce_queue(dev->ctrl.admin_q);
- blk_cleanup_queue(dev->ctrl.admin_q);
+ nvme_start_admin_queue(&dev->ctrl);
+ blk_mq_destroy_queue(dev->ctrl.admin_q);
blk_mq_free_tag_set(&dev->admin_tagset);
}
}
-static int nvme_alloc_admin_tags(struct nvme_dev *dev)
+static int nvme_pci_alloc_admin_tag_set(struct nvme_dev *dev)
{
- if (!dev->ctrl.admin_q) {
- dev->admin_tagset.ops = &nvme_mq_admin_ops;
- dev->admin_tagset.nr_hw_queues = 1;
+ struct blk_mq_tag_set *set = &dev->admin_tagset;
- dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- dev->admin_tagset.timeout = ADMIN_TIMEOUT;
- dev->admin_tagset.numa_node = dev_to_node(dev->dev);
- dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
- dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
- dev->admin_tagset.driver_data = dev;
+ set->ops = &nvme_mq_admin_ops;
+ set->nr_hw_queues = 1;
- if (blk_mq_alloc_tag_set(&dev->admin_tagset))
- return -ENOMEM;
- dev->ctrl.admin_tagset = &dev->admin_tagset;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+ set->timeout = NVME_ADMIN_TIMEOUT;
+ set->numa_node = dev->ctrl.numa_node;
+ set->cmd_size = sizeof(struct nvme_iod);
+ set->flags = BLK_MQ_F_NO_SCHED;
+ set->driver_data = dev;
- dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
- if (IS_ERR(dev->ctrl.admin_q)) {
- blk_mq_free_tag_set(&dev->admin_tagset);
- return -ENOMEM;
- }
- if (!blk_get_queue(dev->ctrl.admin_q)) {
- nvme_dev_remove_admin(dev);
- dev->ctrl.admin_q = NULL;
- return -ENODEV;
- }
- } else
- blk_mq_unquiesce_queue(dev->ctrl.admin_q);
+ if (blk_mq_alloc_tag_set(set))
+ return -ENOMEM;
+ dev->ctrl.admin_tagset = set;
+ dev->ctrl.admin_q = blk_mq_init_queue(set);
+ if (IS_ERR(dev->ctrl.admin_q)) {
+ blk_mq_free_tag_set(set);
+ dev->ctrl.admin_q = NULL;
+ return -ENOMEM;
+ }
+ if (!blk_get_queue(dev->ctrl.admin_q)) {
+ nvme_dev_remove_admin(dev);
+ dev->ctrl.admin_q = NULL;
+ return -ENODEV;
+ }
return 0;
}
@@ -1696,6 +1835,8 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
if (result)
return result;
+ dev->ctrl.numa_node = dev_to_node(dev->dev);
+
nvmeq = &dev->queues[0];
aqa = nvmeq->q_depth - 1;
aqa |= aqa << 16;
@@ -1757,17 +1898,6 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
return ret >= 0 ? 0 : ret;
}
-static ssize_t nvme_cmb_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
-
- return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n",
- ndev->cmbloc, ndev->cmbsz);
-}
-static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
-
static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
{
u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
@@ -1790,6 +1920,9 @@ static void nvme_map_cmb(struct nvme_dev *dev)
if (dev->cmb_size)
return;
+ if (NVME_CAP_CMBS(dev->ctrl.cap))
+ writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);
+
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
if (!dev->cmbsz)
return;
@@ -1804,6 +1937,16 @@ static void nvme_map_cmb(struct nvme_dev *dev)
return;
/*
+ * Tell the controller about the host side address mapping the CMB,
+ * and enable CMB decoding for the NVMe 1.4+ scheme:
+ */
+ if (NVME_CAP_CMBS(dev->ctrl.cap)) {
+ hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
+ (pci_bus_address(pdev, bar) + offset),
+ dev->bar + NVME_REG_CMBMSC);
+ }
+
+ /*
* Controllers may support a CMB size larger than their BAR,
* for example, due to being behind a bridge. Reduce the CMB to
* the reported size of the BAR
@@ -1823,34 +1966,19 @@ static void nvme_map_cmb(struct nvme_dev *dev)
if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
(NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
pci_p2pmem_publish(pdev, true);
-
- if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
- &dev_attr_cmb.attr, NULL))
- dev_warn(dev->ctrl.device,
- "failed to add sysfs attribute for CMB\n");
-}
-
-static inline void nvme_release_cmb(struct nvme_dev *dev)
-{
- if (dev->cmb_size) {
- sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
- &dev_attr_cmb.attr, NULL);
- dev->cmb_size = 0;
- }
}
static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
{
+ u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT;
u64 dma_addr = dev->host_mem_descs_dma;
- struct nvme_command c;
+ struct nvme_command c = { };
int ret;
- memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_set_features;
c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
c.features.dword11 = cpu_to_le32(bits);
- c.features.dword12 = cpu_to_le32(dev->host_mem_size >>
- ilog2(dev->ctrl.page_size));
+ c.features.dword12 = cpu_to_le32(host_mem_size);
c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr));
c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr));
c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs);
@@ -1860,7 +1988,9 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
dev_warn(dev->ctrl.device,
"failed to set host mem (err %d, flags %#x).\n",
ret, bits);
- }
+ } else
+ dev->hmb = bits & NVME_HOST_MEM_ENABLE;
+
return ret;
}
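The dword packing in nvme_set_host_mem() above is plain integer arithmetic: the buffer size goes into dword 12 in units of controller pages, and the 64-bit descriptor-list DMA address is split across dwords 13 and 14. A standalone sketch, assuming the usual 4 KiB controller page (NVME_CTRL_PAGE_SHIFT of 12):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NVME_CTRL_PAGE_SHIFT 12                 /* assumed 4 KiB controller page */

int main(void)
{
        uint64_t host_mem_size = 32ULL << 20;   /* 32 MiB host memory buffer */
        uint64_t dma_addr = 0x123456789000ULL;  /* descriptor-list DMA address */

        uint32_t dword12 = host_mem_size >> NVME_CTRL_PAGE_SHIFT;  /* size in pages */
        uint32_t dword13 = (uint32_t)dma_addr;                     /* lower 32 bits */
        uint32_t dword14 = (uint32_t)(dma_addr >> 32);             /* upper 32 bits */

        printf("dword12=%" PRIu32 " dword13=0x%08" PRIx32 " dword14=0x%08" PRIx32 "\n",
               dword12, dword13, dword14);
        return 0;
}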
@@ -1870,7 +2000,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
for (i = 0; i < dev->nr_host_mem_descs; i++) {
struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
- size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
+ size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE;
dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
le64_to_cpu(desc->addr),
@@ -1922,7 +2052,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
break;
descs[i].addr = cpu_to_le64(dma_addr);
- descs[i].size = cpu_to_le32(len / dev->ctrl.page_size);
+ descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE);
i++;
}
@@ -1938,7 +2068,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
out_free_bufs:
while (--i >= 0) {
- size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
+ size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE;
dma_free_attrs(dev->dev, size, bufs[i],
le64_to_cpu(descs[i].addr),
@@ -1956,12 +2086,12 @@ out:
static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
{
- u32 chunk_size;
+ u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
+ u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
+ u64 chunk_size;
/* start big and work our way down */
- for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
- chunk_size >= max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
- chunk_size /= 2) {
+ for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) {
if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
if (!min || dev->host_mem_size >= min)
return 0;
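The rewritten loop above reads more clearly with its bounds spelled out: start at the preferred HMB size (capped by the largest contiguous allocation the kernel will hand out) and keep halving until either enough memory is covered or the chunk drops below the controller's minimum descriptor granularity. A standalone sketch of just the size walk, with made-up bounds:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t preferred = 256ULL << 20;      /* 256 MiB preferred HMB */
        uint64_t max_alloc = 4ULL << 20;        /* stand-in for PAGE_SIZE * MAX_ORDER_NR_PAGES */
        uint64_t hmminds = 32 * 4096;           /* minimum descriptor granularity */

        uint64_t min_chunk = preferred < max_alloc ? preferred : max_alloc;
        uint64_t chunk;

        /* Start big and work our way down, as the driver does. */
        for (chunk = min_chunk; chunk >= hmminds; chunk /= 2)
                printf("try chunks of %llu KiB\n",
                       (unsigned long long)(chunk >> 10));
        return 0;
}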
@@ -2017,6 +2147,102 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
return ret;
}
+static ssize_t cmb_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+ return sysfs_emit(buf, "cmbloc : x%08x\ncmbsz : x%08x\n",
+ ndev->cmbloc, ndev->cmbsz);
+}
+static DEVICE_ATTR_RO(cmb);
+
+static ssize_t cmbloc_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+ return sysfs_emit(buf, "%u\n", ndev->cmbloc);
+}
+static DEVICE_ATTR_RO(cmbloc);
+
+static ssize_t cmbsz_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+ return sysfs_emit(buf, "%u\n", ndev->cmbsz);
+}
+static DEVICE_ATTR_RO(cmbsz);
+
+static ssize_t hmb_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+ return sysfs_emit(buf, "%d\n", ndev->hmb);
+}
+
+static ssize_t hmb_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+ bool new;
+ int ret;
+
+ if (strtobool(buf, &new) < 0)
+ return -EINVAL;
+
+ if (new == ndev->hmb)
+ return count;
+
+ if (new) {
+ ret = nvme_setup_host_mem(ndev);
+ } else {
+ ret = nvme_set_host_mem(ndev, 0);
+ if (!ret)
+ nvme_free_host_mem(ndev);
+ }
+
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_RW(hmb);
+
+static umode_t nvme_pci_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct nvme_ctrl *ctrl =
+ dev_get_drvdata(container_of(kobj, struct device, kobj));
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
+
+ if (a == &dev_attr_cmb.attr ||
+ a == &dev_attr_cmbloc.attr ||
+ a == &dev_attr_cmbsz.attr) {
+ if (!dev->cmbsz)
+ return 0;
+ }
+ if (a == &dev_attr_hmb.attr && !ctrl->hmpre)
+ return 0;
+
+ return a->mode;
+}
+
+static struct attribute *nvme_pci_attrs[] = {
+ &dev_attr_cmb.attr,
+ &dev_attr_cmbloc.attr,
+ &dev_attr_cmbsz.attr,
+ &dev_attr_hmb.attr,
+ NULL,
+};
+
+static const struct attribute_group nvme_pci_attr_group = {
+ .attrs = nvme_pci_attrs,
+ .is_visible = nvme_pci_attrs_are_visible,
+};
+
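Once the attribute group above is registered, the host memory buffer can be toggled at runtime with a boolean write, as accepted by strtobool() in hmb_store(). A usage sketch; the sysfs path is an assumption for controller instance 0 and depends on where the attribute group is attached:

#include <stdio.h>

int main(void)
{
        /* Assumed path for controller instance 0; adjust to the actual device. */
        const char *path = "/sys/class/nvme/nvme0/hmb";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return 1;
        }
        /* "0" releases the buffer; "1" would set it up again (strtobool syntax). */
        fputs("0", f);
        fclose(f);
        return 0;
}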
/*
* nirqs is the number of interrupts available for write and read
* queues. The core already reserved an interrupt for the admin queue.
@@ -2024,10 +2250,10 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
{
struct nvme_dev *dev = affd->priv;
- unsigned int nr_read_queues;
+ unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues;
/*
- * If there is no interupt available for queues, ensure that
+ * If there is no interrupt available for queues, ensure that
* the default queue is set to 1. The affinity set size is
* also set to one, but the irq core ignores it for this case.
*
@@ -2040,12 +2266,12 @@ static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
if (!nrirqs) {
nrirqs = 1;
nr_read_queues = 0;
- } else if (nrirqs == 1 || !write_queues) {
+ } else if (nrirqs == 1 || !nr_write_queues) {
nr_read_queues = 0;
- } else if (write_queues >= nrirqs) {
+ } else if (nr_write_queues >= nrirqs) {
nr_read_queues = 1;
} else {
- nr_read_queues = nrirqs - write_queues;
+ nr_read_queues = nrirqs - nr_write_queues;
}
dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
@@ -2063,32 +2289,30 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
.calc_sets = nvme_calc_irq_sets,
.priv = dev,
};
- unsigned int irq_queues, this_p_queues;
+ unsigned int irq_queues, poll_queues;
/*
- * Poll queues don't need interrupts, but we need at least one IO
- * queue left over for non-polled IO.
+ * Poll queues don't need interrupts, but we need at least one I/O queue
+ * left over for non-polled I/O.
*/
- this_p_queues = poll_queues;
- if (this_p_queues >= nr_io_queues) {
- this_p_queues = nr_io_queues - 1;
- irq_queues = 1;
- } else {
- irq_queues = nr_io_queues - this_p_queues + 1;
- }
- dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
+ poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1);
+ dev->io_queues[HCTX_TYPE_POLL] = poll_queues;
- /* Initialize for the single interrupt case */
+ /*
+ * Initialize for the single interrupt case; this will be updated in
+ * nvme_calc_irq_sets().
+ */
dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
dev->io_queues[HCTX_TYPE_READ] = 0;
/*
- * Some Apple controllers require all queues to use the
- * first vector.
+ * We need interrupts for the admin queue and each non-polled I/O queue,
+ * but some Apple controllers require all queues to use the first
+ * vector.
*/
- if (dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)
- irq_queues = 1;
-
+ irq_queues = 1;
+ if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
+ irq_queues += (nr_io_queues - poll_queues);
return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
}
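nvme_setup_irqs() and nvme_calc_irq_sets() together split a fixed interrupt budget between default (write), read, and polled queues: polled queues take no vectors, one vector is reserved for the admin queue, and whatever the core grants for I/O is divided between the default and read sets. A standalone sketch of the arithmetic with illustrative numbers (the module-parameter values are stand-ins, and the sketch assumes the full vector request is granted):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned int nr_io_queues = 8;          /* queues granted by the controller */
        unsigned int nr_poll_param = 2;         /* stand-in for the poll_queues parameter */
        unsigned int nr_write_param = 3;        /* stand-in for the write_queues parameter */
        int single_vector_quirk = 0;            /* NVME_QUIRK_SINGLE_VECTOR */

        /* Poll queues take no interrupts, but keep one non-polled I/O queue. */
        unsigned int poll_queues = min_u(nr_poll_param, nr_io_queues - 1);

        /* One vector for the admin queue plus one per non-polled I/O queue. */
        unsigned int irq_queues = 1;
        if (!single_vector_quirk)
                irq_queues += nr_io_queues - poll_queues;

        /* As in nvme_calc_irq_sets(): split the I/O vectors into default vs. read. */
        unsigned int nrirqs = irq_queues - 1;   /* assume the full request was granted */
        unsigned int nr_read_queues;

        if (!nrirqs) {
                nrirqs = 1;
                nr_read_queues = 0;
        } else if (nrirqs == 1 || !nr_write_param) {
                nr_read_queues = 0;
        } else if (nr_write_param >= nrirqs) {
                nr_read_queues = 1;
        } else {
                nr_read_queues = nrirqs - nr_write_param;
        }

        printf("poll=%u vectors=%u default=%u read=%u\n", poll_queues,
               irq_queues, nrirqs - nr_read_queues, nr_read_queues);
        return 0;
}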
@@ -2099,30 +2323,52 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
__nvme_disable_io_queues(dev, nvme_admin_delete_cq);
}
+static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
+{
+ /*
+ * If tags are shared with admin queue (Apple bug), then
+ * If tags are shared with the admin queue (Apple bug), then
+ */
+ if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
+ return 1;
+ return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues;
+}
+
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
struct nvme_queue *adminq = &dev->queues[0];
struct pci_dev *pdev = to_pci_dev(dev->dev);
- int result, nr_io_queues;
+ unsigned int nr_io_queues;
unsigned long size;
-
- nr_io_queues = max_io_queues();
+ int result;
/*
- * If tags are shared with admin queue (Apple bug), then
- * make sure we only use one IO queue.
+ * Sample the module parameters once at reset time so that we have
+ * stable values to work with.
*/
- if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
- nr_io_queues = 1;
+ dev->nr_write_queues = write_queues;
+ dev->nr_poll_queues = poll_queues;
+ nr_io_queues = dev->nr_allocated_queues - 1;
result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
if (result < 0)
return result;
if (nr_io_queues == 0)
return 0;
-
- clear_bit(NVMEQ_ENABLED, &adminq->flags);
+
+ /*
+ * Free IRQ resources as soon as NVMEQ_ENABLED bit transitions
+ * from set to unset. If there is a window before it is truly freed,
+ * pci_free_irq_vectors() jumping into this window will crash.
+ * Take the lock to avoid racing with pci_free_irq_vectors() in the
+ * nvme_dev_disable() path.
+ */
+ result = nvme_setup_io_queues_trylock(dev);
+ if (result)
+ return result;
+ if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
+ pci_free_irq(pdev, 0, adminq);
if (dev->cmb_use_sqes) {
result = nvme_cmb_qdepth(dev, nr_io_queues,
@@ -2138,14 +2384,17 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
result = nvme_remap_bar(dev, size);
if (!result)
break;
- if (!--nr_io_queues)
- return -ENOMEM;
+ if (!--nr_io_queues) {
+ result = -ENOMEM;
+ goto out_unlock;
+ }
} while (1);
adminq->q_db = dev->dbs;
retry:
/* Deregister the admin queue's interrupt */
- pci_free_irq(pdev, 0, adminq);
+ if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
+ pci_free_irq(pdev, 0, adminq);
/*
* If we enable msix early due to not intx, disable it again before
@@ -2154,8 +2403,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
pci_free_irq_vectors(pdev);
result = nvme_setup_irqs(dev, nr_io_queues);
- if (result <= 0)
- return -EIO;
+ if (result <= 0) {
+ result = -EIO;
+ goto out_unlock;
+ }
dev->num_vecs = result;
result = max(result - 1, 1);
@@ -2169,8 +2420,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
*/
result = queue_request_irq(adminq);
if (result)
- return result;
+ goto out_unlock;
set_bit(NVMEQ_ENABLED, &adminq->flags);
+ mutex_unlock(&dev->shutdown_lock);
result = nvme_create_io_queues(dev);
if (result || dev->online_queues < 2)
@@ -2179,6 +2431,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (dev->online_queues - 1 < dev->max_qid) {
nr_io_queues = dev->online_queues - 1;
nvme_disable_io_queues(dev);
+ result = nvme_setup_io_queues_trylock(dev);
+ if (result)
+ return result;
nvme_suspend_io_queues(dev);
goto retry;
}
@@ -2187,47 +2442,54 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
dev->io_queues[HCTX_TYPE_READ],
dev->io_queues[HCTX_TYPE_POLL]);
return 0;
+out_unlock:
+ mutex_unlock(&dev->shutdown_lock);
+ return result;
}
-static void nvme_del_queue_end(struct request *req, blk_status_t error)
+static enum rq_end_io_ret nvme_del_queue_end(struct request *req,
+ blk_status_t error)
{
struct nvme_queue *nvmeq = req->end_io_data;
blk_mq_free_request(req);
complete(&nvmeq->delete_done);
+ return RQ_END_IO_NONE;
}
-static void nvme_del_cq_end(struct request *req, blk_status_t error)
+static enum rq_end_io_ret nvme_del_cq_end(struct request *req,
+ blk_status_t error)
{
struct nvme_queue *nvmeq = req->end_io_data;
if (error)
set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
- nvme_del_queue_end(req, error);
+ return nvme_del_queue_end(req, error);
}
static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
{
struct request_queue *q = nvmeq->dev->ctrl.admin_q;
struct request *req;
- struct nvme_command cmd;
+ struct nvme_command cmd = { };
- memset(&cmd, 0, sizeof(cmd));
cmd.delete_queue.opcode = opcode;
cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
- req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
+ req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT);
if (IS_ERR(req))
return PTR_ERR(req);
+ nvme_init_request(req, &cmd);
- req->timeout = ADMIN_TIMEOUT;
+ if (opcode == nvme_admin_delete_cq)
+ req->end_io = nvme_del_cq_end;
+ else
+ req->end_io = nvme_del_queue_end;
req->end_io_data = nvmeq;
init_completion(&nvmeq->delete_done);
- blk_execute_rq_nowait(q, NULL, req, false,
- opcode == nvme_admin_delete_cq ?
- nvme_del_cq_end : nvme_del_queue_end);
+ blk_execute_rq_nowait(req, false);
return 0;
}
@@ -2237,7 +2499,7 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
unsigned long timeout;
retry:
- timeout = ADMIN_TIMEOUT;
+ timeout = NVME_ADMIN_TIMEOUT;
while (nr_queues > 0) {
if (nvme_delete_queue(&dev->queues[nr_queues], opcode))
break;
@@ -2259,60 +2521,63 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
return true;
}
-static void nvme_dev_add(struct nvme_dev *dev)
+static void nvme_pci_alloc_tag_set(struct nvme_dev *dev)
{
+ struct blk_mq_tag_set *set = &dev->tagset;
int ret;
- if (!dev->ctrl.tagset) {
- dev->tagset.ops = &nvme_mq_ops;
- dev->tagset.nr_hw_queues = dev->online_queues - 1;
- dev->tagset.nr_maps = 2; /* default + read */
- if (dev->io_queues[HCTX_TYPE_POLL])
- dev->tagset.nr_maps++;
- dev->tagset.timeout = NVME_IO_TIMEOUT;
- dev->tagset.numa_node = dev_to_node(dev->dev);
- dev->tagset.queue_depth =
- min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
- dev->tagset.cmd_size = sizeof(struct nvme_iod);
- dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
- dev->tagset.driver_data = dev;
-
- /*
- * Some Apple controllers requires tags to be unique
- * across admin and IO queue, so reserve the first 32
- * tags of the IO queue.
- */
- if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
- dev->tagset.reserved_tags = NVME_AQ_DEPTH;
+ set->ops = &nvme_mq_ops;
+ set->nr_hw_queues = dev->online_queues - 1;
+ set->nr_maps = 1;
+ if (dev->io_queues[HCTX_TYPE_READ])
+ set->nr_maps = 2;
+ if (dev->io_queues[HCTX_TYPE_POLL])
+ set->nr_maps = 3;
+ set->timeout = NVME_IO_TIMEOUT;
+ set->numa_node = dev->ctrl.numa_node;
+ set->queue_depth = min_t(unsigned, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
+ set->cmd_size = sizeof(struct nvme_iod);
+ set->flags = BLK_MQ_F_SHOULD_MERGE;
+ set->driver_data = dev;
- ret = blk_mq_alloc_tag_set(&dev->tagset);
- if (ret) {
- dev_warn(dev->ctrl.device,
- "IO queues tagset allocation failed %d\n", ret);
- return;
- }
- dev->ctrl.tagset = &dev->tagset;
- } else {
- blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+ /*
+ * Some Apple controllers require tags to be unique
+ * across the admin and I/O queues, so reserve the first 32
+ * tags of the IO queue.
+ */
+ if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
+ set->reserved_tags = NVME_AQ_DEPTH;
- /* Free previously allocated queues that are no longer usable */
- nvme_free_queues(dev, dev->online_queues);
+ ret = blk_mq_alloc_tag_set(set);
+ if (ret) {
+ dev_warn(dev->ctrl.device,
+ "IO queues tagset allocation failed %d\n", ret);
+ return;
}
+ dev->ctrl.tagset = set;
+}
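The tag-set parameters chosen above are mostly small arithmetic: the number of queue maps equals the number of queue types in use, and the block-layer depth is one less than the hardware queue depth so the submission queue is never driven completely full. A small sketch with assumed limits (BLK_MQ_MAX_DEPTH taken as 10240 here):

#include <stdio.h>

#define BLK_MQ_MAX_DEPTH 10240          /* assumed block-layer cap */

int main(void)
{
        unsigned int q_depth = 1024;    /* hardware depth, from CAP.MQES + 1 */
        int have_read_queues = 1;
        int have_poll_queues = 1;

        unsigned int nr_maps = 1;       /* default map only */
        if (have_read_queues)
                nr_maps = 2;            /* default + read */
        if (have_poll_queues)
                nr_maps = 3;            /* default + read + poll */

        unsigned int depth = (q_depth < BLK_MQ_MAX_DEPTH ?
                              q_depth : BLK_MQ_MAX_DEPTH) - 1;

        printf("nr_maps=%u queue_depth=%u\n", nr_maps, depth);
        return 0;
}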
- nvme_dbbuf_set(dev);
+static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
+{
+ blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+ /* free previously allocated queues that are no longer usable */
+ nvme_free_queues(dev, dev->online_queues);
}
static int nvme_pci_enable(struct nvme_dev *dev)
{
int result = -ENOMEM;
struct pci_dev *pdev = to_pci_dev(dev->dev);
+ int dma_address_bits = 64;
if (pci_enable_device_mem(pdev))
return result;
pci_set_master(pdev);
- if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)))
+ if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
+ dma_address_bits = 48;
+ if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(dma_address_bits)))
goto disable;
if (readl(dev->bar + NVME_REG_CSTS) == -1) {
@@ -2331,7 +2596,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
- dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1,
+ dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1,
io_queue_depth);
dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
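The queue-depth and doorbell-stride setup above simply decodes fields of the 64-bit CAP register: MQES in bits 15:0 is a 0's-based maximum queue depth, and DSTRD in bits 35:32 gives the power-of-two stride between doorbell registers (bit positions per the NVMe base specification; the example CAP value below is arbitrary):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t cap = 0x0000000300010fffULL;   /* arbitrary example CAP value */
        uint32_t io_queue_depth = 1024;         /* module-parameter cap */

        uint32_t mqes = cap & 0xffff;           /* CAP.MQES, 0's based */
        uint32_t dstrd = (cap >> 32) & 0xf;     /* CAP.DSTRD */

        uint32_t q_depth = mqes + 1;
        if (q_depth > io_queue_depth)
                q_depth = io_queue_depth;

        printf("q_depth=%" PRIu32 " sqsize=%" PRIu32 " db_stride=%u\n",
               q_depth, q_depth - 1, 1u << dstrd);
        return 0;
}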
@@ -2413,7 +2678,12 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
mutex_lock(&dev->shutdown_lock);
if (pci_is_enabled(pdev)) {
- u32 csts = readl(dev->bar + NVME_REG_CSTS);
+ u32 csts;
+
+ if (pci_device_is_present(pdev))
+ csts = readl(dev->bar + NVME_REG_CSTS);
+ else
+ csts = ~0;
if (dev->ctrl.state == NVME_CTRL_LIVE ||
dev->ctrl.state == NVME_CTRL_RESETTING) {
@@ -2442,10 +2712,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_pci_disable(dev);
nvme_reap_pending_cqes(dev);
- blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
- blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
- blk_mq_tagset_wait_completed_request(&dev->tagset);
- blk_mq_tagset_wait_completed_request(&dev->admin_tagset);
+ nvme_cancel_tagset(&dev->ctrl);
+ nvme_cancel_admin_tagset(&dev->ctrl);
/*
* The driver will not be starting up queues again if shutting down so
@@ -2455,7 +2723,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
if (shutdown) {
nvme_start_queues(&dev->ctrl);
if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
- blk_mq_unquiesce_queue(dev->ctrl.admin_q);
+ nvme_start_admin_queue(&dev->ctrl);
}
mutex_unlock(&dev->shutdown_lock);
}
@@ -2471,7 +2739,8 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
- PAGE_SIZE, PAGE_SIZE, 0);
+ NVME_CTRL_PAGE_SIZE,
+ NVME_CTRL_PAGE_SIZE, 0);
if (!dev->prp_page_pool)
return -ENOMEM;
@@ -2503,13 +2772,13 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
struct nvme_dev *dev = to_nvme_dev(ctrl);
nvme_dbbuf_dma_free(dev);
- put_device(dev->dev);
nvme_free_tagset(dev);
if (dev->ctrl.admin_q)
blk_put_queue(dev->ctrl.admin_q);
- kfree(dev->queues);
free_opal_dev(dev->ctrl.opal_dev);
mempool_destroy(dev->iod_mempool);
+ put_device(dev->dev);
+ kfree(dev->queues);
kfree(dev);
}
@@ -2534,7 +2803,9 @@ static void nvme_reset_work(struct work_struct *work)
bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
int result;
- if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
+ if (dev->ctrl.state != NVME_CTRL_RESETTING) {
+ dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
+ dev->ctrl.state);
result = -ENODEV;
goto out;
}
@@ -2556,9 +2827,15 @@ static void nvme_reset_work(struct work_struct *work)
if (result)
goto out_unlock;
- result = nvme_alloc_admin_tags(dev);
- if (result)
- goto out_unlock;
+ if (!dev->ctrl.admin_q) {
+ result = nvme_pci_alloc_admin_tag_set(dev);
+ if (result)
+ goto out_unlock;
+ } else {
+ nvme_start_admin_queue(&dev->ctrl);
+ }
+
+ dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1);
/*
* Limit the max command size to prevent iod->sg allocations going
@@ -2586,7 +2863,13 @@ static void nvme_reset_work(struct work_struct *work)
goto out;
}
- result = nvme_init_identify(&dev->ctrl);
+ /*
+ * We do not support an SGL for metadata (yet), so we are limited to a
+ * single integrity segment for the separate metadata pointer.
+ */
+ dev->ctrl.max_integrity_segments = 1;
+
+ result = nvme_init_ctrl_finish(&dev->ctrl);
if (result)
goto out;
@@ -2630,7 +2913,11 @@ static void nvme_reset_work(struct work_struct *work)
} else {
nvme_start_queues(&dev->ctrl);
nvme_wait_freeze(&dev->ctrl);
- nvme_dev_add(dev);
+ if (!dev->ctrl.tagset)
+ nvme_pci_alloc_tag_set(dev);
+ else
+ nvme_pci_update_nr_queues(dev);
+ nvme_dbbuf_set(dev);
nvme_unfreeze(&dev->ctrl);
}
@@ -2645,6 +2932,10 @@ static void nvme_reset_work(struct work_struct *work)
goto out;
}
+ if (!dev->attrs_added && !sysfs_create_group(&dev->ctrl.device->kobj,
+ &nvme_pci_attr_group))
+ dev->attrs_added = true;
+
nvme_start_ctrl(&dev->ctrl);
return;
@@ -2689,20 +2980,42 @@ static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
- return snprintf(buf, size, "%s", dev_name(&pdev->dev));
+ return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
+}
+
+static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
+{
+ struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
+ struct nvme_subsystem *subsys = ctrl->subsys;
+
+ dev_err(ctrl->device,
+ "VID:DID %04x:%04x model:%.*s firmware:%.*s\n",
+ pdev->vendor, pdev->device,
+ nvme_strlen(subsys->model, sizeof(subsys->model)),
+ subsys->model, nvme_strlen(subsys->firmware_rev,
+ sizeof(subsys->firmware_rev)),
+ subsys->firmware_rev);
+}
+
+static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
+
+ return dma_pci_p2pdma_supported(dev->dev);
}
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.name = "pcie",
.module = THIS_MODULE,
- .flags = NVME_F_METADATA_SUPPORTED |
- NVME_F_PCI_P2PDMA,
+ .flags = NVME_F_METADATA_SUPPORTED,
.reg_read32 = nvme_pci_reg_read32,
.reg_write32 = nvme_pci_reg_write32,
.reg_read64 = nvme_pci_reg_read64,
.free_ctrl = nvme_pci_free_ctrl,
.submit_async_event = nvme_pci_submit_async_event,
.get_address = nvme_pci_get_address,
+ .print_device_info = nvme_pci_print_device_info,
+ .supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma,
};
static int nvme_dev_map(struct nvme_dev *dev)
@@ -2788,8 +3101,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!dev)
return -ENOMEM;
- dev->queues = kcalloc_node(max_queue_count(), sizeof(struct nvme_queue),
- GFP_KERNEL, node);
+ dev->nr_write_queues = write_queues;
+ dev->nr_poll_queues = poll_queues;
+ dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1;
+ dev->queues = kcalloc_node(dev->nr_allocated_queues,
+ sizeof(struct nvme_queue), GFP_KERNEL, node);
if (!dev->queues)
goto free;
@@ -2810,12 +3126,21 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
quirks |= check_vendor_combination_bug(pdev);
+ if (!noacpi && acpi_storage_d3(&pdev->dev)) {
+ /*
+ * Some systems use a BIOS workaround to ask for D3 on
+ * platforms that support kernel-managed suspend.
+ */
+ dev_info(&pdev->dev,
+ "platform quirk: setting simple suspend\n");
+ quirks |= NVME_QUIRK_SIMPLE_SUSPEND;
+ }
+
/*
* Double check that our mempool alloc size will cover the biggest
* command we support.
*/
- alloc_size = nvme_pci_iod_alloc_size(dev, NVME_MAX_KB_SZ,
- NVME_MAX_SEGS, true);
+ alloc_size = nvme_pci_iod_alloc_size();
WARN_ON_ONCE(alloc_size > PAGE_SIZE);
dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
@@ -2835,7 +3160,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
nvme_reset_ctrl(&dev->ctrl);
- nvme_get_ctrl(&dev->ctrl);
async_schedule(nvme_async_probe, dev);
return 0;
@@ -2878,9 +3202,17 @@ static void nvme_reset_done(struct pci_dev *pdev)
static void nvme_shutdown(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
+
nvme_disable_prepare_reset(dev, true);
}
+static void nvme_remove_attrs(struct nvme_dev *dev)
+{
+ if (dev->attrs_added)
+ sysfs_remove_group(&dev->ctrl.device->kobj,
+ &nvme_pci_attr_group);
+}
+
/*
* The driver's remove may be called on a device in a partially initialized
* state. This function must not have any dependencies on the device state in
@@ -2896,21 +3228,19 @@ static void nvme_remove(struct pci_dev *pdev)
if (!pci_device_is_present(pdev)) {
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
nvme_dev_disable(dev, true);
- nvme_dev_remove_admin(dev);
}
flush_work(&dev->ctrl.reset_work);
nvme_stop_ctrl(&dev->ctrl);
nvme_remove_namespaces(&dev->ctrl);
nvme_dev_disable(dev, true);
- nvme_release_cmb(dev);
+ nvme_remove_attrs(dev);
nvme_free_host_mem(dev);
nvme_dev_remove_admin(dev);
nvme_free_queues(dev, 0);
- nvme_uninit_ctrl(&dev->ctrl);
nvme_release_prp_pools(dev);
nvme_dev_unmap(dev);
- nvme_put_ctrl(&dev->ctrl);
+ nvme_uninit_ctrl(&dev->ctrl);
}
#ifdef CONFIG_PM_SLEEP
@@ -2931,8 +3261,13 @@ static int nvme_resume(struct device *dev)
if (ndev->last_ps == U32_MAX ||
nvme_set_power_state(ctrl, ndev->last_ps) != 0)
- return nvme_try_sched_reset(&ndev->ctrl);
+ goto reset;
+ if (ctrl->hmpre && nvme_setup_host_mem(ndev))
+ goto reset;
+
return 0;
+reset:
+ return nvme_try_sched_reset(ctrl);
}
static int nvme_suspend(struct device *dev)
@@ -2969,6 +3304,17 @@ static int nvme_suspend(struct device *dev)
if (ctrl->state != NVME_CTRL_LIVE)
goto unfreeze;
+ /*
+ * Host memory access may not be successful in a system suspend state,
+ * but the specification allows the controller to access memory in a
+ * non-operational power state.
+ */
+ if (ndev->hmb) {
+ ret = nvme_set_host_mem(ndev, 0);
+ if (ret < 0)
+ goto unfreeze;
+ }
+
ret = nvme_get_power_state(ctrl, &ndev->last_ps);
if (ret < 0)
goto unfreeze;
@@ -3003,6 +3349,7 @@ unfreeze:
static int nvme_simple_suspend(struct device *dev)
{
struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
+
return nvme_disable_prepare_reset(ndev, true);
}
@@ -3076,29 +3423,38 @@ static const struct pci_error_handlers nvme_err_handler = {
};
static const struct pci_device_id nvme_id_table[] = {
- { PCI_VDEVICE(INTEL, 0x0953),
+ { PCI_VDEVICE(INTEL, 0x0953), /* Intel 750/P3500/P3600/P3700 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, },
- { PCI_VDEVICE(INTEL, 0x0a53),
+ { PCI_VDEVICE(INTEL, 0x0a53), /* Intel P3520 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, },
- { PCI_VDEVICE(INTEL, 0x0a54),
+ { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
- NVME_QUIRK_DEALLOCATE_ZEROES, },
- { PCI_VDEVICE(INTEL, 0x0a55),
+ NVME_QUIRK_DEALLOCATE_ZEROES |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, },
{ PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_MEDIUM_PRIO_SQ |
- NVME_QUIRK_NO_TEMP_THRESH_CHANGE },
+ NVME_QUIRK_NO_TEMP_THRESH_CHANGE |
+ NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS |
- NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ NVME_QUIRK_DISABLE_WRITE_ZEROES |
+ NVME_QUIRK_BOGUS_NID, },
+ { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
+ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
+ NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
- .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+ NVME_QUIRK_NO_NS_DESC_LIST, },
{ PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */
@@ -3108,26 +3464,102 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
- .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
- { PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */
- .driver_data = NVME_QUIRK_LIGHTNVM, },
- { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */
- .driver_data = NVME_QUIRK_LIGHTNVM, },
- { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */
- .driver_data = NVME_QUIRK_LIGHTNVM, },
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+ NVME_QUIRK_DISABLE_WRITE_ZEROES |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
+ NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1987, 0x5019), /* phison E19 */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1987, 0x5021), /* Phison E21 */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */
+ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x1cc1, 0x33f8), /* ADATA IM2P33F8ABR1 1 TB */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
- .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
+ NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
- { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+ { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN },
+ { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x144d, 0xa80b), /* Samsung PM9B1 256G and 512G */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x144d, 0xa809), /* Samsung MZALQ256HBJD 256G */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1cc4, 0x6303), /* UMIS RPJTJ512MGE1QDY 512G */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1cc4, 0x6302), /* UMIS RPJTJ256MGE1QDY 256G */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(0x2646, 0x5018), /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x2646, 0x5016), /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x2646, 0x501A), /* KINGSTON OM8PGP4xxxxP OS21005 NVMe SSD */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x2646, 0x501B), /* KINGSTON OM8PGP4xxxxQ OS21005 NVMe SSD */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
.driver_data = NVME_QUIRK_SINGLE_VECTOR },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
.driver_data = NVME_QUIRK_SINGLE_VECTOR |
NVME_QUIRK_128_BYTES_SQES |
- NVME_QUIRK_SHARED_TAGS },
+ NVME_QUIRK_SHARED_TAGS |
+ NVME_QUIRK_SKIP_CID_GEN },
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);
@@ -3153,9 +3585,9 @@ static int __init nvme_init(void)
BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
+ BUILD_BUG_ON(DIV_ROUND_UP(nvme_pci_npages_prp(), NVME_CTRL_PAGE_SIZE) >
+ S8_MAX);
- write_queues = min(write_queues, num_possible_cpus());
- poll_queues = min(poll_queues, num_possible_cpus());
return pci_register_driver(&nvme_driver);
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 0fe08c4dfd2f..6e079abb22ee 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -13,6 +13,7 @@
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-rdma.h>
+#include <linux/blk-integrity.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -28,12 +29,17 @@
#include "fabrics.h"
-#define NVME_RDMA_CONNECT_TIMEOUT_MS 3000 /* 3 second */
+#define NVME_RDMA_CM_TIMEOUT_MS 3000 /* 3 second */
#define NVME_RDMA_MAX_SEGMENTS 256
#define NVME_RDMA_MAX_INLINE_SEGMENTS 4
+#define NVME_RDMA_DATA_SGL_SIZE \
+ (sizeof(struct scatterlist) * NVME_INLINE_SG_CNT)
+#define NVME_RDMA_METADATA_SGL_SIZE \
+ (sizeof(struct scatterlist) * NVME_INLINE_METADATA_SG_CNT)
+
struct nvme_rdma_device {
struct ib_device *dev;
struct ib_pd *pd;
@@ -48,6 +54,11 @@ struct nvme_rdma_qe {
u64 dma;
};
+struct nvme_rdma_sgl {
+ int nents;
+ struct sg_table sg_table;
+};
+
struct nvme_rdma_queue;
struct nvme_rdma_request {
struct nvme_request req;
@@ -58,12 +69,12 @@ struct nvme_rdma_request {
refcount_t ref;
struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
u32 num_sge;
- int nents;
struct ib_reg_wr reg_wr;
struct ib_cqe reg_cqe;
struct nvme_rdma_queue *queue;
- struct sg_table sg_table;
- struct scatterlist first_sgl[];
+ struct nvme_rdma_sgl data_sgl;
+ struct nvme_rdma_sgl *metadata_sgl;
+ bool use_sig_mr;
};
enum nvme_rdma_queue_flags {
@@ -85,6 +96,9 @@ struct nvme_rdma_queue {
struct rdma_cm_id *cm_id;
int cm_error;
struct completion cm_done;
+ bool pi_support;
+ int cq_size;
+ struct mutex queue_lock;
};
struct nvme_rdma_ctrl {
@@ -138,18 +152,11 @@ MODULE_PARM_DESC(register_always,
static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event);
static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvme_rdma_complete_rq(struct request *rq);
static const struct blk_mq_ops nvme_rdma_mq_ops;
static const struct blk_mq_ops nvme_rdma_admin_mq_ops;
-/* XXX: really should move to a generic header sooner or later.. */
-static inline void put_unaligned_le24(u32 val, u8 *p)
-{
- *p++ = val;
- *p++ = val >> 8;
- *p++ = val >> 16;
-}
-
static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
{
return queue - queue->ctrl->queues;
@@ -241,12 +248,9 @@ static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
{
int ret;
- ret = wait_for_completion_interruptible_timeout(&queue->cm_done,
- msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1);
- if (ret < 0)
+ ret = wait_for_completion_interruptible(&queue->cm_done);
+ if (ret)
return ret;
- if (ret == 0)
- return -ETIMEDOUT;
WARN_ON_ONCE(queue->cm_error > 0);
return queue->cm_error;
}
@@ -269,6 +273,9 @@ static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
init_attr.qp_type = IB_QPT_RC;
init_attr.send_cq = queue->ib_cq;
init_attr.recv_cq = queue->ib_cq;
+ if (queue->pi_support)
+ init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
+ init_attr.qp_context = queue;
ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);
@@ -288,7 +295,7 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
struct request *rq, unsigned int hctx_idx,
unsigned int numa_node)
{
- struct nvme_rdma_ctrl *ctrl = set->driver_data;
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
@@ -298,7 +305,14 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
if (!req->sqe.data)
return -ENOMEM;
+ /* metadata nvme_rdma_sgl struct is located after command's data SGL */
+ if (queue->pi_support)
+ req->metadata_sgl = (void *)nvme_req(rq) +
+ sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE;
+
req->queue = queue;
+ nvme_req(rq)->cmd = req->sqe.data;
return 0;
}
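Note that the metadata SGL is not a member of struct nvme_rdma_request: it is carved out of the per-request PDU immediately after the inline data SGL, and the tag-set cmd_size (shown further down in this patch) is grown to match when T10-PI is enabled. A small sketch of the layout arithmetic; the structure definitions and inline segment counts below are stand-ins, not the driver's real values:

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the driver's structures; the sizes are illustrative only. */
struct scatterlist { void *page; unsigned int offset, length; };
struct nvme_rdma_sgl { int nents; void *sg_table; };
struct nvme_rdma_request { char opaque[192]; };         /* request header, SGEs, MR state */

#define NVME_INLINE_SG_CNT              2       /* assumed inline data segments */
#define NVME_INLINE_METADATA_SG_CNT     1       /* assumed inline metadata segments */

#define NVME_RDMA_DATA_SGL_SIZE \
        (sizeof(struct scatterlist) * NVME_INLINE_SG_CNT)
#define NVME_RDMA_METADATA_SGL_SIZE \
        (sizeof(struct scatterlist) * NVME_INLINE_METADATA_SG_CNT)

int main(void)
{
        size_t cmd_size = sizeof(struct nvme_rdma_request) + NVME_RDMA_DATA_SGL_SIZE;
        size_t metadata_off = cmd_size; /* metadata sgl sits right after the data SGL */
        int pi_support = 1;

        if (pi_support)
                cmd_size += sizeof(struct nvme_rdma_sgl) + NVME_RDMA_METADATA_SGL_SIZE;

        printf("PDU size %zu bytes, metadata sgl at offset %zu\n",
               cmd_size, metadata_off);
        return 0;
}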
@@ -306,7 +320,7 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_rdma_ctrl *ctrl = data;
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data);
struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
@@ -318,7 +332,7 @@ static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_rdma_ctrl *ctrl = data;
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data);
struct nvme_rdma_queue *queue = &ctrl->queues[0];
BUG_ON(hctx_idx != 0);
@@ -397,6 +411,14 @@ out_err:
return NULL;
}
+static void nvme_rdma_free_cq(struct nvme_rdma_queue *queue)
+{
+ if (nvme_rdma_poll_queue(queue))
+ ib_free_cq(queue->ib_cq);
+ else
+ ib_cq_pool_put(queue->ib_cq, queue->cq_size);
+}
+
static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
{
struct nvme_rdma_device *dev;
@@ -408,6 +430,8 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
dev = queue->device;
ibdev = dev->dev;
+ if (queue->pi_support)
+ ib_mr_pool_destroy(queue->qp, &queue->qp->sig_mrs);
ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
/*
@@ -416,7 +440,7 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
* the destruction of the QP shouldn't use rdma_cm API.
*/
ib_destroy_qp(queue->qp);
- ib_free_cq(queue->ib_cq);
+ nvme_rdma_free_cq(queue);
nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
sizeof(struct nvme_completion), DMA_FROM_DEVICE);
@@ -424,10 +448,47 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
nvme_rdma_dev_put(dev);
}
-static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
+static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev, bool pi_support)
+{
+ u32 max_page_list_len;
+
+ if (pi_support)
+ max_page_list_len = ibdev->attrs.max_pi_fast_reg_page_list_len;
+ else
+ max_page_list_len = ibdev->attrs.max_fast_reg_page_list_len;
+
+ return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1);
+}
+
+static int nvme_rdma_create_cq(struct ib_device *ibdev,
+ struct nvme_rdma_queue *queue)
{
- return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
- ibdev->attrs.max_fast_reg_page_list_len - 1);
+ int ret, comp_vector, idx = nvme_rdma_queue_idx(queue);
+ enum ib_poll_context poll_ctx;
+
+ /*
+ * Spread I/O queue completion vectors according to their queue index.
+ * Admin queues can always go on completion vector 0.
+ */
+ comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
+
+ /* Polling queues need direct cq polling context */
+ if (nvme_rdma_poll_queue(queue)) {
+ poll_ctx = IB_POLL_DIRECT;
+ queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,
+ comp_vector, poll_ctx);
+ } else {
+ poll_ctx = IB_POLL_SOFTIRQ;
+ queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,
+ comp_vector, poll_ctx);
+ }
+
+ if (IS_ERR(queue->ib_cq)) {
+ ret = PTR_ERR(queue->ib_cq);
+ return ret;
+ }
+
+ return 0;
}
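The completion-vector choice above spreads I/O queues round-robin across the device's completion vectors while keeping the admin queue on vector 0. A tiny sketch of just that mapping:

#include <stdio.h>

int main(void)
{
        int num_comp_vectors = 4;       /* reported by the RDMA device */
        int nr_queues = 9;              /* admin queue (idx 0) plus 8 I/O queues */

        for (int idx = 0; idx < nr_queues; idx++) {
                /* Admin queue stays on vector 0; I/O queues go round-robin. */
                int comp_vector = (idx == 0 ? idx : idx - 1) % num_comp_vectors;

                printf("queue %d -> completion vector %d\n", idx, comp_vector);
        }
        return 0;
}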
static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
@@ -435,8 +496,6 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
struct ib_device *ibdev;
const int send_wr_factor = 3; /* MR, SEND, INV */
const int cq_factor = send_wr_factor + 1; /* + RECV */
- int comp_vector, idx = nvme_rdma_queue_idx(queue);
- enum ib_poll_context poll_ctx;
int ret, pages_per_mr;
queue->device = nvme_rdma_find_get_device(queue->cm_id);
@@ -447,26 +506,12 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
}
ibdev = queue->device->dev;
- /*
- * Spread I/O queues completion vectors according their queue index.
- * Admin queues can always go on completion vector 0.
- */
- comp_vector = idx == 0 ? idx : idx - 1;
-
- /* Polling queues need direct cq polling context */
- if (nvme_rdma_poll_queue(queue))
- poll_ctx = IB_POLL_DIRECT;
- else
- poll_ctx = IB_POLL_SOFTIRQ;
-
/* +1 for ib_stop_cq */
- queue->ib_cq = ib_alloc_cq(ibdev, queue,
- cq_factor * queue->queue_size + 1,
- comp_vector, poll_ctx);
- if (IS_ERR(queue->ib_cq)) {
- ret = PTR_ERR(queue->ib_cq);
+ queue->cq_size = cq_factor * queue->queue_size + 1;
+
+ ret = nvme_rdma_create_cq(ibdev, queue);
+ if (ret)
goto out_put_dev;
- }
ret = nvme_rdma_create_qp(queue, send_wr_factor);
if (ret)
@@ -484,7 +529,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
* misaligned we'll end up using two entries for a single data page,
* so one additional entry is required.
*/
- pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev) + 1;
+ pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1;
ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
queue->queue_size,
IB_MR_TYPE_MEM_REG,
@@ -492,21 +537,35 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
if (ret) {
dev_err(queue->ctrl->ctrl.device,
"failed to initialize MR pool sized %d for QID %d\n",
- queue->queue_size, idx);
+ queue->queue_size, nvme_rdma_queue_idx(queue));
goto out_destroy_ring;
}
+ if (queue->pi_support) {
+ ret = ib_mr_pool_init(queue->qp, &queue->qp->sig_mrs,
+ queue->queue_size, IB_MR_TYPE_INTEGRITY,
+ pages_per_mr, pages_per_mr);
+ if (ret) {
+ dev_err(queue->ctrl->ctrl.device,
+ "failed to initialize PI MR pool sized %d for QID %d\n",
+ queue->queue_size, nvme_rdma_queue_idx(queue));
+ goto out_destroy_mr_pool;
+ }
+ }
+
set_bit(NVME_RDMA_Q_TR_READY, &queue->flags);
return 0;
+out_destroy_mr_pool:
+ ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
out_destroy_ring:
nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
sizeof(struct nvme_completion), DMA_FROM_DEVICE);
out_destroy_qp:
rdma_destroy_qp(queue->cm_id);
out_destroy_ib_cq:
- ib_free_cq(queue->ib_cq);
+ nvme_rdma_free_cq(queue);
out_put_dev:
nvme_rdma_dev_put(queue->device);
return ret;
@@ -520,7 +579,12 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
int ret;
queue = &ctrl->queues[idx];
+ mutex_init(&queue->queue_lock);
queue->ctrl = ctrl;
+ if (idx && ctrl->ctrl.max_integrity_segments)
+ queue->pi_support = true;
+ else
+ queue->pi_support = false;
init_completion(&queue->cm_done);
if (idx > 0)
@@ -535,7 +599,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
if (IS_ERR(queue->cm_id)) {
dev_info(ctrl->ctrl.device,
"failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
- return PTR_ERR(queue->cm_id);
+ ret = PTR_ERR(queue->cm_id);
+ goto out_destroy_mutex;
}
if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
@@ -544,7 +609,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
queue->cm_error = -ETIMEDOUT;
ret = rdma_resolve_addr(queue->cm_id, src_addr,
(struct sockaddr *)&ctrl->addr,
- NVME_RDMA_CONNECT_TIMEOUT_MS);
+ NVME_RDMA_CM_TIMEOUT_MS);
if (ret) {
dev_info(ctrl->ctrl.device,
"rdma_resolve_addr failed (%d).\n", ret);
@@ -565,6 +630,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
out_destroy_cm_id:
rdma_destroy_id(queue->cm_id);
nvme_rdma_destroy_queue_ib(queue);
+out_destroy_mutex:
+ mutex_destroy(&queue->queue_lock);
return ret;
}
@@ -576,9 +643,10 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
- if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
- return;
- __nvme_rdma_stop_queue(queue);
+ mutex_lock(&queue->queue_lock);
+ if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
+ __nvme_rdma_stop_queue(queue);
+ mutex_unlock(&queue->queue_lock);
}
static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
@@ -586,8 +654,9 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
return;
- nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id);
+ nvme_rdma_destroy_queue_ib(queue);
+ mutex_destroy(&queue->queue_lock);
}
static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
@@ -609,11 +678,10 @@ static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl)
static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
{
struct nvme_rdma_queue *queue = &ctrl->queues[idx];
- bool poll = nvme_rdma_poll_queue(queue);
int ret;
if (idx)
- ret = nvmf_connect_io_queue(&ctrl->ctrl, idx, poll);
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, idx);
else
ret = nvmf_connect_admin_queue(&ctrl->ctrl);
@@ -628,11 +696,12 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
return ret;
}
-static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl)
+static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl,
+ int first, int last)
{
int i, ret = 0;
- for (i = 1; i < ctrl->ctrl.queue_count; i++) {
+ for (i = first; i < last; i++) {
ret = nvme_rdma_start_queue(ctrl, i);
if (ret)
goto out_stop_queues;
@@ -641,7 +710,7 @@ static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl)
return 0;
out_stop_queues:
- for (i--; i >= 1; i--)
+ for (i--; i >= first; i--)
nvme_rdma_stop_queue(&ctrl->queues[i]);
return ret;
}
@@ -665,10 +734,13 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
if (ret)
return ret;
- ctrl->ctrl.queue_count = nr_io_queues + 1;
- if (ctrl->ctrl.queue_count < 2)
- return 0;
+ if (nr_io_queues == 0) {
+ dev_err(ctrl->ctrl.device,
+ "unable to set any I/O queues\n");
+ return -ENOMEM;
+ }
+ ctrl->ctrl.queue_count = nr_io_queues + 1;
dev_info(ctrl->ctrl.device,
"creating %d I/O queues.\n", nr_io_queues);
@@ -716,58 +788,23 @@ out_free_queues:
return ret;
}
-static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
- bool admin)
+static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
{
- struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
- struct blk_mq_tag_set *set;
- int ret;
+ unsigned int cmd_size = sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE;
- if (admin) {
- set = &ctrl->admin_tag_set;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_rdma_admin_mq_ops;
- set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- set->reserved_tags = 2; /* connect + keep-alive */
- set->numa_node = nctrl->numa_node;
- set->cmd_size = sizeof(struct nvme_rdma_request) +
- NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
- set->driver_data = ctrl;
- set->nr_hw_queues = 1;
- set->timeout = ADMIN_TIMEOUT;
- set->flags = BLK_MQ_F_NO_SCHED;
- } else {
- set = &ctrl->tag_set;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_rdma_mq_ops;
- set->queue_depth = nctrl->sqsize + 1;
- set->reserved_tags = 1; /* fabric connect */
- set->numa_node = nctrl->numa_node;
- set->flags = BLK_MQ_F_SHOULD_MERGE;
- set->cmd_size = sizeof(struct nvme_rdma_request) +
- NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
- set->driver_data = ctrl;
- set->nr_hw_queues = nctrl->queue_count - 1;
- set->timeout = NVME_IO_TIMEOUT;
- set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
- }
-
- ret = blk_mq_alloc_tag_set(set);
- if (ret)
- return ERR_PTR(ret);
+ if (ctrl->max_integrity_segments)
+ cmd_size += sizeof(struct nvme_rdma_sgl) +
+ NVME_RDMA_METADATA_SGL_SIZE;
- return set;
+ return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
+ &nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE, cmd_size);
}
-static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
- bool remove)
+static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
{
- if (remove) {
- blk_cleanup_queue(ctrl->ctrl.admin_q);
- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
- blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
- }
if (ctrl->async_event_sqe.data) {
+ cancel_work_sync(&ctrl->ctrl.async_event_work);
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
sizeof(struct nvme_command), DMA_TO_DEVICE);
ctrl->async_event_sqe.data = NULL;
@@ -778,6 +815,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool new)
{
+ bool pi_capable = false;
int error;
error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
@@ -785,9 +823,15 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
return error;
ctrl->device = ctrl->queues[0].device;
- ctrl->ctrl.numa_node = dev_to_node(ctrl->device->dev->dma_device);
+ ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev);
- ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
+ /* T10-PI support */
+ if (ctrl->device->dev->attrs.kernel_cap_flags &
+ IBK_INTEGRITY_HANDOVER)
+ pi_capable = true;
+
+ ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev,
+ pi_capable);
/*
* Bind the async event SQE DMA mapping to the admin queue lifetime.
@@ -800,28 +844,19 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
goto out_free_queue;
if (new) {
- ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
- if (IS_ERR(ctrl->ctrl.admin_tagset)) {
- error = PTR_ERR(ctrl->ctrl.admin_tagset);
+ error = nvme_alloc_admin_tag_set(&ctrl->ctrl,
+ &ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops,
+ BLK_MQ_F_NO_SCHED,
+ sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE);
+ if (error)
goto out_free_async_qe;
- }
- ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.fabrics_q)) {
- error = PTR_ERR(ctrl->ctrl.fabrics_q);
- goto out_free_tagset;
- }
-
- ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.admin_q)) {
- error = PTR_ERR(ctrl->ctrl.admin_q);
- goto out_cleanup_fabrics_q;
- }
}
error = nvme_rdma_start_queue(ctrl, 0);
if (error)
- goto out_cleanup_queue;
+ goto out_remove_admin_tag_set;
error = nvme_enable_ctrl(&ctrl->ctrl);
if (error)
@@ -829,26 +864,28 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
ctrl->ctrl.max_segments = ctrl->max_fr_pages;
ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
+ if (pi_capable)
+ ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages;
+ else
+ ctrl->ctrl.max_integrity_segments = 0;
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_start_admin_queue(&ctrl->ctrl);
- error = nvme_init_identify(&ctrl->ctrl);
+ error = nvme_init_ctrl_finish(&ctrl->ctrl);
if (error)
- goto out_stop_queue;
+ goto out_quiesce_queue;
return 0;
+out_quiesce_queue:
+ nvme_stop_admin_queue(&ctrl->ctrl);
+ blk_sync_queue(ctrl->ctrl.admin_q);
out_stop_queue:
nvme_rdma_stop_queue(&ctrl->queues[0]);
-out_cleanup_queue:
- if (new)
- blk_cleanup_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
- if (new)
- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
-out_free_tagset:
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
+out_remove_admin_tag_set:
if (new)
- blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
out_free_async_qe:
if (ctrl->async_event_sqe.data) {
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
@@ -860,53 +897,65 @@ out_free_queue:
return error;
}
-static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
- bool remove)
-{
- if (remove) {
- blk_cleanup_queue(ctrl->ctrl.connect_q);
- blk_mq_free_tag_set(ctrl->ctrl.tagset);
- }
- nvme_rdma_free_io_queues(ctrl);
-}
-
static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
{
- int ret;
+ int ret, nr_queues;
ret = nvme_rdma_alloc_io_queues(ctrl);
if (ret)
return ret;
if (new) {
- ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false);
- if (IS_ERR(ctrl->ctrl.tagset)) {
- ret = PTR_ERR(ctrl->ctrl.tagset);
+ ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl);
+ if (ret)
goto out_free_io_queues;
- }
+ }
- ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
- if (IS_ERR(ctrl->ctrl.connect_q)) {
- ret = PTR_ERR(ctrl->ctrl.connect_q);
- goto out_free_tag_set;
+ /*
+ * Only start IO queues for which we have allocated the tagset
+ * and limited it to the available queues. On reconnects, the
+ * queue number might have changed.
+ */
+ nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count);
+ ret = nvme_rdma_start_io_queues(ctrl, 1, nr_queues);
+ if (ret)
+ goto out_cleanup_tagset;
+
+ if (!new) {
+ nvme_start_queues(&ctrl->ctrl);
+ if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) {
+ /*
+ * If we timed out waiting for freeze we are likely to
+ * be stuck. Fail the controller initialization just
+ * to be safe.
+ */
+ ret = -ENODEV;
+ goto out_wait_freeze_timed_out;
}
- } else {
- blk_mq_update_nr_hw_queues(&ctrl->tag_set,
+ blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
ctrl->ctrl.queue_count - 1);
+ nvme_unfreeze(&ctrl->ctrl);
}
- ret = nvme_rdma_start_io_queues(ctrl);
+ /*
+ * If the number of queues has increased (reconnect case)
+ * start all new queues now.
+ */
+ ret = nvme_rdma_start_io_queues(ctrl, nr_queues,
+ ctrl->tag_set.nr_hw_queues + 1);
if (ret)
- goto out_cleanup_connect_q;
+ goto out_wait_freeze_timed_out;
return 0;
-out_cleanup_connect_q:
- if (new)
- blk_cleanup_queue(ctrl->ctrl.connect_q);
-out_free_tag_set:
+out_wait_freeze_timed_out:
+ nvme_stop_queues(&ctrl->ctrl);
+ nvme_sync_io_queues(&ctrl->ctrl);
+ nvme_rdma_stop_io_queues(ctrl);
+out_cleanup_tagset:
+ nvme_cancel_tagset(&ctrl->ctrl);
if (new)
- blk_mq_free_tag_set(ctrl->ctrl.tagset);
+ nvme_remove_io_tag_set(&ctrl->ctrl);
out_free_io_queues:
nvme_rdma_free_io_queues(ctrl);
return ret;
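
The reconnect handling above starts I/O queues in two phases: first only the range already covered by the existing tag set, then, once blk_mq_update_nr_hw_queues() has resized the set, whatever remains. A minimal userspace C sketch of that split (function and variable names are illustrative, not the driver's):

#include <stdio.h>

/*
 * Sketch of the two-phase start: phase 1 covers only the queues the
 * existing tag set already maps; phase 2 runs after the tag set has been
 * resized to the new queue count.  Queue 0 (admin) is handled elsewhere.
 */
static void plan_queue_start(int old_hw_queues, int new_queue_count)
{
	int nr = old_hw_queues + 1;		/* min(), as in the hunk above */

	if (nr > new_queue_count)
		nr = new_queue_count;

	printf("phase 1: start I/O queues 1..%d\n", nr - 1);
	if (new_queue_count > nr)
		printf("phase 2: start I/O queues %d..%d after resizing\n",
		       nr, new_queue_count - 1);
	else
		printf("phase 2: nothing to start\n");
}

int main(void)
{
	plan_queue_start(8, 5);		/* fewer queues after reconnect */
	plan_queue_start(4, 9);		/* more queues after reconnect  */
	return 0;
}
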
@@ -915,35 +964,42 @@ out_free_io_queues:
static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_stop_admin_queue(&ctrl->ctrl);
+ blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
- if (ctrl->ctrl.admin_tagset) {
- blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
- nvme_cancel_request, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(ctrl->ctrl.admin_tagset);
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
+ if (remove) {
+ nvme_start_admin_queue(&ctrl->ctrl);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
}
- if (remove)
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
- nvme_rdma_destroy_admin_queue(ctrl, remove);
+ nvme_rdma_destroy_admin_queue(ctrl);
}
static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
if (ctrl->ctrl.queue_count > 1) {
+ nvme_start_freeze(&ctrl->ctrl);
nvme_stop_queues(&ctrl->ctrl);
+ nvme_sync_io_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
- if (ctrl->ctrl.tagset) {
- blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
- nvme_cancel_request, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(ctrl->ctrl.tagset);
- }
- if (remove)
+ nvme_cancel_tagset(&ctrl->ctrl);
+ if (remove) {
nvme_start_queues(&ctrl->ctrl);
- nvme_rdma_destroy_io_queues(ctrl, remove);
+ nvme_remove_io_tag_set(&ctrl->ctrl);
+ }
+ nvme_rdma_free_io_queues(ctrl);
}
}
+static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+
+ flush_work(&ctrl->err_work);
+ cancel_delayed_work_sync(&ctrl->reconnect_work);
+}
+
static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
@@ -982,7 +1038,7 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
{
- int ret = -EINVAL;
+ int ret;
bool changed;
ret = nvme_rdma_configure_admin_queue(ctrl, new);
@@ -990,11 +1046,13 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
return ret;
if (ctrl->ctrl.icdoff) {
+ ret = -EOPNOTSUPP;
dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
goto destroy_admin;
}
if (!(ctrl->ctrl.sgls & (1 << 2))) {
+ ret = -EOPNOTSUPP;
dev_err(ctrl->ctrl.device,
"Mandatory keyed sgls are not supported!\n");
goto destroy_admin;
@@ -1006,6 +1064,13 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
}
+ if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) {
+ dev_warn(ctrl->ctrl.device,
+ "ctrl sqsize %u > max queue size %u, clamping down\n",
+ ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE);
+ ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1;
+ }
+
if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
dev_warn(ctrl->ctrl.device,
"sqsize %u > ctrl maxcmd %u, clamping down\n",
@@ -1024,8 +1089,14 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
if (!changed) {
- /* state change failure is ok if we're in DELETING state */
- WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
+ /*
+ * state change failure is ok if we started ctrl delete,
+ * unless we're in the middle of creating a new controller,
+ * in which case it would race with the teardown flow.
+ */
+ WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
+ ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
+ WARN_ON_ONCE(new);
ret = -EINVAL;
goto destroy_io;
}
@@ -1034,11 +1105,23 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
return 0;
destroy_io:
- if (ctrl->ctrl.queue_count > 1)
- nvme_rdma_destroy_io_queues(ctrl, new);
+ if (ctrl->ctrl.queue_count > 1) {
+ nvme_stop_queues(&ctrl->ctrl);
+ nvme_sync_io_queues(&ctrl->ctrl);
+ nvme_rdma_stop_io_queues(ctrl);
+ nvme_cancel_tagset(&ctrl->ctrl);
+ if (new)
+ nvme_remove_io_tag_set(&ctrl->ctrl);
+ nvme_rdma_free_io_queues(ctrl);
+ }
destroy_admin:
+ nvme_stop_admin_queue(&ctrl->ctrl);
+ blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
- nvme_rdma_destroy_admin_queue(ctrl, new);
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
+ if (new)
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
+ nvme_rdma_destroy_admin_queue(ctrl);
return ret;
}
@@ -1070,15 +1153,18 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
struct nvme_rdma_ctrl *ctrl = container_of(work,
struct nvme_rdma_ctrl, err_work);
+ nvme_auth_stop(&ctrl->ctrl);
nvme_stop_keep_alive(&ctrl->ctrl);
+ flush_work(&ctrl->ctrl.async_event_work);
nvme_rdma_teardown_io_queues(ctrl, false);
nvme_start_queues(&ctrl->ctrl);
nvme_rdma_teardown_admin_queue(ctrl, false);
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_start_admin_queue(&ctrl->ctrl);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
- /* state change failure is ok if we're in DELETING state */
- WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
+ /* state change failure is ok if we started ctrl delete */
+ WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
+ ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
return;
}
@@ -1090,13 +1176,24 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
return;
+ dev_warn(ctrl->ctrl.device, "starting error recovery\n");
queue_work(nvme_reset_wq, &ctrl->err_work);
}
+static void nvme_rdma_end_request(struct nvme_rdma_request *req)
+{
+ struct request *rq = blk_mq_rq_from_pdu(req);
+
+ if (!refcount_dec_and_test(&req->ref))
+ return;
+ if (!nvme_try_complete_req(rq, req->status, req->result))
+ nvme_rdma_complete_rq(rq);
+}
+
static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
const char *op)
{
- struct nvme_rdma_queue *queue = cq->cq_context;
+ struct nvme_rdma_queue *queue = wc->qp->qp_context;
struct nvme_rdma_ctrl *ctrl = queue->ctrl;
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
@@ -1117,16 +1214,11 @@ static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct nvme_rdma_request *req =
container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);
- struct request *rq = blk_mq_rq_from_pdu(req);
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (unlikely(wc->status != IB_WC_SUCCESS))
nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
- return;
- }
-
- if (refcount_dec_and_test(&req->ref))
- nvme_end_request(rq, req->status, req->result);
-
+ else
+ nvme_rdma_end_request(req);
}
static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
@@ -1146,23 +1238,42 @@ static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
return ib_post_send(queue->qp, &wr, NULL);
}
+static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+
+ if (blk_integrity_rq(rq)) {
+ ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
+ req->metadata_sgl->nents, rq_dma_dir(rq));
+ sg_free_table_chained(&req->metadata_sgl->sg_table,
+ NVME_INLINE_METADATA_SG_CNT);
+ }
+
+ ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
+ rq_dma_dir(rq));
+ sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+}
+
static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
struct request *rq)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_device *dev = queue->device;
struct ib_device *ibdev = dev->dev;
+ struct list_head *pool = &queue->qp->rdma_mrs;
if (!blk_rq_nr_phys_segments(rq))
return;
+ if (req->use_sig_mr)
+ pool = &queue->qp->sig_mrs;
+
if (req->mr) {
- ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
+ ib_mr_pool_put(queue->qp, pool, req->mr);
req->mr = NULL;
}
- ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
- sg_free_table_chained(&req->sg_table, NVME_INLINE_SG_CNT);
+ nvme_rdma_dma_unmap_req(ibdev, rq);
}
static int nvme_rdma_set_sg_null(struct nvme_command *c)
@@ -1181,16 +1292,17 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
int count)
{
struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
- struct scatterlist *sgl = req->sg_table.sgl;
struct ib_sge *sge = &req->sge[1];
+ struct scatterlist *sgl;
u32 len = 0;
int i;
- for (i = 0; i < count; i++, sgl++, sge++) {
+ for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
sge->addr = sg_dma_address(sgl);
sge->length = sg_dma_len(sgl);
sge->lkey = queue->device->pd->local_dma_lkey;
len += sge->length;
+ sge++;
}
sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
@@ -1206,8 +1318,8 @@ static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
{
struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
- sg->addr = cpu_to_le64(sg_dma_address(req->sg_table.sgl));
- put_unaligned_le24(sg_dma_len(req->sg_table.sgl), sg->length);
+ sg->addr = cpu_to_le64(sg_dma_address(req->data_sgl.sg_table.sgl));
+ put_unaligned_le24(sg_dma_len(req->data_sgl.sg_table.sgl), sg->length);
put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key);
sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
return 0;
@@ -1228,7 +1340,8 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
* Align the MR to a 4K page size to match the ctrl page size and
* the block virtual boundary.
*/
- nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
+ nr = ib_map_mr_sg(req->mr, req->data_sgl.sg_table.sgl, count, NULL,
+ SZ_4K);
if (unlikely(nr < count)) {
ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
req->mr = NULL;
@@ -1259,12 +1372,185 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
return 0;
}
+static void nvme_rdma_set_sig_domain(struct blk_integrity *bi,
+ struct nvme_command *cmd, struct ib_sig_domain *domain,
+ u16 control, u8 pi_type)
+{
+ domain->sig_type = IB_SIG_TYPE_T10_DIF;
+ domain->sig.dif.bg_type = IB_T10DIF_CRC;
+ domain->sig.dif.pi_interval = 1 << bi->interval_exp;
+ domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag);
+ if (control & NVME_RW_PRINFO_PRCHK_REF)
+ domain->sig.dif.ref_remap = true;
+
+ domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
+ domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
+ domain->sig.dif.app_escape = true;
+ if (pi_type == NVME_NS_DPS_PI_TYPE3)
+ domain->sig.dif.ref_escape = true;
+}
+
+static void nvme_rdma_set_sig_attrs(struct blk_integrity *bi,
+ struct nvme_command *cmd, struct ib_sig_attrs *sig_attrs,
+ u8 pi_type)
+{
+ u16 control = le16_to_cpu(cmd->rw.control);
+
+ memset(sig_attrs, 0, sizeof(*sig_attrs));
+ if (control & NVME_RW_PRINFO_PRACT) {
+ /* for WRITE_INSERT/READ_STRIP no memory domain */
+ sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
+ nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
+ pi_type);
+ /* Clear the PRACT bit since HCA will generate/verify the PI */
+ control &= ~NVME_RW_PRINFO_PRACT;
+ cmd->rw.control = cpu_to_le16(control);
+ } else {
+ /* for WRITE_PASS/READ_PASS both wire/memory domains exist */
+ nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
+ pi_type);
+ nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
+ pi_type);
+ }
+}
+
+static void nvme_rdma_set_prot_checks(struct nvme_command *cmd, u8 *mask)
+{
+ *mask = 0;
+ if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_REF)
+ *mask |= IB_SIG_CHECK_REFTAG;
+ if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_GUARD)
+ *mask |= IB_SIG_CHECK_GUARD;
+}
+
+static void nvme_rdma_sig_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ nvme_rdma_wr_error(cq, wc, "SIG");
+}
+
+static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_request *req, struct nvme_command *c,
+ int count, int pi_count)
+{
+ struct nvme_rdma_sgl *sgl = &req->data_sgl;
+ struct ib_reg_wr *wr = &req->reg_wr;
+ struct request *rq = blk_mq_rq_from_pdu(req);
+ struct nvme_ns *ns = rq->q->queuedata;
+ struct bio *bio = rq->bio;
+ struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+ int nr;
+
+ req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs);
+ if (WARN_ON_ONCE(!req->mr))
+ return -EAGAIN;
+
+ nr = ib_map_mr_sg_pi(req->mr, sgl->sg_table.sgl, count, NULL,
+ req->metadata_sgl->sg_table.sgl, pi_count, NULL,
+ SZ_4K);
+ if (unlikely(nr))
+ goto mr_put;
+
+ nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c,
+ req->mr->sig_attrs, ns->pi_type);
+ nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
+
+ ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
+
+ req->reg_cqe.done = nvme_rdma_sig_done;
+ memset(wr, 0, sizeof(*wr));
+ wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
+ wr->wr.wr_cqe = &req->reg_cqe;
+ wr->wr.num_sge = 0;
+ wr->wr.send_flags = 0;
+ wr->mr = req->mr;
+ wr->key = req->mr->rkey;
+ wr->access = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE;
+
+ sg->addr = cpu_to_le64(req->mr->iova);
+ put_unaligned_le24(req->mr->length, sg->length);
+ put_unaligned_le32(req->mr->rkey, sg->key);
+ sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
+
+ return 0;
+
+mr_put:
+ ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr);
+ req->mr = NULL;
+ if (nr < 0)
+ return nr;
+ return -EINVAL;
+}
+
+static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
+ int *count, int *pi_count)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ int ret;
+
+ req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
+ ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
+ blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl,
+ NVME_INLINE_SG_CNT);
+ if (ret)
+ return -ENOMEM;
+
+ req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
+ req->data_sgl.sg_table.sgl);
+
+ *count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
+ req->data_sgl.nents, rq_dma_dir(rq));
+ if (unlikely(*count <= 0)) {
+ ret = -EIO;
+ goto out_free_table;
+ }
+
+ if (blk_integrity_rq(rq)) {
+ req->metadata_sgl->sg_table.sgl =
+ (struct scatterlist *)(req->metadata_sgl + 1);
+ ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table,
+ blk_rq_count_integrity_sg(rq->q, rq->bio),
+ req->metadata_sgl->sg_table.sgl,
+ NVME_INLINE_METADATA_SG_CNT);
+ if (unlikely(ret)) {
+ ret = -ENOMEM;
+ goto out_unmap_sg;
+ }
+
+ req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
+ rq->bio, req->metadata_sgl->sg_table.sgl);
+ *pi_count = ib_dma_map_sg(ibdev,
+ req->metadata_sgl->sg_table.sgl,
+ req->metadata_sgl->nents,
+ rq_dma_dir(rq));
+ if (unlikely(*pi_count <= 0)) {
+ ret = -EIO;
+ goto out_free_pi_table;
+ }
+ }
+
+ return 0;
+
+out_free_pi_table:
+ sg_free_table_chained(&req->metadata_sgl->sg_table,
+ NVME_INLINE_METADATA_SG_CNT);
+out_unmap_sg:
+ ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
+ rq_dma_dir(rq));
+out_free_table:
+ sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+ return ret;
+}
+
static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
struct request *rq, struct nvme_command *c)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_device *dev = queue->device;
struct ib_device *ibdev = dev->dev;
+ int pi_count = 0;
int count, ret;
req->num_sge = 1;
@@ -1275,20 +1561,13 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
if (!blk_rq_nr_phys_segments(rq))
return nvme_rdma_set_sg_null(c);
- req->sg_table.sgl = req->first_sgl;
- ret = sg_alloc_table_chained(&req->sg_table,
- blk_rq_nr_phys_segments(rq), req->sg_table.sgl,
- NVME_INLINE_SG_CNT);
- if (ret)
- return -ENOMEM;
-
- req->nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);
+ ret = nvme_rdma_dma_map_req(ibdev, rq, &count, &pi_count);
+ if (unlikely(ret))
+ return ret;
- count = ib_dma_map_sg(ibdev, req->sg_table.sgl, req->nents,
- rq_dma_dir(rq));
- if (unlikely(count <= 0)) {
- ret = -EIO;
- goto out_free_table;
+ if (req->use_sig_mr) {
+ ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);
+ goto out;
}
if (count <= dev->num_inline_segments) {
@@ -1309,14 +1588,12 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
ret = nvme_rdma_map_sg_fr(queue, req, c, count);
out:
if (unlikely(ret))
- goto out_unmap_sg;
+ goto out_dma_unmap_req;
return 0;
-out_unmap_sg:
- ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
-out_free_table:
- sg_free_table_chained(&req->sg_table, NVME_INLINE_SG_CNT);
+out_dma_unmap_req:
+ nvme_rdma_dma_unmap_req(ibdev, rq);
return ret;
}
@@ -1326,15 +1603,11 @@ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
struct nvme_rdma_request *req =
container_of(qe, struct nvme_rdma_request, sqe);
- struct request *rq = blk_mq_rq_from_pdu(req);
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (unlikely(wc->status != IB_WC_SUCCESS))
nvme_rdma_wr_error(cq, wc, "SEND");
- return;
- }
-
- if (refcount_dec_and_test(&req->ref))
- nvme_end_request(rq, req->status, req->result);
+ else
+ nvme_rdma_end_request(req);
}
static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
@@ -1345,7 +1618,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
int ret;
sge->addr = qe->dma;
- sge->length = sizeof(struct nvme_command),
+ sge->length = sizeof(struct nvme_command);
sge->lkey = queue->device->pd->local_dma_lkey;
wr.next = NULL;
@@ -1442,10 +1715,10 @@ static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
struct request *rq;
struct nvme_rdma_request *req;
- rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
+ rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id);
if (!rq) {
dev_err(queue->ctrl->ctrl.device,
- "tag 0x%x on QP %#x not found\n",
+ "got bad command_id %#x on QP %#x\n",
cqe->command_id, queue->qp->qp_num);
nvme_rdma_error_recovery(queue->ctrl);
return;
@@ -1456,10 +1729,11 @@ static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
req->result = cqe->result;
if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
- if (unlikely(wc->ex.invalidate_rkey != req->mr->rkey)) {
+ if (unlikely(!req->mr ||
+ wc->ex.invalidate_rkey != req->mr->rkey)) {
dev_err(queue->ctrl->ctrl.device,
"Bogus remote invalidation for rkey %#x\n",
- req->mr->rkey);
+ req->mr ? req->mr->rkey : 0);
nvme_rdma_error_recovery(queue->ctrl);
}
} else if (req->mr) {
@@ -1476,15 +1750,14 @@ static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
return;
}
- if (refcount_dec_and_test(&req->ref))
- nvme_end_request(rq, req->status, req->result);
+ nvme_rdma_end_request(req);
}
static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct nvme_rdma_qe *qe =
container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
- struct nvme_rdma_queue *queue = cq->cq_context;
+ struct nvme_rdma_queue *queue = wc->qp->qp_context;
struct ib_device *ibdev = queue->device->dev;
struct nvme_completion *cqe = qe->data;
const size_t len = sizeof(struct nvme_completion);
@@ -1494,6 +1767,14 @@ static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
return;
}
+ /* sanity checking for received data length */
+ if (unlikely(wc->byte_len < len)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Unexpected nvme completion length(%d)\n", wc->byte_len);
+ nvme_rdma_error_recovery(queue->ctrl);
+ return;
+ }
+
ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
/*
* AEN requests are special as they don't time out and can
@@ -1519,14 +1800,10 @@ static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
for (i = 0; i < queue->queue_size; i++) {
ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
if (ret)
- goto out_destroy_queue_ib;
+ return ret;
}
return 0;
-
-out_destroy_queue_ib:
- nvme_rdma_destroy_queue_ib(queue);
- return ret;
}
static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
@@ -1566,7 +1843,7 @@ static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
if (ctrl->opts->tos >= 0)
rdma_set_service_type(queue->cm_id, ctrl->opts->tos);
- ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
+ ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CM_TIMEOUT_MS);
if (ret) {
dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n",
queue->cm_error);
@@ -1616,18 +1893,14 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
}
- ret = rdma_connect(queue->cm_id, &param);
+ ret = rdma_connect_locked(queue->cm_id, &param);
if (ret) {
dev_err(ctrl->ctrl.device,
- "rdma_connect failed (%d).\n", ret);
- goto out_destroy_queue_ib;
+ "rdma_connect_locked failed (%d).\n", ret);
+ return ret;
}
return 0;
-
-out_destroy_queue_ib:
- nvme_rdma_destroy_queue_ib(queue);
- return ret;
}
static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
@@ -1653,14 +1926,11 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
complete(&queue->cm_done);
return 0;
case RDMA_CM_EVENT_REJECTED:
- nvme_rdma_destroy_queue_ib(queue);
cm_error = nvme_rdma_conn_rejected(queue, ev);
break;
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
- nvme_rdma_destroy_queue_ib(queue);
- /* fall through */
case RDMA_CM_EVENT_ADDR_ERROR:
dev_dbg(queue->ctrl->ctrl.device,
"CM error event %d\n", ev->event);
@@ -1691,8 +1961,16 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
return 0;
}
-static enum blk_eh_timer_return
-nvme_rdma_timeout(struct request *rq, bool reserved)
+static void nvme_rdma_complete_timed_out(struct request *rq)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_queue *queue = req->queue;
+
+ nvme_rdma_stop_queue(queue);
+ nvmf_complete_timed_out_request(rq);
+}
+
+static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_queue *queue = req->queue;
@@ -1701,29 +1979,29 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
rq->tag, nvme_rdma_queue_idx(queue));
- /*
- * Restart the timer if a controller reset is already scheduled. Any
- * timed out commands would be handled before entering the connecting
- * state.
- */
- if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
- return BLK_EH_RESET_TIMER;
-
if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
/*
- * Teardown immediately if controller times out while starting
- * or we are already started error recovery. all outstanding
- * requests are completed on shutdown, so we return BLK_EH_DONE.
+ * If we are resetting, connecting or deleting, we should
+ * complete immediately because we may block the controller
+ * teardown or setup sequence:
+ * - ctrl disable/shutdown fabrics requests
+ * - connect requests
+ * - initialization admin requests
+ * - I/O requests that entered after unquiescing and
+ * the controller stopped responding
+ *
+ * All other requests should be cancelled by the error
+ * recovery work, so it's fine to fail this request here.
*/
- flush_work(&ctrl->err_work);
- nvme_rdma_teardown_io_queues(ctrl, false);
- nvme_rdma_teardown_admin_queue(ctrl, false);
+ nvme_rdma_complete_timed_out(rq);
return BLK_EH_DONE;
}
- dev_warn(ctrl->ctrl.device, "starting error recovery\n");
+ /*
+ * LIVE state should trigger the normal error recovery which will
+ * handle completing this request.
+ */
nvme_rdma_error_recovery(ctrl);
-
return BLK_EH_RESET_TIMER;
}
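
The rewritten timeout handler keys purely off the controller state: any command that times out while the controller is not LIVE is completed right away so it cannot block teardown or setup, while a LIVE controller defers to normal error recovery. A compressed, stand-alone C sketch of that policy (the enums are simplified stand-ins, not the driver's types):

#include <stdio.h>

enum ctrl_state { CTRL_LIVE, CTRL_RESETTING, CTRL_CONNECTING, CTRL_DELETING };
enum eh_verdict { EH_DONE, EH_RESET_TIMER };

/*
 * Mirror of the policy above: complete the request ourselves unless the
 * controller is LIVE, in which case error recovery owns completion.
 */
static enum eh_verdict timeout_policy(enum ctrl_state state)
{
	if (state != CTRL_LIVE)
		return EH_DONE;		/* stop the queue, complete the request */
	return EH_RESET_TIMER;		/* kick error recovery, wait again */
}

int main(void)
{
	printf("connecting -> %s\n",
	       timeout_policy(CTRL_CONNECTING) == EH_DONE ? "DONE" : "RESET_TIMER");
	printf("live       -> %s\n",
	       timeout_policy(CTRL_LIVE) == EH_DONE ? "DONE" : "RESET_TIMER");
	return 0;
}
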
@@ -1735,7 +2013,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
struct request *rq = bd->rq;
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_qe *sqe = &req->sqe;
- struct nvme_command *c = sqe->data;
+ struct nvme_command *c = nvme_req(rq)->cmd;
struct ib_device *dev;
bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
blk_status_t ret;
@@ -1743,8 +2021,8 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
WARN_ON_ONCE(rq->tag < 0);
- if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
- return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
+ if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
+ return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
dev = queue->device->dev;
@@ -1758,12 +2036,21 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
ib_dma_sync_single_for_cpu(dev, sqe->dma,
sizeof(struct nvme_command), DMA_TO_DEVICE);
- ret = nvme_setup_cmd(ns, rq, c);
+ ret = nvme_setup_cmd(ns, rq);
if (ret)
goto unmap_qe;
blk_mq_start_request(rq);
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
+ queue->pi_support &&
+ (c->common.opcode == nvme_cmd_write ||
+ c->common.opcode == nvme_cmd_read) &&
+ nvme_ns_has_pi(ns))
+ req->use_sig_mr = true;
+ else
+ req->use_sig_mr = false;
+
err = nvme_rdma_map_data(queue, rq, c);
if (unlikely(err < 0)) {
dev_err(queue->ctrl->ctrl.device,
@@ -1786,7 +2073,9 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
err_unmap:
nvme_rdma_unmap_data(queue, rq);
err:
- if (err == -ENOMEM || err == -EAGAIN)
+ if (err == -EIO)
+ ret = nvme_host_path_error(rq);
+ else if (err == -ENOMEM || err == -EAGAIN)
ret = BLK_STS_RESOURCE;
else
ret = BLK_STS_IOERR;
@@ -1797,28 +2086,62 @@ unmap_qe:
return ret;
}
-static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
+static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct nvme_rdma_queue *queue = hctx->driver_data;
return ib_process_cq_direct(queue->ib_cq, -1);
}
+static void nvme_rdma_check_pi_status(struct nvme_rdma_request *req)
+{
+ struct request *rq = blk_mq_rq_from_pdu(req);
+ struct ib_mr_status mr_status;
+ int ret;
+
+ ret = ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
+ if (ret) {
+ pr_err("ib_check_mr_status failed, ret %d\n", ret);
+ nvme_req(rq)->status = NVME_SC_INVALID_PI;
+ return;
+ }
+
+ if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+ switch (mr_status.sig_err.err_type) {
+ case IB_SIG_BAD_GUARD:
+ nvme_req(rq)->status = NVME_SC_GUARD_CHECK;
+ break;
+ case IB_SIG_BAD_REFTAG:
+ nvme_req(rq)->status = NVME_SC_REFTAG_CHECK;
+ break;
+ case IB_SIG_BAD_APPTAG:
+ nvme_req(rq)->status = NVME_SC_APPTAG_CHECK;
+ break;
+ }
+ pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
+ mr_status.sig_err.err_type, mr_status.sig_err.expected,
+ mr_status.sig_err.actual);
+ }
+}
+
static void nvme_rdma_complete_rq(struct request *rq)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_queue *queue = req->queue;
struct ib_device *ibdev = queue->device->dev;
+ if (req->use_sig_mr)
+ nvme_rdma_check_pi_status(req);
+
nvme_rdma_unmap_data(queue, rq);
ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
DMA_TO_DEVICE);
nvme_complete_rq(rq);
}
-static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
+static void nvme_rdma_map_queues(struct blk_mq_tag_set *set)
{
- struct nvme_rdma_ctrl *ctrl = set->driver_data;
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
@@ -1859,8 +2182,6 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
ctrl->io_queues[HCTX_TYPE_DEFAULT],
ctrl->io_queues[HCTX_TYPE_READ],
ctrl->io_queues[HCTX_TYPE_POLL]);
-
- return 0;
}
static const struct blk_mq_ops nvme_rdma_mq_ops = {
@@ -1885,11 +2206,8 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
{
- cancel_work_sync(&ctrl->err_work);
- cancel_delayed_work_sync(&ctrl->reconnect_work);
-
nvme_rdma_teardown_io_queues(ctrl, shutdown);
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_stop_admin_queue(&ctrl->ctrl);
if (shutdown)
nvme_shutdown_ctrl(&ctrl->ctrl);
else
@@ -1929,7 +2247,7 @@ out_fail:
static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
.name = "rdma",
.module = THIS_MODULE,
- .flags = NVME_F_FABRICS,
+ .flags = NVME_F_FABRICS | NVME_F_METADATA_SUPPORTED,
.reg_read32 = nvmf_reg_read32,
.reg_read64 = nvmf_reg_read64,
.reg_write32 = nvmf_reg_write32,
@@ -1937,6 +2255,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
.submit_async_event = nvme_rdma_submit_async_event,
.delete_ctrl = nvme_rdma_delete_ctrl,
.get_address = nvmf_get_address,
+ .stop_ctrl = nvme_rdma_stop_ctrl,
};
/*
@@ -2043,9 +2362,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
goto out_uninit_ctrl;
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
- ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
-
- nvme_get_ctrl(&ctrl->ctrl);
+ nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
mutex_lock(&nvme_rdma_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 49d4373b84eb..9b47dcb2a7d9 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -20,6 +20,54 @@
struct nvme_tcp_queue;
+/* Define the socket priority to use for connections where it is desirable
+ * that the NIC consider performing optimized packet processing or filtering.
+ * A non-zero value is sufficient to indicate general consideration of any
+ * possible optimization. Making it a module param allows for alternative
+ * values that may be unique for some NIC implementations.
+ */
+static int so_priority;
+module_param(so_priority, int, 0644);
+MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/* lockdep can detect a circular dependency of the form
+ * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
+ * because dependencies are tracked for both nvme-tcp and user contexts. Using
+ * a separate class prevents lockdep from conflating nvme-tcp socket use with
+ * user-space socket API use.
+ */
+static struct lock_class_key nvme_tcp_sk_key[2];
+static struct lock_class_key nvme_tcp_slock_key[2];
+
+static void nvme_tcp_reclassify_socket(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+
+ if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
+ return;
+
+ switch (sk->sk_family) {
+ case AF_INET:
+ sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
+ &nvme_tcp_slock_key[0],
+ "sk_lock-AF_INET-NVME",
+ &nvme_tcp_sk_key[0]);
+ break;
+ case AF_INET6:
+ sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
+ &nvme_tcp_slock_key[1],
+ "sk_lock-AF_INET6-NVME",
+ &nvme_tcp_sk_key[1]);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+#else
+static void nvme_tcp_reclassify_socket(struct socket *sock) { }
+#endif
+
enum nvme_tcp_send_state {
NVME_TCP_SEND_CMD_PDU = 0,
NVME_TCP_SEND_H2C_PDU,
@@ -34,8 +82,12 @@ struct nvme_tcp_request {
u32 data_len;
u32 pdu_len;
u32 pdu_sent;
+ u32 h2cdata_left;
+ u32 h2cdata_offset;
u16 ttag;
+ __le16 status;
struct list_head entry;
+ struct llist_node lentry;
__le32 ddgst;
struct bio *curr_bio;
@@ -50,6 +102,7 @@ struct nvme_tcp_request {
enum nvme_tcp_queue_flags {
NVME_TCP_Q_ALLOCATED = 0,
NVME_TCP_Q_LIVE = 1,
+ NVME_TCP_Q_POLLING = 2,
};
enum nvme_tcp_recv_state {
@@ -64,7 +117,9 @@ struct nvme_tcp_queue {
struct work_struct io_work;
int io_cpu;
- spinlock_t lock;
+ struct mutex queue_lock;
+ struct mutex send_mutex;
+ struct llist_head req_list;
struct list_head send_list;
/* recv state */
@@ -78,7 +133,7 @@ struct nvme_tcp_queue {
/* send state */
struct nvme_tcp_request *request;
- int queue_size;
+ u32 maxh2cdata;
size_t cmnd_capsule_len;
struct nvme_tcp_ctrl *ctrl;
unsigned long flags;
@@ -119,8 +174,9 @@ struct nvme_tcp_ctrl {
static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
-static struct blk_mq_ops nvme_tcp_mq_ops;
-static struct blk_mq_ops nvme_tcp_admin_mq_ops;
+static const struct blk_mq_ops nvme_tcp_mq_ops;
+static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
+static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
@@ -151,9 +207,11 @@ static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}
-static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
+static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
{
- return queue->cmnd_capsule_len - sizeof(struct nvme_command);
+ if (nvme_is_fabrics(req->req.cmd))
+ return NVME_TCP_ADMIN_CCSZ;
+ return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
}
static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
@@ -164,16 +222,14 @@ static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
struct request *rq;
- unsigned int bytes;
if (unlikely(nvme_tcp_async_req(req)))
return false; /* async events don't have a request */
rq = blk_mq_rq_from_pdu(req);
- bytes = blk_rq_payload_bytes(rq);
- return rq_data_dir(rq) == WRITE && bytes &&
- bytes <= nvme_tcp_inline_data_size(req->queue);
+ return rq_data_dir(rq) == WRITE && req->data_len &&
+ req->data_len <= nvme_tcp_inline_data_size(req);
}
static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
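
With the change above, the inline-data capacity depends on the command type: fabrics commands always assume the fixed admin capsule size, while I/O commands use the capsule length negotiated for the queue. A small stand-alone sketch of the decision; the 8 KiB admin capsule and 64-byte command size are assumed values, not taken from this diff:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define CMD_SIZE	64	/* assumed sizeof(struct nvme_command) */
#define ADMIN_CCSZ	8192	/* assumed value of NVME_TCP_ADMIN_CCSZ */

/* Inline capacity: fabrics commands use the fixed admin capsule size,
 * I/O commands use whatever capsule length the queue negotiated. */
static size_t inline_data_size(bool fabrics_cmd, size_t capsule_len)
{
	return fabrics_cmd ? ADMIN_CCSZ : capsule_len - CMD_SIZE;
}

static bool use_inline(bool is_write, size_t data_len,
		       bool fabrics_cmd, size_t capsule_len)
{
	return is_write && data_len &&
	       data_len <= inline_data_size(fabrics_cmd, capsule_len);
}

int main(void)
{
	/* 4 KiB write with an 8 KiB payload capsule goes inline */
	printf("%d\n", use_inline(true, 4096, false, 8192 + CMD_SIZE));
	/* reads never carry inline data */
	printf("%d\n", use_inline(false, 4096, false, 8192 + CMD_SIZE));
	return 0;
}
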
@@ -188,15 +244,10 @@ static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
{
- return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
+ return min_t(size_t, iov_iter_single_seg_count(&req->iter),
req->pdu_len - req->pdu_sent);
}
-static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
-{
- return req->iter.iov_offset;
-}
-
static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
@@ -215,24 +266,29 @@ static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
struct request *rq = blk_mq_rq_from_pdu(req);
struct bio_vec *vec;
unsigned int size;
- int nsegs;
+ int nr_bvec;
size_t offset;
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
vec = &rq->special_vec;
- nsegs = 1;
+ nr_bvec = 1;
size = blk_rq_payload_bytes(rq);
offset = 0;
} else {
struct bio *bio = req->curr_bio;
+ struct bvec_iter bi;
+ struct bio_vec bv;
vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
- nsegs = bio_segments(bio);
+ nr_bvec = 0;
+ bio_for_each_bvec(bv, bio, bi) {
+ nr_bvec++;
+ }
size = bio->bi_iter.bi_size;
offset = bio->bi_iter.bi_bvec_done;
}
- iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
+ iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
req->iter.iov_offset = offset;
}
@@ -249,15 +305,55 @@ static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
}
}
-static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req)
+static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+{
+ int ret;
+
+ /* drain the send queue as much as we can... */
+ do {
+ ret = nvme_tcp_try_send(queue);
+ } while (ret > 0);
+}
+
+static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+{
+ return !list_empty(&queue->send_list) ||
+ !llist_empty(&queue->req_list);
+}
+
+static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
+ bool sync, bool last)
{
struct nvme_tcp_queue *queue = req->queue;
+ bool empty;
- spin_lock(&queue->lock);
- list_add_tail(&req->entry, &queue->send_list);
- spin_unlock(&queue->lock);
+ empty = llist_add(&req->lentry, &queue->req_list) &&
+ list_empty(&queue->send_list) && !queue->request;
- queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+ /*
+ * If we're the first on the send_list, try to send directly;
+ * otherwise queue io_work. Also, only do that if we are on
+ * the same cpu, so we don't introduce contention.
+ */
+ if (queue->io_cpu == raw_smp_processor_id() &&
+ sync && empty && mutex_trylock(&queue->send_mutex)) {
+ nvme_tcp_send_all(queue);
+ mutex_unlock(&queue->send_mutex);
+ }
+
+ if (last && nvme_tcp_queue_more(queue))
+ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+}
+
+static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_request *req;
+ struct llist_node *node;
+
+ for (node = llist_del_all(&queue->req_list); node; node = node->next) {
+ req = llist_entry(node, struct nvme_tcp_request, lentry);
+ list_add(&req->entry, &queue->send_list);
+ }
}
static inline struct nvme_tcp_request *
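
Submissions now go through a lock-free llist that only the sending context drains onto the ordered send_list; llist_del_all() hands entries back newest-first, and inserting each at the head of send_list restores submission order. A minimal userspace analogue using C11 atomics (all names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node { int id; struct node *next; };

static _Atomic(struct node *) req_list;	/* lock-free producer side */
static struct node *send_list;		/* consumer-only, ordered   */

/* Push a node; return true when the list was empty (like llist_add()). */
static bool push(struct node *n)
{
	struct node *old = atomic_load(&req_list);

	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak(&req_list, &old, n));
	return old == NULL;
}

/* Drain everything at once, newest-first, and re-reverse by inserting
 * at the head of send_list (list_add() in the hunk above). */
static void process_req_list(void)
{
	struct node *n = atomic_exchange(&req_list, NULL);

	while (n) {
		struct node *next = n->next;

		n->next = send_list;
		send_list = n;
		n = next;
	}
}

int main(void)
{
	static struct node a = { 1 }, b = { 2 }, c = { 3 };

	printf("empty before a: %d\n", push(&a));
	push(&b);
	push(&c);
	process_req_list();
	for (struct node *n = send_list; n; n = n->next)
		printf("send_list: %d\n", n->id);	/* 1, 2, 3 */
	return 0;
}
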
@@ -265,13 +361,17 @@ nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
{
struct nvme_tcp_request *req;
- spin_lock(&queue->lock);
req = list_first_entry_or_null(&queue->send_list,
struct nvme_tcp_request, entry);
- if (req)
- list_del(&req->entry);
- spin_unlock(&queue->lock);
+ if (!req) {
+ nvme_tcp_process_req_list(queue);
+ req = list_first_entry_or_null(&queue->send_list,
+ struct nvme_tcp_request, entry);
+ if (unlikely(!req))
+ return NULL;
+ }
+ list_del(&req->entry);
return req;
}
@@ -287,7 +387,7 @@ static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
{
struct scatterlist sg;
- sg_init_marker(&sg, 1);
+ sg_init_table(&sg, 1);
sg_set_page(&sg, page, len, off);
ahash_request_set_crypt(hash, &sg, NULL, len);
crypto_ahash_update(hash);
@@ -362,8 +462,9 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
struct request *rq, unsigned int hctx_idx,
unsigned int numa_node)
{
- struct nvme_tcp_ctrl *ctrl = set->driver_data;
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_tcp_cmd_pdu *pdu;
int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
u8 hdgst = nvme_tcp_hdgst_len(queue);
@@ -374,8 +475,10 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
if (!req->pdu)
return -ENOMEM;
+ pdu = req->pdu;
req->queue = queue;
nvme_req(rq)->ctrl = &ctrl->ctrl;
+ nvme_req(rq)->cmd = &pdu->cmd;
return 0;
}
@@ -383,7 +486,7 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_tcp_ctrl *ctrl = data;
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
hctx->driver_data = queue;
@@ -393,7 +496,7 @@ static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_tcp_ctrl *ctrl = data;
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
struct nvme_tcp_queue *queue = &ctrl->queues[0];
hctx->driver_data = queue;
@@ -422,24 +525,31 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
return;
+ dev_warn(ctrl->device, "starting error recovery\n");
queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}
static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
struct nvme_completion *cqe)
{
+ struct nvme_tcp_request *req;
struct request *rq;
- rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
+ rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
if (!rq) {
dev_err(queue->ctrl->ctrl.device,
- "queue %d tag 0x%x not found\n",
- nvme_tcp_queue_id(queue), cqe->command_id);
+ "got bad cqe.command_id %#x on queue %d\n",
+ cqe->command_id, nvme_tcp_queue_id(queue));
nvme_tcp_error_recovery(&queue->ctrl->ctrl);
return -EINVAL;
}
- nvme_end_request(rq, cqe->status, cqe->result);
+ req = blk_mq_rq_to_pdu(rq);
+ if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
+ req->status = cqe->status;
+
+ if (!nvme_try_complete_req(rq, req->status, cqe->result))
+ nvme_complete_rq(rq);
queue->nr_cqe++;
return 0;
@@ -450,11 +560,11 @@ static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
{
struct request *rq;
- rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+ rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
if (!rq) {
dev_err(queue->ctrl->ctrl.device,
- "queue %d tag %#x not found\n",
- nvme_tcp_queue_id(queue), pdu->command_id);
+ "got bad c2hdata.command_id %#x on queue %d\n",
+ pdu->command_id, nvme_tcp_queue_id(queue));
return -ENOENT;
}
@@ -501,37 +611,26 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
return ret;
}
-static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
- struct nvme_tcp_r2t_pdu *pdu)
+static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
{
struct nvme_tcp_data_pdu *data = req->pdu;
struct nvme_tcp_queue *queue = req->queue;
struct request *rq = blk_mq_rq_from_pdu(req);
+ u32 h2cdata_sent = req->pdu_len;
u8 hdgst = nvme_tcp_hdgst_len(queue);
u8 ddgst = nvme_tcp_ddgst_len(queue);
- req->pdu_len = le32_to_cpu(pdu->r2t_length);
+ req->state = NVME_TCP_SEND_H2C_PDU;
+ req->offset = 0;
+ req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
req->pdu_sent = 0;
-
- if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
- dev_err(queue->ctrl->ctrl.device,
- "req %d r2t len %u exceeded data len %u (%zu sent)\n",
- rq->tag, req->pdu_len, req->data_len,
- req->data_sent);
- return -EPROTO;
- }
-
- if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
- dev_err(queue->ctrl->ctrl.device,
- "req %d unexpected r2t offset %u (expected %zu)\n",
- rq->tag, le32_to_cpu(pdu->r2t_offset),
- req->data_sent);
- return -EPROTO;
- }
+ req->h2cdata_left -= req->pdu_len;
+ req->h2cdata_offset += h2cdata_sent;
memset(data, 0, sizeof(*data));
data->hdr.type = nvme_tcp_h2c_data;
- data->hdr.flags = NVME_TCP_F_DATA_LAST;
+ if (!req->h2cdata_left)
+ data->hdr.flags = NVME_TCP_F_DATA_LAST;
if (queue->hdr_digest)
data->hdr.flags |= NVME_TCP_F_HDGST;
if (queue->data_digest)
@@ -540,11 +639,10 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
data->hdr.pdo = data->hdr.hlen + hdgst;
data->hdr.plen =
cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
- data->ttag = pdu->ttag;
- data->command_id = rq->tag;
- data->data_offset = cpu_to_le32(req->data_sent);
+ data->ttag = req->ttag;
+ data->command_id = nvme_cid(rq);
+ data->data_offset = cpu_to_le32(req->h2cdata_offset);
data->data_length = cpu_to_le32(req->pdu_len);
- return 0;
}
static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
@@ -552,25 +650,46 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
{
struct nvme_tcp_request *req;
struct request *rq;
- int ret;
+ u32 r2t_length = le32_to_cpu(pdu->r2t_length);
+ u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);
- rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+ rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
if (!rq) {
dev_err(queue->ctrl->ctrl.device,
- "queue %d tag %#x not found\n",
- nvme_tcp_queue_id(queue), pdu->command_id);
+ "got bad r2t.command_id %#x on queue %d\n",
+ pdu->command_id, nvme_tcp_queue_id(queue));
return -ENOENT;
}
req = blk_mq_rq_to_pdu(rq);
- ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
- if (unlikely(ret))
- return ret;
+ if (unlikely(!r2t_length)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "req %d r2t len is %u, probably a bug...\n",
+ rq->tag, r2t_length);
+ return -EPROTO;
+ }
- req->state = NVME_TCP_SEND_H2C_PDU;
- req->offset = 0;
+ if (unlikely(req->data_sent + r2t_length > req->data_len)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "req %d r2t len %u exceeded data len %u (%zu sent)\n",
+ rq->tag, r2t_length, req->data_len, req->data_sent);
+ return -EPROTO;
+ }
+
+ if (unlikely(r2t_offset < req->data_sent)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "req %d unexpected r2t offset %u (expected %zu)\n",
+ rq->tag, r2t_offset, req->data_sent);
+ return -EPROTO;
+ }
+
+ req->pdu_len = 0;
+ req->h2cdata_left = r2t_length;
+ req->h2cdata_offset = r2t_offset;
+ req->ttag = pdu->ttag;
- nvme_tcp_queue_request(req);
+ nvme_tcp_setup_h2c_data_pdu(req);
+ nvme_tcp_queue_request(req, false, true);
return 0;
}
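
An R2T may now ask for more data than the target's advertised MAXH2CDATA, so the host answers it with a series of H2CData PDUs, each at most maxh2cdata bytes and only the last one flagged DATA_LAST. A stand-alone sketch of the chunking, mirroring the pdu_len = min(h2cdata_left, maxh2cdata) bookkeeping above (the sizes in main() are made up):

#include <stdio.h>

/* Walk one R2T and print the H2CData PDUs the host would emit for it. */
static void split_r2t(unsigned int r2t_offset, unsigned int r2t_length,
		      unsigned int maxh2cdata)
{
	unsigned int left = r2t_length;
	unsigned int offset = r2t_offset;

	while (left) {
		unsigned int pdu_len = left < maxh2cdata ? left : maxh2cdata;

		left -= pdu_len;
		printf("H2CData: offset %u, length %u%s\n", offset, pdu_len,
		       left ? "" : " (DATA_LAST)");
		offset += pdu_len;
	}
}

int main(void)
{
	split_r2t(0, 131072, 65536);	/* 128k R2T, 64k MAXH2CDATA -> 2 PDUs */
	split_r2t(4096, 10000, 4096);	/* unaligned tail gets a short PDU    */
	return 0;
}
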
@@ -629,24 +748,17 @@ static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
union nvme_result res = {};
- nvme_end_request(rq, cpu_to_le16(status << 1), res);
+ if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
+ nvme_complete_rq(rq);
}
static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
unsigned int *offset, size_t *len)
{
struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
- struct nvme_tcp_request *req;
- struct request *rq;
-
- rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
- if (!rq) {
- dev_err(queue->ctrl->ctrl.device,
- "queue %d tag %#x not found\n",
- nvme_tcp_queue_id(queue), pdu->command_id);
- return -ENOENT;
- }
- req = blk_mq_rq_to_pdu(rq);
+ struct request *rq =
+ nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
while (true) {
int recv_len, ret;
@@ -700,7 +812,8 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
} else {
if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
- nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
+ nvme_tcp_end_request(rq,
+ le16_to_cpu(req->status));
queue->nr_cqe++;
}
nvme_tcp_init_recv_ctx(queue);
@@ -730,18 +843,24 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
return 0;
if (queue->recv_ddgst != queue->exp_ddgst) {
+ struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
+ pdu->command_id);
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+
+ req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
+
dev_err(queue->ctrl->ctrl.device,
"data digest error: recv %#x expected %#x\n",
le32_to_cpu(queue->recv_ddgst),
le32_to_cpu(queue->exp_ddgst));
- return -EIO;
}
if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
- struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
- pdu->command_id);
+ struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
+ pdu->command_id);
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
- nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
+ nvme_tcp_end_request(rq, le16_to_cpu(req->status));
queue->nr_cqe++;
}
@@ -786,11 +905,12 @@ static void nvme_tcp_data_ready(struct sock *sk)
{
struct nvme_tcp_queue *queue;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
queue = sk->sk_user_data;
- if (likely(queue && queue->rd_enabled))
+ if (likely(queue && queue->rd_enabled) &&
+ !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static void nvme_tcp_write_space(struct sock *sk)
@@ -810,7 +930,7 @@ static void nvme_tcp_state_change(struct sock *sk)
{
struct nvme_tcp_queue *queue;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
queue = sk->sk_user_data;
if (!queue)
goto done;
@@ -821,7 +941,6 @@ static void nvme_tcp_state_change(struct sock *sk)
case TCP_LAST_ACK:
case TCP_FIN_WAIT1:
case TCP_FIN_WAIT2:
- /* fallthrough */
nvme_tcp_error_recovery(&queue->ctrl->ctrl);
break;
default:
@@ -832,7 +951,7 @@ static void nvme_tcp_state_change(struct sock *sk)
queue->state_change(sk);
done:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
@@ -842,42 +961,59 @@ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
- nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
+ if (nvme_tcp_async_req(req)) {
+ union nvme_result res = {};
+
+ nvme_complete_async_event(&req->queue->ctrl->ctrl,
+ cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
+ } else {
+ nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
+ NVME_SC_HOST_PATH_ERROR);
+ }
}
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
struct nvme_tcp_queue *queue = req->queue;
+ int req_data_len = req->data_len;
+ u32 h2cdata_left = req->h2cdata_left;
while (true) {
struct page *page = nvme_tcp_req_cur_page(req);
size_t offset = nvme_tcp_req_cur_offset(req);
size_t len = nvme_tcp_req_cur_length(req);
bool last = nvme_tcp_pdu_last_send(req, len);
+ int req_data_sent = req->data_sent;
int ret, flags = MSG_DONTWAIT;
- if (last && !queue->data_digest)
+ if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
flags |= MSG_EOR;
else
- flags |= MSG_MORE;
+ flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
- /* can't zcopy slab pages */
- if (unlikely(PageSlab(page))) {
- ret = sock_no_sendpage(queue->sock, page, offset, len,
+ if (sendpage_ok(page)) {
+ ret = kernel_sendpage(queue->sock, page, offset, len,
flags);
} else {
- ret = kernel_sendpage(queue->sock, page, offset, len,
+ ret = sock_no_sendpage(queue->sock, page, offset, len,
flags);
}
if (ret <= 0)
return ret;
- nvme_tcp_advance_req(req, ret);
if (queue->data_digest)
nvme_tcp_ddgst_update(queue->snd_hash, page,
offset, ret);
- /* fully successful last write*/
+ /*
+ * update the request iterator except for the last payload send
+ * in the request, where we must not modify it because we may
+ * race with the RX path completing the request.
+ */
+ if (req_data_sent + ret < req_data_len)
+ nvme_tcp_advance_req(req, ret);
+
+ /* fully successful last send in current PDU */
if (last && ret == len) {
if (queue->data_digest) {
nvme_tcp_ddgst_final(queue->snd_hash,
@@ -885,7 +1021,10 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
req->state = NVME_TCP_SEND_DDGST;
req->offset = 0;
} else {
- nvme_tcp_done_send_req(queue);
+ if (h2cdata_left)
+ nvme_tcp_setup_h2c_data_pdu(req);
+ else
+ nvme_tcp_done_send_req(queue);
}
return 1;
}
@@ -898,11 +1037,16 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
struct nvme_tcp_queue *queue = req->queue;
struct nvme_tcp_cmd_pdu *pdu = req->pdu;
bool inline_data = nvme_tcp_has_inline_data(req);
- int flags = MSG_DONTWAIT | (inline_data ? MSG_MORE : MSG_EOR);
u8 hdgst = nvme_tcp_hdgst_len(queue);
int len = sizeof(*pdu) + hdgst - req->offset;
+ int flags = MSG_DONTWAIT;
int ret;
+ if (inline_data || nvme_tcp_queue_more(queue))
+ flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
+ else
+ flags |= MSG_EOR;
+
if (queue->hdr_digest && !req->offset)
nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
@@ -917,7 +1061,6 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
req->state = NVME_TCP_SEND_DATA;
if (queue->data_digest)
crypto_ahash_init(queue->snd_hash);
- nvme_tcp_init_iter(req, WRITE);
} else {
nvme_tcp_done_send_req(queue);
}
@@ -939,9 +1082,14 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
if (queue->hdr_digest && !req->offset)
nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
- ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
- offset_in_page(pdu) + req->offset, len,
- MSG_DONTWAIT | MSG_MORE);
+ if (!req->h2cdata_left)
+ ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
+ offset_in_page(pdu) + req->offset, len,
+ MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
+ else
+ ret = sock_no_sendpage(queue->sock, virt_to_page(pdu),
+ offset_in_page(pdu) + req->offset, len,
+ MSG_DONTWAIT | MSG_MORE);
if (unlikely(ret <= 0))
return ret;
@@ -950,8 +1098,6 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
req->state = NVME_TCP_SEND_DATA;
if (queue->data_digest)
crypto_ahash_init(queue->snd_hash);
- if (!req->data_sent)
- nvme_tcp_init_iter(req, WRITE);
return 1;
}
req->offset += ret;
@@ -962,19 +1108,29 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
struct nvme_tcp_queue *queue = req->queue;
+ size_t offset = req->offset;
+ u32 h2cdata_left = req->h2cdata_left;
int ret;
- struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
struct kvec iov = {
- .iov_base = &req->ddgst + req->offset,
+ .iov_base = (u8 *)&req->ddgst + req->offset,
.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
};
+ if (nvme_tcp_queue_more(queue))
+ msg.msg_flags |= MSG_MORE;
+ else
+ msg.msg_flags |= MSG_EOR;
+
ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
if (unlikely(ret <= 0))
return ret;
- if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
- nvme_tcp_done_send_req(queue);
+ if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
+ if (h2cdata_left)
+ nvme_tcp_setup_h2c_data_pdu(req);
+ else
+ nvme_tcp_done_send_req(queue);
return 1;
}
@@ -985,6 +1141,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
{
struct nvme_tcp_request *req;
+ unsigned int noreclaim_flag;
int ret = 1;
if (!queue->request) {
@@ -994,12 +1151,13 @@ static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
}
req = queue->request;
+ noreclaim_flag = memalloc_noreclaim_save();
if (req->state == NVME_TCP_SEND_CMD_PDU) {
ret = nvme_tcp_try_send_cmd_pdu(req);
if (ret <= 0)
goto done;
if (!nvme_tcp_has_inline_data(req))
- return ret;
+ goto out;
}
if (req->state == NVME_TCP_SEND_H2C_PDU) {
@@ -1017,8 +1175,16 @@ static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
if (req->state == NVME_TCP_SEND_DDGST)
ret = nvme_tcp_try_send_ddgst(req);
done:
- if (ret == -EAGAIN)
+ if (ret == -EAGAIN) {
ret = 0;
+ } else if (ret < 0) {
+ dev_err(queue->ctrl->ctrl.device,
+ "failed to send request %d\n", ret);
+ nvme_tcp_fail_request(queue->request);
+ nvme_tcp_done_send_req(queue);
+ }
+out:
+ memalloc_noreclaim_restore(noreclaim_flag);
return ret;
}
@@ -1048,28 +1214,22 @@ static void nvme_tcp_io_work(struct work_struct *w)
bool pending = false;
int result;
- result = nvme_tcp_try_send(queue);
- if (result > 0) {
- pending = true;
- } else if (unlikely(result < 0)) {
- dev_err(queue->ctrl->ctrl.device,
- "failed to send request %d\n", result);
-
- /*
- * Fail the request unless peer closed the connection,
- * in which case error recovery flow will complete all.
- */
- if ((result != -EPIPE) && (result != -ECONNRESET))
- nvme_tcp_fail_request(queue->request);
- nvme_tcp_done_send_req(queue);
- return;
+ if (mutex_trylock(&queue->send_mutex)) {
+ result = nvme_tcp_try_send(queue);
+ mutex_unlock(&queue->send_mutex);
+ if (result > 0)
+ pending = true;
+ else if (unlikely(result < 0))
+ break;
}
result = nvme_tcp_try_recv(queue);
if (result > 0)
pending = true;
+ else if (unlikely(result < 0))
+ return;
- if (!pending)
+ if (!pending || !queue->rd_enabled)
return;
} while (!time_after(jiffies, deadline)); /* quota is exhausted */
@@ -1137,8 +1297,10 @@ static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{
+ struct page *page;
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+ unsigned int noreclaim_flag;
if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
return;
@@ -1146,8 +1308,19 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
if (queue->hdr_digest || queue->data_digest)
nvme_tcp_free_crypto(queue);
+ if (queue->pf_cache.va) {
+ page = virt_to_head_page(queue->pf_cache.va);
+ __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
+ queue->pf_cache.va = NULL;
+ }
+
+ noreclaim_flag = memalloc_noreclaim_save();
sock_release(queue->sock);
+ memalloc_noreclaim_restore(noreclaim_flag);
+
kfree(queue->pdu);
+ mutex_destroy(&queue->send_mutex);
+ mutex_destroy(&queue->queue_lock);
}
static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
@@ -1157,6 +1330,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
struct msghdr msg = {};
struct kvec iov;
bool ctrl_hdgst, ctrl_ddgst;
+ u32 maxh2cdata;
int ret;
icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
@@ -1240,6 +1414,14 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
goto free_icresp;
}
+ maxh2cdata = le32_to_cpu(icresp->maxdata);
+ if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) {
+ pr_err("queue %d: invalid maxh2cdata returned %u\n",
+ nvme_tcp_queue_id(queue), maxh2cdata);
+ goto free_icresp;
+ }
+ queue->maxh2cdata = maxh2cdata;
+
ret = 0;
free_icresp:
kfree(icresp);
@@ -1248,19 +1430,72 @@ free_icreq:
return ret;
}
-static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
- int qid, size_t queue_size)
+static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
+{
+ return nvme_tcp_queue_id(queue) == 0;
+}
+
+static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+ int qid = nvme_tcp_queue_id(queue);
+
+ return !nvme_tcp_admin_queue(queue) &&
+ qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
+}
+
+static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+ int qid = nvme_tcp_queue_id(queue);
+
+ return !nvme_tcp_admin_queue(queue) &&
+ !nvme_tcp_default_queue(queue) &&
+ qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+ ctrl->io_queues[HCTX_TYPE_READ];
+}
+
+static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+ int qid = nvme_tcp_queue_id(queue);
+
+ return !nvme_tcp_admin_queue(queue) &&
+ !nvme_tcp_default_queue(queue) &&
+ !nvme_tcp_read_queue(queue) &&
+ qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+ ctrl->io_queues[HCTX_TYPE_READ] +
+ ctrl->io_queues[HCTX_TYPE_POLL];
+}
+
+static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_ctrl *ctrl = queue->ctrl;
+ int qid = nvme_tcp_queue_id(queue);
+ int n = 0;
+
+ if (nvme_tcp_default_queue(queue))
+ n = qid - 1;
+ else if (nvme_tcp_read_queue(queue))
+ n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
+ else if (nvme_tcp_poll_queue(queue))
+ n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
+ ctrl->io_queues[HCTX_TYPE_READ] - 1;
+ queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+}
+
+static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
- struct linger sol = { .l_onoff = 1, .l_linger = 0 };
- int ret, opt, rcv_pdu_size, n;
+ int ret, rcv_pdu_size;
+ mutex_init(&queue->queue_lock);
queue->ctrl = ctrl;
+ init_llist_head(&queue->req_list);
INIT_LIST_HEAD(&queue->send_list);
- spin_lock_init(&queue->lock);
+ mutex_init(&queue->send_mutex);
INIT_WORK(&queue->io_work, nvme_tcp_io_work);
- queue->queue_size = queue_size;
if (qid > 0)
queue->cmnd_capsule_len = nctrl->ioccsz * 16;
@@ -1273,60 +1508,36 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
if (ret) {
dev_err(nctrl->device,
"failed to create socket: %d\n", ret);
- return ret;
+ goto err_destroy_mutex;
}
+ nvme_tcp_reclassify_socket(queue->sock);
+
/* Single syn retry */
- opt = 1;
- ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT,
- (char *)&opt, sizeof(opt));
- if (ret) {
- dev_err(nctrl->device,
- "failed to set TCP_SYNCNT sock opt %d\n", ret);
- goto err_sock;
- }
+ tcp_sock_set_syncnt(queue->sock->sk, 1);
/* Set TCP no delay */
- opt = 1;
- ret = kernel_setsockopt(queue->sock, IPPROTO_TCP,
- TCP_NODELAY, (char *)&opt, sizeof(opt));
- if (ret) {
- dev_err(nctrl->device,
- "failed to set TCP_NODELAY sock opt %d\n", ret);
- goto err_sock;
- }
+ tcp_sock_set_nodelay(queue->sock->sk);
/*
* Cleanup whatever is sitting in the TCP transmit queue on socket
* close. This is done to prevent stale data from being sent should
* the network connection be restored before TCP times out.
*/
- ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER,
- (char *)&sol, sizeof(sol));
- if (ret) {
- dev_err(nctrl->device,
- "failed to set SO_LINGER sock opt %d\n", ret);
- goto err_sock;
- }
+ sock_no_linger(queue->sock->sk);
+
+ if (so_priority > 0)
+ sock_set_priority(queue->sock->sk, so_priority);
/* Set socket type of service */
- if (nctrl->opts->tos >= 0) {
- opt = nctrl->opts->tos;
- ret = kernel_setsockopt(queue->sock, SOL_IP, IP_TOS,
- (char *)&opt, sizeof(opt));
- if (ret) {
- dev_err(nctrl->device,
- "failed to set IP_TOS sock opt %d\n", ret);
- goto err_sock;
- }
- }
+ if (nctrl->opts->tos >= 0)
+ ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
+
+ /* Set 10 seconds timeout for icresp recvmsg */
+ queue->sock->sk->sk_rcvtimeo = 10 * HZ;
queue->sock->sk->sk_allocation = GFP_ATOMIC;
- if (!qid)
- n = 0;
- else
- n = (qid - 1) % num_online_cpus();
- queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+ nvme_tcp_set_queue_io_cpu(queue);
queue->request = NULL;
queue->data_remaining = 0;
queue->ddgst_remaining = 0;
@@ -1345,6 +1556,20 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
}
}
+ if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
+ char *iface = nctrl->opts->host_iface;
+ sockptr_t optval = KERNEL_SOCKPTR(iface);
+
+ ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
+ optval, strlen(iface));
+ if (ret) {
+ dev_err(nctrl->device,
+ "failed to bind to interface %s queue %d err %d\n",
+ iface, qid, ret);
+ goto err_sock;
+ }
+ }
+
queue->hdr_digest = nctrl->opts->hdr_digest;
queue->data_digest = nctrl->opts->data_digest;
if (queue->hdr_digest || queue->data_digest) {
@@ -1408,6 +1633,9 @@ err_crypto:
err_sock:
sock_release(queue->sock);
queue->sock = NULL;
+err_destroy_mutex:
+ mutex_destroy(&queue->send_mutex);
+ mutex_destroy(&queue->queue_lock);
return ret;
}
@@ -1435,10 +1663,13 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
- if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
return;
- __nvme_tcp_stop_queue(queue);
+ mutex_lock(&queue->queue_lock);
+ if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ __nvme_tcp_stop_queue(queue);
+ mutex_unlock(&queue->queue_lock);
}
static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
@@ -1447,7 +1678,7 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
int ret;
if (idx)
- ret = nvmf_connect_io_queue(nctrl, idx, false);
+ ret = nvmf_connect_io_queue(nctrl, idx);
else
ret = nvmf_connect_admin_queue(nctrl);
@@ -1462,49 +1693,10 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
return ret;
}
-static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
- bool admin)
-{
- struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
- struct blk_mq_tag_set *set;
- int ret;
-
- if (admin) {
- set = &ctrl->admin_tag_set;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_tcp_admin_mq_ops;
- set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- set->reserved_tags = 2; /* connect + keep-alive */
- set->numa_node = NUMA_NO_NODE;
- set->cmd_size = sizeof(struct nvme_tcp_request);
- set->driver_data = ctrl;
- set->nr_hw_queues = 1;
- set->timeout = ADMIN_TIMEOUT;
- } else {
- set = &ctrl->tag_set;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_tcp_mq_ops;
- set->queue_depth = nctrl->sqsize + 1;
- set->reserved_tags = 1; /* fabric connect */
- set->numa_node = NUMA_NO_NODE;
- set->flags = BLK_MQ_F_SHOULD_MERGE;
- set->cmd_size = sizeof(struct nvme_tcp_request);
- set->driver_data = ctrl;
- set->nr_hw_queues = nctrl->queue_count - 1;
- set->timeout = NVME_IO_TIMEOUT;
- set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
- }
-
- ret = blk_mq_alloc_tag_set(set);
- if (ret)
- return ERR_PTR(ret);
-
- return set;
-}
-
static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
{
if (to_tcp_ctrl(ctrl)->async_req.pdu) {
+ cancel_work_sync(&ctrl->async_event_work);
nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
}
@@ -1528,11 +1720,12 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
nvme_tcp_stop_queue(ctrl, i);
}
-static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
+static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
+ int first, int last)
{
- int i, ret = 0;
+ int i, ret;
- for (i = 1; i < ctrl->queue_count; i++) {
+ for (i = first; i < last; i++) {
ret = nvme_tcp_start_queue(ctrl, i);
if (ret)
goto out_stop_queues;
@@ -1541,7 +1734,7 @@ static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
return 0;
out_stop_queues:
- for (i--; i >= 1; i--)
+ for (i--; i >= first; i--)
nvme_tcp_stop_queue(ctrl, i);
return ret;
}
@@ -1550,7 +1743,7 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
{
int ret;
- ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
+ ret = nvme_tcp_alloc_queue(ctrl, 0);
if (ret)
return ret;
@@ -1570,8 +1763,7 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
int i, ret;
for (i = 1; i < ctrl->queue_count; i++) {
- ret = nvme_tcp_alloc_queue(ctrl, i,
- ctrl->sqsize + 1);
+ ret = nvme_tcp_alloc_queue(ctrl, i);
if (ret)
goto out_free_queues;
}
@@ -1641,10 +1833,13 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
if (ret)
return ret;
- ctrl->queue_count = nr_io_queues + 1;
- if (ctrl->queue_count < 2)
- return 0;
+ if (nr_io_queues == 0) {
+ dev_err(ctrl->device,
+ "unable to set any I/O queues\n");
+ return -ENOMEM;
+ }
+ ctrl->queue_count = nr_io_queues + 1;
dev_info(ctrl->device,
"creating %d I/O queues.\n", nr_io_queues);
@@ -1656,50 +1851,73 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
{
nvme_tcp_stop_io_queues(ctrl);
- if (remove) {
- blk_cleanup_queue(ctrl->connect_q);
- blk_mq_free_tag_set(ctrl->tagset);
- }
+ if (remove)
+ nvme_remove_io_tag_set(ctrl);
nvme_tcp_free_io_queues(ctrl);
}
static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
{
- int ret;
+ int ret, nr_queues;
ret = nvme_tcp_alloc_io_queues(ctrl);
if (ret)
return ret;
if (new) {
- ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
- if (IS_ERR(ctrl->tagset)) {
- ret = PTR_ERR(ctrl->tagset);
+ ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
+ &nvme_tcp_mq_ops,
+ BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING,
+ sizeof(struct nvme_tcp_request));
+ if (ret)
goto out_free_io_queues;
- }
+ }
+
+ /*
+ * Only start IO queues for which we have allocated the tagset
+	 * and limited it to the available queues. On reconnects, the
+ * queue number might have changed.
+ */
+ nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);
+ ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues);
+ if (ret)
+ goto out_cleanup_connect_q;
- ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
- if (IS_ERR(ctrl->connect_q)) {
- ret = PTR_ERR(ctrl->connect_q);
- goto out_free_tag_set;
+ if (!new) {
+ nvme_start_queues(ctrl);
+ if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
+ /*
+ * If we timed out waiting for freeze we are likely to
+ * be stuck. Fail the controller initialization just
+ * to be safe.
+ */
+ ret = -ENODEV;
+ goto out_wait_freeze_timed_out;
}
- } else {
blk_mq_update_nr_hw_queues(ctrl->tagset,
ctrl->queue_count - 1);
+ nvme_unfreeze(ctrl);
}
- ret = nvme_tcp_start_io_queues(ctrl);
+ /*
+ * If the number of queues has increased (reconnect case)
+ * start all new queues now.
+ */
+ ret = nvme_tcp_start_io_queues(ctrl, nr_queues,
+ ctrl->tagset->nr_hw_queues + 1);
if (ret)
- goto out_cleanup_connect_q;
+ goto out_wait_freeze_timed_out;
return 0;
+out_wait_freeze_timed_out:
+ nvme_stop_queues(ctrl);
+ nvme_sync_io_queues(ctrl);
+ nvme_tcp_stop_io_queues(ctrl);
out_cleanup_connect_q:
+ nvme_cancel_tagset(ctrl);
if (new)
- blk_cleanup_queue(ctrl->connect_q);
-out_free_tag_set:
- if (new)
- blk_mq_free_tag_set(ctrl->tagset);
+ nvme_remove_io_tag_set(ctrl);
out_free_io_queues:
nvme_tcp_free_io_queues(ctrl);
return ret;
@@ -1708,11 +1926,8 @@ out_free_io_queues:
static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
{
nvme_tcp_stop_queue(ctrl, 0);
- if (remove) {
- blk_cleanup_queue(ctrl->admin_q);
- blk_cleanup_queue(ctrl->fabrics_q);
- blk_mq_free_tag_set(ctrl->admin_tagset);
- }
+ if (remove)
+ nvme_remove_admin_tag_set(ctrl);
nvme_tcp_free_admin_queue(ctrl);
}
@@ -1725,52 +1940,39 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
return error;
if (new) {
- ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
- if (IS_ERR(ctrl->admin_tagset)) {
- error = PTR_ERR(ctrl->admin_tagset);
+ error = nvme_alloc_admin_tag_set(ctrl,
+ &to_tcp_ctrl(ctrl)->admin_tag_set,
+ &nvme_tcp_admin_mq_ops, BLK_MQ_F_BLOCKING,
+ sizeof(struct nvme_tcp_request));
+ if (error)
goto out_free_queue;
- }
-
- ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
- if (IS_ERR(ctrl->fabrics_q)) {
- error = PTR_ERR(ctrl->fabrics_q);
- goto out_free_tagset;
- }
-
- ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
- if (IS_ERR(ctrl->admin_q)) {
- error = PTR_ERR(ctrl->admin_q);
- goto out_cleanup_fabrics_q;
- }
}
error = nvme_tcp_start_queue(ctrl, 0);
if (error)
- goto out_cleanup_queue;
+ goto out_cleanup_tagset;
error = nvme_enable_ctrl(ctrl);
if (error)
goto out_stop_queue;
- blk_mq_unquiesce_queue(ctrl->admin_q);
+ nvme_start_admin_queue(ctrl);
- error = nvme_init_identify(ctrl);
+ error = nvme_init_ctrl_finish(ctrl);
if (error)
- goto out_stop_queue;
+ goto out_quiesce_queue;
return 0;
+out_quiesce_queue:
+ nvme_stop_admin_queue(ctrl);
+ blk_sync_queue(ctrl->admin_q);
out_stop_queue:
nvme_tcp_stop_queue(ctrl, 0);
-out_cleanup_queue:
+ nvme_cancel_admin_tagset(ctrl);
+out_cleanup_tagset:
if (new)
- blk_cleanup_queue(ctrl->admin_q);
-out_cleanup_fabrics_q:
- if (new)
- blk_cleanup_queue(ctrl->fabrics_q);
-out_free_tagset:
- if (new)
- blk_mq_free_tag_set(ctrl->admin_tagset);
+ nvme_remove_admin_tag_set(ctrl);
out_free_queue:
nvme_tcp_free_admin_queue(ctrl);
return error;
@@ -1779,15 +1981,12 @@ out_free_queue:
static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
bool remove)
{
- blk_mq_quiesce_queue(ctrl->admin_q);
+ nvme_stop_admin_queue(ctrl);
+ blk_sync_queue(ctrl->admin_q);
nvme_tcp_stop_queue(ctrl, 0);
- if (ctrl->admin_tagset) {
- blk_mq_tagset_busy_iter(ctrl->admin_tagset,
- nvme_cancel_request, ctrl);
- blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
- }
+ nvme_cancel_admin_tagset(ctrl);
if (remove)
- blk_mq_unquiesce_queue(ctrl->admin_q);
+ nvme_start_admin_queue(ctrl);
nvme_tcp_destroy_admin_queue(ctrl, remove);
}
@@ -1796,13 +1995,12 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
{
if (ctrl->queue_count <= 1)
return;
+ nvme_stop_admin_queue(ctrl);
+ nvme_start_freeze(ctrl);
nvme_stop_queues(ctrl);
+ nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
- if (ctrl->tagset) {
- blk_mq_tagset_busy_iter(ctrl->tagset,
- nvme_cancel_request, ctrl);
- blk_mq_tagset_wait_completed_request(ctrl->tagset);
- }
+ nvme_cancel_tagset(ctrl);
if (remove)
nvme_start_queues(ctrl);
nvme_tcp_destroy_io_queues(ctrl, remove);
@@ -1838,10 +2036,17 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
return ret;
if (ctrl->icdoff) {
+ ret = -EOPNOTSUPP;
dev_err(ctrl->device, "icdoff is not supported!\n");
goto destroy_admin;
}
+ if (!nvme_ctrl_sgl_supported(ctrl)) {
+ ret = -EOPNOTSUPP;
+ dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
+ goto destroy_admin;
+ }
+
if (opts->queue_size > ctrl->sqsize + 1)
dev_warn(ctrl->device,
"queue_size %zu > ctrl sqsize %u, clamping down\n",
@@ -1861,8 +2066,14 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
}
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
- /* state change failure is ok if we're in DELETING state */
- WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
+ /*
+ * state change failure is ok if we started ctrl delete,
+	 * unless we are in the middle of creating a new controller,
+	 * to avoid races with the teardown flow.
+ */
+ WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
+ ctrl->state != NVME_CTRL_DELETING_NOIO);
+ WARN_ON_ONCE(new);
ret = -EINVAL;
goto destroy_io;
}
@@ -1871,10 +2082,18 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
return 0;
destroy_io:
- if (ctrl->queue_count > 1)
+ if (ctrl->queue_count > 1) {
+ nvme_stop_queues(ctrl);
+ nvme_sync_io_queues(ctrl);
+ nvme_tcp_stop_io_queues(ctrl);
+ nvme_cancel_tagset(ctrl);
nvme_tcp_destroy_io_queues(ctrl, new);
+ }
destroy_admin:
+ nvme_stop_admin_queue(ctrl);
+ blk_sync_queue(ctrl->admin_q);
nvme_tcp_stop_queue(ctrl, 0);
+ nvme_cancel_admin_tagset(ctrl);
nvme_tcp_destroy_admin_queue(ctrl, new);
return ret;
}
@@ -1909,16 +2128,19 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
struct nvme_tcp_ctrl, err_work);
struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
+ nvme_auth_stop(ctrl);
nvme_stop_keep_alive(ctrl);
+ flush_work(&ctrl->async_event_work);
nvme_tcp_teardown_io_queues(ctrl, false);
/* unquiesce to fail fast pending requests */
nvme_start_queues(ctrl);
nvme_tcp_teardown_admin_queue(ctrl, false);
- blk_mq_unquiesce_queue(ctrl->admin_q);
+ nvme_start_admin_queue(ctrl);
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
- /* state change failure is ok if we're in DELETING state */
- WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
+ /* state change failure is ok if we started ctrl delete */
+ WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
+ ctrl->state != NVME_CTRL_DELETING_NOIO);
return;
}
@@ -1927,11 +2149,8 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{
- cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
- cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
-
nvme_tcp_teardown_io_queues(ctrl, shutdown);
- blk_mq_quiesce_queue(ctrl->admin_q);
+ nvme_stop_admin_queue(ctrl);
if (shutdown)
nvme_shutdown_ctrl(ctrl);
else
@@ -1953,8 +2172,9 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
nvme_tcp_teardown_ctrl(ctrl, false);
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
- /* state change failure is ok if we're in DELETING state */
- WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
+ /* state change failure is ok if we started ctrl delete */
+ WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
+ ctrl->state != NVME_CTRL_DELETING_NOIO);
return;
}
@@ -1968,6 +2188,12 @@ out_fail:
nvme_tcp_reconnect_or_remove(ctrl);
}
+static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
+{
+ flush_work(&to_tcp_ctrl(ctrl)->err_work);
+ cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
+}
+
static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
@@ -2041,43 +2267,51 @@ static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
ctrl->async_req.curr_bio = NULL;
ctrl->async_req.data_len = 0;
- nvme_tcp_queue_request(&ctrl->async_req);
+ nvme_tcp_queue_request(&ctrl->async_req, true, true);
}
-static enum blk_eh_timer_return
-nvme_tcp_timeout(struct request *rq, bool reserved)
+static void nvme_tcp_complete_timed_out(struct request *rq)
{
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
- struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
- struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+ struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
- /*
- * Restart the timer if a controller reset is already scheduled. Any
- * timed out commands would be handled before entering the connecting
- * state.
- */
- if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
- return BLK_EH_RESET_TIMER;
+ nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
+ nvmf_complete_timed_out_request(rq);
+}
- dev_warn(ctrl->ctrl.device,
+static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
+{
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
+ struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+
+ dev_warn(ctrl->device,
"queue %d: timeout request %#x type %d\n",
nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
- if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
+ if (ctrl->state != NVME_CTRL_LIVE) {
/*
- * Teardown immediately if controller times out while starting
- * or we are already started error recovery. all outstanding
- * requests are completed on shutdown, so we return BLK_EH_DONE.
+		 * If we are resetting, connecting or deleting, we should
+		 * complete immediately because we may block the controller
+		 * teardown or setup sequence:
+ * - ctrl disable/shutdown fabrics requests
+ * - connect requests
+ * - initialization admin requests
+ * - I/O requests that entered after unquiescing and
+ * the controller stopped responding
+ *
+ * All other requests should be cancelled by the error
+ * recovery work, so it's fine that we fail it here.
*/
- flush_work(&ctrl->err_work);
- nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
- nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
+ nvme_tcp_complete_timed_out(rq);
return BLK_EH_DONE;
}
- dev_warn(ctrl->ctrl.device, "starting error recovery\n");
- nvme_tcp_error_recovery(&ctrl->ctrl);
-
+ /*
+ * LIVE state should trigger the normal error recovery which will
+ * handle completing this request.
+ */
+ nvme_tcp_error_recovery(ctrl);
return BLK_EH_RESET_TIMER;
}
@@ -2090,8 +2324,10 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
c->common.flags |= NVME_CMD_SGL_METABUF;
- if (rq_data_dir(rq) == WRITE && req->data_len &&
- req->data_len <= nvme_tcp_inline_data_size(queue))
+ if (!blk_rq_nr_phys_segments(rq))
+ nvme_tcp_set_sg_null(c);
+ else if (rq_data_dir(rq) == WRITE &&
+ req->data_len <= nvme_tcp_inline_data_size(req))
nvme_tcp_set_sg_inline(queue, c, req->data_len);
else
nvme_tcp_set_sg_host_data(c, req->data_len);
@@ -2108,23 +2344,26 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
blk_status_t ret;
- ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
+ ret = nvme_setup_cmd(ns, rq);
if (ret)
return ret;
req->state = NVME_TCP_SEND_CMD_PDU;
+ req->status = cpu_to_le16(NVME_SC_SUCCESS);
req->offset = 0;
req->data_sent = 0;
req->pdu_len = 0;
req->pdu_sent = 0;
- req->data_len = blk_rq_payload_bytes(rq);
+ req->h2cdata_left = 0;
+ req->data_len = blk_rq_nr_phys_segments(rq) ?
+ blk_rq_payload_bytes(rq) : 0;
req->curr_bio = rq->bio;
+ if (req->curr_bio && req->data_len)
+ nvme_tcp_init_iter(req, rq_data_dir(rq));
if (rq_data_dir(rq) == WRITE &&
- req->data_len <= nvme_tcp_inline_data_size(queue))
+ req->data_len <= nvme_tcp_inline_data_size(req))
req->pdu_len = req->data_len;
- else if (req->curr_bio)
- nvme_tcp_init_iter(req, READ);
pdu->hdr.type = nvme_tcp_cmd;
pdu->hdr.flags = 0;
@@ -2150,6 +2389,14 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
return 0;
}
+static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
+{
+ struct nvme_tcp_queue *queue = hctx->driver_data;
+
+ if (!llist_empty(&queue->req_list))
+ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+}
+
static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -2160,8 +2407,8 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
blk_status_t ret;
- if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
- return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
+ if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
+ return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
ret = nvme_tcp_setup_cmd_pdu(ns, rq);
if (unlikely(ret))
@@ -2169,14 +2416,14 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(rq);
- nvme_tcp_queue_request(req);
+ nvme_tcp_queue_request(req, true, bd->last);
return BLK_STS_OK;
}
-static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
+static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
- struct nvme_tcp_ctrl *ctrl = set->driver_data;
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
@@ -2215,23 +2462,46 @@ static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
ctrl->io_queues[HCTX_TYPE_DEFAULT],
ctrl->io_queues[HCTX_TYPE_READ],
ctrl->io_queues[HCTX_TYPE_POLL]);
-
- return 0;
}
-static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
+static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct nvme_tcp_queue *queue = hctx->driver_data;
struct sock *sk = queue->sock->sk;
+ if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ return 0;
+
+ set_bit(NVME_TCP_Q_POLLING, &queue->flags);
if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
sk_busy_loop(sk, true);
nvme_tcp_try_recv(queue);
+ clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
return queue->nr_cqe;
}
-static struct blk_mq_ops nvme_tcp_mq_ops = {
+static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
+{
+ struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0];
+ struct sockaddr_storage src_addr;
+ int ret, len;
+
+ len = nvmf_get_address(ctrl, buf, size);
+
+ ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
+ if (ret > 0) {
+ if (len > 0)
+ len--; /* strip trailing newline */
+ len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
+ (len) ? "," : "", &src_addr);
+ }
+
+ return len;
+}
+
+static const struct blk_mq_ops nvme_tcp_mq_ops = {
.queue_rq = nvme_tcp_queue_rq,
+ .commit_rqs = nvme_tcp_commit_rqs,
.complete = nvme_complete_rq,
.init_request = nvme_tcp_init_request,
.exit_request = nvme_tcp_exit_request,
@@ -2241,7 +2511,7 @@ static struct blk_mq_ops nvme_tcp_mq_ops = {
.poll = nvme_tcp_poll,
};
-static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
+static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
.queue_rq = nvme_tcp_queue_rq,
.complete = nvme_complete_rq,
.init_request = nvme_tcp_init_request,
@@ -2260,7 +2530,8 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
.free_ctrl = nvme_tcp_free_ctrl,
.submit_async_event = nvme_tcp_submit_async_event,
.delete_ctrl = nvme_tcp_delete_ctrl,
- .get_address = nvmf_get_address,
+ .get_address = nvme_tcp_get_address,
+ .stop_ctrl = nvme_tcp_stop_ctrl,
};
static bool
@@ -2330,6 +2601,15 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
}
}
+ if (opts->mask & NVMF_OPT_HOST_IFACE) {
+ if (!__dev_get_by_name(&init_net, opts->host_iface)) {
+ pr_err("invalid interface passed: %s\n",
+ opts->host_iface);
+ ret = -ENODEV;
+ goto out_free_ctrl;
+ }
+ }
+
if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
ret = -EALREADY;
goto out_free_ctrl;
@@ -2357,9 +2637,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
goto out_uninit_ctrl;
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
- ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
-
- nvme_get_ctrl(&ctrl->ctrl);
+ nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
mutex_lock(&nvme_tcp_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
@@ -2388,7 +2666,7 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
- NVMF_OPT_TOS,
+ NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
.create_ctrl = nvme_tcp_create_ctrl,
};
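Note on the tcp.c changes above: the old (qid - 1) % num_online_cpus() mapping is replaced by per-queue-type classification (default, read, poll) before nvme_tcp_set_queue_io_cpu() picks an I/O CPU. The following is a minimal userspace sketch of that index arithmetic only; the queue counts are hypothetical, and the final cpumask_next_wrap() step over the online CPU mask is omitted because it is kernel-only.

/*
 * Standalone sketch (not kernel code) of the qid -> per-type index
 * arithmetic performed by nvme_tcp_set_queue_io_cpu() in the hunk above.
 * The io_queues[] counts below are hypothetical.
 */
#include <stdio.h>

enum { TYPE_DEFAULT, TYPE_READ, TYPE_POLL };

static int queue_type(int qid, int ndef, int nread, int npoll)
{
	if (qid == 0)
		return -1;			/* admin queue */
	if (qid < 1 + ndef)
		return TYPE_DEFAULT;
	if (qid < 1 + ndef + nread)
		return TYPE_READ;
	if (qid < 1 + ndef + nread + npoll)
		return TYPE_POLL;
	return -1;
}

int main(void)
{
	int ndef = 4, nread = 2, npoll = 2;	/* hypothetical io_queues[] counts */
	int qid;

	for (qid = 1; qid < 1 + ndef + nread + npoll; qid++) {
		int n;

		switch (queue_type(qid, ndef, nread, npoll)) {
		case TYPE_DEFAULT:
			n = qid - 1;
			break;
		case TYPE_READ:
			n = qid - ndef - 1;
			break;
		default:			/* TYPE_POLL */
			n = qid - ndef - nread - 1;
			break;
		}
		printf("qid %d -> per-type index %d\n", qid, n);
	}
	return 0;
}

With the counts above, qids 1-4 map to default indices 0-3, qids 5-6 to read indices 0-1, and qids 7-8 to poll indices 0-1, which the driver then wraps onto online CPUs.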
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 5c3cb6928f3c..1c36fcedea20 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -72,6 +72,20 @@ static const char *nvme_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
return ret;
}
+static const char *nvme_trace_admin_set_features(struct trace_seq *p,
+ u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 fid = cdw10[0];
+ u8 sv = cdw10[3] & 0x8;
+ u32 cdw11 = get_unaligned_le32(cdw10 + 4);
+
+ trace_seq_printf(p, "fid=0x%x, sv=0x%x, cdw11=0x%x", fid, sv, cdw11);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
static const char *nvme_trace_admin_get_features(struct trace_seq *p,
u8 *cdw10)
{
@@ -80,7 +94,7 @@ static const char *nvme_trace_admin_get_features(struct trace_seq *p,
u8 sel = cdw10[1] & 0x7;
u32 cdw11 = get_unaligned_le32(cdw10 + 4);
- trace_seq_printf(p, "fid=0x%x sel=0x%x cdw11=0x%x", fid, sel, cdw11);
+ trace_seq_printf(p, "fid=0x%x, sel=0x%x, cdw11=0x%x", fid, sel, cdw11);
trace_seq_putc(p, 0);
return ret;
@@ -102,6 +116,23 @@ static const char *nvme_trace_get_lba_status(struct trace_seq *p,
return ret;
}
+static const char *nvme_trace_admin_format_nvm(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 lbaf = cdw10[0] & 0xF;
+ u8 mset = (cdw10[0] >> 4) & 0x1;
+ u8 pi = (cdw10[0] >> 5) & 0x7;
+ u8 pil = cdw10[1] & 0x1;
+ u8 ses = (cdw10[1] >> 1) & 0x7;
+
+ trace_seq_printf(p, "lbaf=%u, mset=%u, pi=%u, pil=%u, ses=%u",
+ lbaf, mset, pi, pil, ses);
+
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -131,6 +162,35 @@ static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
return ret;
}
+static const char *nvme_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u8 zsa = cdw10[12];
+ u8 all = cdw10[13];
+
+ trace_seq_printf(p, "slba=%llu, zsa=%u, all=%u", slba, zsa, all);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u32 numd = get_unaligned_le32(cdw10 + 8);
+ u8 zra = cdw10[12];
+ u8 zrasf = cdw10[13];
+ u8 pr = cdw10[14];
+
+ trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u, pr=%u",
+ slba, numd, zra, zrasf, pr);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
static const char *nvme_trace_common(struct trace_seq *p, u8 *cdw10)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -155,10 +215,14 @@ const char *nvme_trace_parse_admin_cmd(struct trace_seq *p,
return nvme_trace_create_cq(p, cdw10);
case nvme_admin_identify:
return nvme_trace_admin_identify(p, cdw10);
+ case nvme_admin_set_features:
+ return nvme_trace_admin_set_features(p, cdw10);
case nvme_admin_get_features:
return nvme_trace_admin_get_features(p, cdw10);
case nvme_admin_get_lba_status:
return nvme_trace_get_lba_status(p, cdw10);
+ case nvme_admin_format_nvm:
+ return nvme_trace_admin_format_nvm(p, cdw10);
default:
return nvme_trace_common(p, cdw10);
}
@@ -171,9 +235,14 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
case nvme_cmd_read:
case nvme_cmd_write:
case nvme_cmd_write_zeroes:
+ case nvme_cmd_zone_append:
return nvme_trace_read_write(p, cdw10);
case nvme_cmd_dsm:
return nvme_trace_dsm(p, cdw10);
+ case nvme_cmd_zone_mgmt_send:
+ return nvme_trace_zone_mgmt_send(p, cdw10);
+ case nvme_cmd_zone_mgmt_recv:
+ return nvme_trace_zone_mgmt_recv(p, cdw10);
default:
return nvme_trace_common(p, cdw10);
}
@@ -218,6 +287,34 @@ static const char *nvme_trace_fabrics_property_get(struct trace_seq *p, u8 *spc)
return ret;
}
+static const char *nvme_trace_fabrics_auth_send(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 spsp0 = spc[1];
+ u8 spsp1 = spc[2];
+ u8 secp = spc[3];
+ u32 tl = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, tl=%u",
+ spsp0, spsp1, secp, tl);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvme_trace_fabrics_auth_receive(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 spsp0 = spc[1];
+ u8 spsp1 = spc[2];
+ u8 secp = spc[3];
+ u32 al = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, al=%u",
+ spsp0, spsp1, secp, al);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
static const char *nvme_trace_fabrics_common(struct trace_seq *p, u8 *spc)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -237,6 +334,10 @@ const char *nvme_trace_parse_fabrics_cmd(struct trace_seq *p,
return nvme_trace_fabrics_connect(p, spc);
case nvme_fabrics_type_property_get:
return nvme_trace_fabrics_property_get(p, spc);
+ case nvme_fabrics_type_auth_send:
+ return nvme_trace_fabrics_auth_send(p, spc);
+ case nvme_fabrics_type_auth_receive:
+ return nvme_trace_fabrics_auth_receive(p, spc);
default:
return nvme_trace_fabrics_common(p, spc);
}
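The new trace decoders above pull their fields straight out of the raw command dwords with get_unaligned_le32()/le64() and byte masking rather than going through named structure members. Below is a small userspace approximation of the Zone Management Receive decoder (nvme_trace_zone_mgmt_recv); the cdw10 contents are hypothetical test data and the little-endian helpers are simplified stand-ins for the kernel's get_unaligned helpers.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* simplified stand-ins for get_unaligned_le64/le32; assumes a little-endian host */
static uint64_t get_le64(const uint8_t *p)
{
	uint64_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

static uint32_t get_le32(const uint8_t *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

int main(void)
{
	uint8_t cdw10[16] = { 0 };	/* hypothetical command dword area */

	cdw10[1] = 0x01;		/* slba = 0x100 = 256 */
	cdw10[8] = 0x7f;		/* numd = 127 */
	cdw10[12] = 0x00;		/* zra: zone report */
	cdw10[13] = 0x00;		/* zrasf: report all zones */
	cdw10[14] = 0x01;		/* pr: partial report */

	printf("slba=%llu, numd=%u, zra=%u, zrasf=%u, pr=%u\n",
	       (unsigned long long)get_le64(cdw10),
	       (unsigned)get_le32(cdw10 + 8),
	       (unsigned)cdw10[12], (unsigned)cdw10[13], (unsigned)cdw10[14]);
	return 0;
}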
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index daaf700eae79..6f0eaf6a1528 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -56,7 +56,7 @@ TRACE_EVENT(nvme_setup_cmd,
__field(u8, fctype)
__field(u16, cid)
__field(u32, nsid)
- __field(u64, metadata)
+ __field(bool, metadata)
__array(u8, cdw10, 24)
),
TP_fast_assign(
@@ -66,13 +66,13 @@ TRACE_EVENT(nvme_setup_cmd,
__entry->flags = cmd->common.flags;
__entry->cid = cmd->common.command_id;
__entry->nsid = le32_to_cpu(cmd->common.nsid);
- __entry->metadata = le64_to_cpu(cmd->common.metadata);
+ __entry->metadata = !!blk_integrity_rq(req);
__entry->fctype = cmd->fabrics.fctype;
- __assign_disk_name(__entry->disk, req->rq_disk);
- memcpy(__entry->cdw10, &cmd->common.cdw10,
+ __assign_disk_name(__entry->disk, req->q->disk);
+ memcpy(__entry->cdw10, &cmd->common.cdws,
sizeof(__entry->cdw10));
),
- TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+ TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%x, cmd=(%s %s)",
__entry->ctrl_id, __print_disk_name(__entry->disk),
__entry->qid, __entry->cid, __entry->nsid,
__entry->flags, __entry->metadata,
@@ -98,12 +98,12 @@ TRACE_EVENT(nvme_complete_rq,
TP_fast_assign(
__entry->ctrl_id = nvme_req(req)->ctrl->instance;
__entry->qid = nvme_req_qid(req);
- __entry->cid = req->tag;
+ __entry->cid = nvme_req(req)->cmd->common.command_id;
__entry->result = le64_to_cpu(nvme_req(req)->result.u64);
__entry->retries = nvme_req(req)->retries;
__entry->flags = nvme_req(req)->flags;
__entry->status = nvme_req(req)->status;
- __assign_disk_name(__entry->disk, req->rq_disk);
+ __assign_disk_name(__entry->disk, req->q->disk);
),
TP_printk("nvme%d: %sqid=%d, cmdid=%u, res=%#llx, retries=%u, flags=0x%x, status=%#x",
__entry->ctrl_id, __print_disk_name(__entry->disk),
@@ -153,7 +153,7 @@ TRACE_EVENT(nvme_sq,
),
TP_fast_assign(
__entry->ctrl_id = nvme_req(req)->ctrl->instance;
- __assign_disk_name(__entry->disk, req->rq_disk);
+ __assign_disk_name(__entry->disk, req->q->disk);
__entry->qid = nvme_req_qid(req);
__entry->sq_head = le16_to_cpu(sq_head);
__entry->sq_tail = sq_tail;
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
new file mode 100644
index 000000000000..12316ab51bda
--- /dev/null
+++ b/drivers/nvme/host/zns.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/vmalloc.h>
+#include "nvme.h"
+
+int nvme_revalidate_zones(struct nvme_ns *ns)
+{
+ struct request_queue *q = ns->queue;
+ int ret;
+
+ ret = blk_revalidate_disk_zones(ns->disk, NULL);
+ if (!ret)
+ blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
+ return ret;
+}
+
+static int nvme_set_max_append(struct nvme_ctrl *ctrl)
+{
+ struct nvme_command c = { };
+ struct nvme_id_ctrl_zns *id;
+ int status;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return -ENOMEM;
+
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.cns = NVME_ID_CNS_CS_CTRL;
+ c.identify.csi = NVME_CSI_ZNS;
+
+ status = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
+ if (status) {
+ kfree(id);
+ return status;
+ }
+
+ if (id->zasl)
+ ctrl->max_zone_append = 1 << (id->zasl + 3);
+ else
+ ctrl->max_zone_append = ctrl->max_hw_sectors;
+ kfree(id);
+ return 0;
+}
+
+int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
+{
+ struct nvme_effects_log *log = ns->head->effects;
+ struct request_queue *q = ns->queue;
+ struct nvme_command c = { };
+ struct nvme_id_ns_zns *id;
+ int status;
+
+ /* Driver requires zone append support */
+ if ((le32_to_cpu(log->iocs[nvme_cmd_zone_append]) &
+ NVME_CMD_EFFECTS_CSUPP)) {
+ if (test_and_clear_bit(NVME_NS_FORCE_RO, &ns->flags))
+ dev_warn(ns->ctrl->device,
+ "Zone Append supported for zoned namespace:%d. Remove read-only mode\n",
+ ns->head->ns_id);
+ } else {
+ set_bit(NVME_NS_FORCE_RO, &ns->flags);
+ dev_warn(ns->ctrl->device,
+ "Zone Append not supported for zoned namespace:%d. Forcing to read-only mode\n",
+ ns->head->ns_id);
+ }
+
+ /* Lazily query controller append limit for the first zoned namespace */
+ if (!ns->ctrl->max_zone_append) {
+ status = nvme_set_max_append(ns->ctrl);
+ if (status)
+ return status;
+ }
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return -ENOMEM;
+
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.nsid = cpu_to_le32(ns->head->ns_id);
+ c.identify.cns = NVME_ID_CNS_CS_NS;
+ c.identify.csi = NVME_CSI_ZNS;
+
+ status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, id, sizeof(*id));
+ if (status)
+ goto free_data;
+
+ /*
+ * We currently do not handle devices requiring any of the zoned
+ * operation characteristics.
+ */
+ if (id->zoc) {
+ dev_warn(ns->ctrl->device,
+ "zone operations:%x not supported for namespace:%u\n",
+ le16_to_cpu(id->zoc), ns->head->ns_id);
+ status = -ENODEV;
+ goto free_data;
+ }
+
+ ns->zsze = nvme_lba_to_sect(ns, le64_to_cpu(id->lbafe[lbaf].zsze));
+ if (!is_power_of_2(ns->zsze)) {
+ dev_warn(ns->ctrl->device,
+ "invalid zone size:%llu for namespace:%u\n",
+ ns->zsze, ns->head->ns_id);
+ status = -ENODEV;
+ goto free_data;
+ }
+
+ disk_set_zoned(ns->disk, BLK_ZONED_HM);
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
+ disk_set_max_open_zones(ns->disk, le32_to_cpu(id->mor) + 1);
+ disk_set_max_active_zones(ns->disk, le32_to_cpu(id->mar) + 1);
+free_data:
+ kfree(id);
+ return status;
+}
+
+static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
+ unsigned int nr_zones, size_t *buflen)
+{
+ struct request_queue *q = ns->disk->queue;
+ size_t bufsize;
+ void *buf;
+
+ const size_t min_bufsize = sizeof(struct nvme_zone_report) +
+ sizeof(struct nvme_zone_descriptor);
+
+ nr_zones = min_t(unsigned int, nr_zones,
+ get_capacity(ns->disk) >> ilog2(ns->zsze));
+
+ bufsize = sizeof(struct nvme_zone_report) +
+ nr_zones * sizeof(struct nvme_zone_descriptor);
+ bufsize = min_t(size_t, bufsize,
+ queue_max_hw_sectors(q) << SECTOR_SHIFT);
+ bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
+
+ while (bufsize >= min_bufsize) {
+ buf = __vmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
+ if (buf) {
+ *buflen = bufsize;
+ return buf;
+ }
+ bufsize >>= 1;
+ }
+ return NULL;
+}
+
+static int nvme_zone_parse_entry(struct nvme_ns *ns,
+ struct nvme_zone_descriptor *entry,
+ unsigned int idx, report_zones_cb cb,
+ void *data)
+{
+ struct blk_zone zone = { };
+
+ if ((entry->zt & 0xf) != NVME_ZONE_TYPE_SEQWRITE_REQ) {
+ dev_err(ns->ctrl->device, "invalid zone type %#x\n",
+ entry->zt);
+ return -EINVAL;
+ }
+
+ zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
+ zone.cond = entry->zs >> 4;
+ zone.len = ns->zsze;
+ zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap));
+ zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba));
+ if (zone.cond == BLK_ZONE_COND_FULL)
+ zone.wp = zone.start + zone.len;
+ else
+ zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
+
+ return cb(&zone, idx, data);
+}
+
+int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data)
+{
+ struct nvme_zone_report *report;
+ struct nvme_command c = { };
+ int ret, zone_idx = 0;
+ unsigned int nz, i;
+ size_t buflen;
+
+ if (ns->head->ids.csi != NVME_CSI_ZNS)
+ return -EINVAL;
+
+ report = nvme_zns_alloc_report_buffer(ns, nr_zones, &buflen);
+ if (!report)
+ return -ENOMEM;
+
+ c.zmr.opcode = nvme_cmd_zone_mgmt_recv;
+ c.zmr.nsid = cpu_to_le32(ns->head->ns_id);
+ c.zmr.numd = cpu_to_le32(nvme_bytes_to_numd(buflen));
+ c.zmr.zra = NVME_ZRA_ZONE_REPORT;
+ c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL;
+ c.zmr.pr = NVME_REPORT_ZONE_PARTIAL;
+
+ sector &= ~(ns->zsze - 1);
+ while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) {
+ memset(report, 0, buflen);
+
+ c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector));
+ ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen);
+ if (ret) {
+ if (ret > 0)
+ ret = -EIO;
+ goto out_free;
+ }
+
+ nz = min((unsigned int)le64_to_cpu(report->nr_zones), nr_zones);
+ if (!nz)
+ break;
+
+ for (i = 0; i < nz && zone_idx < nr_zones; i++) {
+ ret = nvme_zone_parse_entry(ns, &report->entries[i],
+ zone_idx, cb, data);
+ if (ret)
+ goto out_free;
+ zone_idx++;
+ }
+
+ sector += ns->zsze * nz;
+ }
+
+ if (zone_idx > 0)
+ ret = zone_idx;
+ else
+ ret = -EINVAL;
+out_free:
+ kvfree(report);
+ return ret;
+}
+
+blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
+ struct nvme_command *c, enum nvme_zone_mgmt_action action)
+{
+ memset(c, 0, sizeof(*c));
+
+ c->zms.opcode = nvme_cmd_zone_mgmt_send;
+ c->zms.nsid = cpu_to_le32(ns->head->ns_id);
+ c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
+ c->zms.zsa = action;
+
+ if (req_op(req) == REQ_OP_ZONE_RESET_ALL)
+ c->zms.select_all = 1;
+
+ return BLK_STS_OK;
+}
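nvme_ns_report_zones() above issues partial Zone Management Receive commands in a loop: it aligns the start sector down to a zone boundary (safe because non-power-of-two zone sizes were rejected earlier in the file), invokes the callback once per returned descriptor, and advances by zsze times the number of zones in each report. A standalone sketch of that iteration follows, with all sizes hypothetical.

/*
 * Sketch (hypothetical sizes, not kernel code) of the report-zones
 * iteration pattern used above.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long zsze = 1ULL << 16;	/* zone size in sectors (hypothetical) */
	unsigned long long capacity = 8 * zsze;	/* 8 zones (hypothetical) */
	unsigned long long sector = zsze + 123;	/* caller passed an unaligned start */
	unsigned int nr_zones = 5, zone_idx = 0;
	unsigned int zones_per_report = 2;	/* pretend the report buffer holds 2 */

	sector &= ~(zsze - 1);			/* align down, as the driver does */
	while (zone_idx < nr_zones && sector < capacity) {
		unsigned int nz = zones_per_report;
		unsigned int i;

		for (i = 0; i < nz && zone_idx < nr_zones; i++)
			zone_idx++;		/* one cb() invocation per descriptor */

		printf("report at sector %llu: now at zone_idx %u\n",
		       sector, zone_idx);
		sector += zsze * nz;		/* next report starts one buffer later */
	}
	return 0;
}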
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index d7f48c0fb311..79fc64035ee3 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -4,6 +4,7 @@ config NVME_TARGET
tristate "NVMe Target support"
depends on BLOCK
depends on CONFIGFS_FS
+ select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
select SGL_ALLOC
help
	  This enables target side support for the NVMe protocol, that is
@@ -15,10 +16,21 @@ config NVME_TARGET
To configure the NVMe target you probably want to use the nvmetcli
tool from http://git.infradead.org/users/hch/nvmetcli.git.
+config NVME_TARGET_PASSTHRU
+ bool "NVMe Target Passthrough support"
+ depends on NVME_TARGET
+ depends on NVME_CORE=y || NVME_CORE=NVME_TARGET
+ help
+ This enables target side NVMe passthru controller support for the
+	  NVMe Over Fabrics protocol. It allows hosts to manage and
+ directly access an actual NVMe controller residing on the target
+ side, including executing Vendor Unique Commands.
+
+ If unsure, say N.
+
config NVME_TARGET_LOOP
tristate "NVMe loopback device support"
depends on NVME_TARGET
- select NVME_CORE
select NVME_FABRICS
select SG_POOL
help
@@ -52,7 +64,6 @@ config NVME_TARGET_FC
config NVME_TARGET_FCLOOP
tristate "NVMe over Fabrics FC Transport Loopback Test driver"
depends on NVME_TARGET
- select NVME_CORE
select NVME_FABRICS
select SG_POOL
depends on NVME_FC
@@ -72,3 +83,18 @@ config NVME_TARGET_TCP
devices over TCP.
If unsure, say N.
+
+config NVME_TARGET_AUTH
+ bool "NVMe over Fabrics In-band Authentication support"
+ depends on NVME_TARGET
+ select NVME_COMMON
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_DH
+ select CRYPTO_DH_RFC7919_GROUPS
+ help
+	  This enables support for NVMe over Fabrics In-band Authentication.
+
+ If unsure, say N.
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index 2b33836f3d3e..c66820102493 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -11,6 +11,9 @@ obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o
nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
discovery.o io-cmd-file.o io-cmd-bdev.o
+nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o
+nvmet-$(CONFIG_BLK_DEV_ZONED) += zns.o
+nvmet-$(CONFIG_NVME_TARGET_AUTH) += fabrics-cmd-auth.o auth.o
nvme-loop-y += loop.o
nvmet-rdma-y += rdma.o
nvmet-fc-y += fc.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 72a7e41f3018..c8a061ce3ee5 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
+#include <linux/part_stat.h>
#include <generated/utsrelease.h>
#include <asm/unaligned.h>
@@ -73,34 +74,28 @@ static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
struct nvme_smart_log *slog)
{
- struct nvmet_ns *ns;
u64 host_reads, host_writes, data_units_read, data_units_written;
+ u16 status;
- ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
- if (!ns) {
- pr_err("Could not find namespace id : %d\n",
- le32_to_cpu(req->cmd->get_log_page.nsid));
- req->error_loc = offsetof(struct nvme_rw_command, nsid);
- return NVME_SC_INVALID_NS;
- }
+ status = nvmet_req_find_ns(req);
+ if (status)
+ return status;
/* we don't have the right data for file backed ns */
- if (!ns->bdev)
- goto out;
+ if (!req->ns->bdev)
+ return NVME_SC_SUCCESS;
- host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
- data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
- sectors[READ]), 1000);
- host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
- data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
- sectors[WRITE]), 1000);
+ host_reads = part_stat_read(req->ns->bdev, ios[READ]);
+ data_units_read =
+ DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
+ host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
+ data_units_written =
+ DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);
put_unaligned_le64(host_reads, &slog->host_reads[0]);
put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
put_unaligned_le64(host_writes, &slog->host_writes[0]);
put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
-out:
- nvmet_put_namespace(ns);
return NVME_SC_SUCCESS;
}
@@ -112,23 +107,20 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
u64 data_units_read = 0, data_units_written = 0;
struct nvmet_ns *ns;
struct nvmet_ctrl *ctrl;
+ unsigned long idx;
ctrl = req->sq->ctrl;
-
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
/* we don't have the right data for file backed ns */
if (!ns->bdev)
continue;
- host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
+ host_reads += part_stat_read(ns->bdev, ios[READ]);
data_units_read += DIV_ROUND_UP(
- part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
- host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
+ part_stat_read(ns->bdev, sectors[READ]), 1000);
+ host_writes += part_stat_read(ns->bdev, ios[WRITE]);
data_units_written += DIV_ROUND_UP(
- part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
-
+ part_stat_read(ns->bdev, sectors[WRITE]), 1000);
}
- rcu_read_unlock();
put_unaligned_le64(host_reads, &slog->host_reads[0]);
put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
@@ -170,15 +162,8 @@ out:
nvmet_req_complete(req, status);
}
-static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
+static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
{
- u16 status = NVME_SC_INTERNAL;
- struct nvme_effects_log *log;
-
- log = kzalloc(sizeof(*log), GFP_KERNEL);
- if (!log)
- goto out;
-
log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
@@ -192,9 +177,45 @@ static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);
+}
- status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
+static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
+{
+ log->iocs[nvme_cmd_zone_append] = cpu_to_le32(1 << 0);
+ log->iocs[nvme_cmd_zone_mgmt_send] = cpu_to_le32(1 << 0);
+ log->iocs[nvme_cmd_zone_mgmt_recv] = cpu_to_le32(1 << 0);
+}
+static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
+{
+ struct nvme_effects_log *log;
+ u16 status = NVME_SC_SUCCESS;
+
+ log = kzalloc(sizeof(*log), GFP_KERNEL);
+ if (!log) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ switch (req->cmd->get_log_page.csi) {
+ case NVME_CSI_NVM:
+ nvmet_get_cmd_effects_nvm(log);
+ break;
+ case NVME_CSI_ZNS:
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ status = NVME_SC_INVALID_IO_CMD_SET;
+ goto free;
+ }
+ nvmet_get_cmd_effects_nvm(log);
+ nvmet_get_cmd_effects_zns(log);
+ break;
+ default:
+ status = NVME_SC_INVALID_LOG_PAGE;
+ goto free;
+ }
+
+ status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
+free:
kfree(log);
out:
nvmet_req_complete(req, status);
@@ -229,14 +250,13 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmet_ns *ns;
+ unsigned long idx;
u32 count = 0;
if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns)
if (ns->anagrpid == grpid)
desc->nsids[count++] = cpu_to_le32(ns->nsid);
- rcu_read_unlock();
}
desc->grpid = cpu_to_le32(grpid);
@@ -244,7 +264,7 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
desc->state = req->port->ana_state[grpid];
memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
- return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
+ return struct_size(desc, nsids, count);
}
static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
@@ -258,8 +278,8 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
u16 status;
status = NVME_SC_INTERNAL;
- desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
- NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
+ desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
+ GFP_KERNEL);
if (!desc)
goto out;
@@ -294,7 +314,7 @@ out:
static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
- if (!nvmet_check_data_len(req, nvmet_get_log_page_len(req->cmd)))
+ if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
return;
switch (req->cmd->get_log_page.lid) {
@@ -316,7 +336,7 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
case NVME_LOG_ANA:
return nvmet_execute_get_log_page_ana(req);
}
- pr_err("unhandled lid %d on qid %d\n",
+ pr_debug("unhandled lid %d on qid %d\n",
req->cmd->get_log_page.lid, req->sq->qid);
req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
@@ -325,9 +345,16 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_subsys *subsys = ctrl->subsys;
struct nvme_id_ctrl *id;
+ u32 cmd_capsule_size;
u16 status = 0;
- const char model[] = "Linux";
+
+ if (!subsys->subsys_discovered) {
+ mutex_lock(&subsys->lock);
+ subsys->subsys_discovered = true;
+ mutex_unlock(&subsys->lock);
+ }
id = kzalloc(sizeof(*id), GFP_KERNEL);
if (!id) {
@@ -339,25 +366,34 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->vid = 0;
id->ssvid = 0;
- memset(id->sn, ' ', sizeof(id->sn));
- bin2hex(id->sn, &ctrl->subsys->serial,
- min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
- memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
+ memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
+ memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
+ strlen(subsys->model_number), ' ');
memcpy_and_pad(id->fr, sizeof(id->fr),
UTS_RELEASE, strlen(UTS_RELEASE), ' ');
id->rab = 6;
+ if (nvmet_is_disc_subsys(ctrl->subsys))
+ id->cntrltype = NVME_CTRL_DISC;
+ else
+ id->cntrltype = NVME_CTRL_IO;
+
/*
 * XXX: figure out how we can assign an IEEE OUI, but until then
* the safest is to leave it as zeroes.
*/
 /* we support multiple ports, multiple hosts and ANA: */
- id->cmic = (1 << 0) | (1 << 1) | (1 << 3);
+ id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
+ NVME_CTRL_CMIC_ANA;
+
+ /* Limit MDTS according to transport capability */
+ if (ctrl->ops->get_mdts)
+ id->mdts = ctrl->ops->get_mdts(ctrl);
+ else
+ id->mdts = 0;
- /* no limit on data transfer sizes for now */
- id->mdts = 0;
id->cntlid = cpu_to_le16(ctrl->cntlid);
id->ver = cpu_to_le32(ctrl->subsys->ver);
@@ -392,7 +428,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 /* no enforced soft limit on maxcmd - pick an arbitrarily high value */
id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
- id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
+ id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
NVME_CTRL_ONCS_WRITE_ZEROES);
@@ -408,16 +444,22 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->awupf = 0;
id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
- if (ctrl->ops->has_keyed_sgls)
+ if (ctrl->ops->flags & NVMF_KEYED_SGLS)
id->sgls |= cpu_to_le32(1 << 2);
if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
- strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
+ strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
+
+ /*
+ * Max command capsule size is sqe + in-capsule data size.
+ * Disable in-capsule data for Metadata capable controllers.
+ */
+ cmd_capsule_size = sizeof(struct nvme_command);
+ if (!ctrl->pi_support)
+ cmd_capsule_size += req->port->inline_data_size;
+ id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);
- /* Max command capsule size is sqe + single page of in-capsule data */
- id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
- req->port->inline_data_size) / 16);
/* Max response capsule size is cqe */
id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
@@ -447,9 +489,8 @@ out:
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
- struct nvmet_ns *ns;
struct nvme_id_ns *id;
- u16 status = 0;
+ u16 status;
if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
req->error_loc = offsetof(struct nvme_identify, nsid);
@@ -464,26 +505,35 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
}
/* return an all zeroed buffer if we can't find an active namespace */
- ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
- if (!ns)
+ status = nvmet_req_find_ns(req);
+ if (status) {
+ status = 0;
goto done;
+ }
+
+ if (nvmet_ns_revalidate(req->ns)) {
+ mutex_lock(&req->ns->subsys->lock);
+ nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
+ mutex_unlock(&req->ns->subsys->lock);
+ }
/*
* nuse = ncap = nsze isn't always true, but we have no way to find
* that out from the underlying device.
*/
- id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
- switch (req->port->ana_state[ns->anagrpid]) {
+ id->ncap = id->nsze =
+ cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
+ switch (req->port->ana_state[req->ns->anagrpid]) {
case NVME_ANA_INACCESSIBLE:
case NVME_ANA_PERSISTENT_LOSS:
break;
default:
id->nuse = id->nsze;
break;
- }
+ }
- if (ns->bdev)
- nvmet_bdev_set_limits(ns->bdev, id);
+ if (req->ns->bdev)
+ nvmet_bdev_set_limits(req->ns->bdev, id);
/*
* We just provide a single LBA format that matches what the
@@ -496,18 +546,29 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
* Our namespace might always be shared. Not just with other
* controllers, but also with any other user of the block device.
*/
- id->nmic = (1 << 0);
- id->anagrpid = cpu_to_le32(ns->anagrpid);
+ id->nmic = NVME_NS_NMIC_SHARED;
+ id->anagrpid = cpu_to_le32(req->ns->anagrpid);
- memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));
+ memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
- id->lbaf[0].ds = ns->blksize_shift;
+ id->lbaf[0].ds = req->ns->blksize_shift;
- if (ns->readonly)
+ if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
+ id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
+ NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
+ NVME_NS_DPC_PI_TYPE3;
+ id->mc = NVME_MC_EXTENDED_LBA;
+ id->dps = req->ns->pi_type;
+ id->flbas = NVME_NS_FLBAS_META_EXT;
+ id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
+ }
+
+ if (req->ns->readonly)
id->nsattr |= (1 << 0);
- nvmet_put_namespace(ns);
done:
- status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+ if (!status)
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
kfree(id);
out:
nvmet_req_complete(req, status);
@@ -518,6 +579,7 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req)
static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmet_ns *ns;
+ unsigned long idx;
u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
__le32 *list;
u16 status = 0;
@@ -529,15 +591,13 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req)
goto out;
}
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
if (ns->nsid <= min_nsid)
continue;
list[i++] = cpu_to_le32(ns->nsid);
if (i == buf_size / sizeof(__le32))
break;
}
- rcu_read_unlock();
status = nvmet_copy_to_sgl(req, 0, list, buf_size);
@@ -570,61 +630,114 @@ static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
- struct nvmet_ns *ns;
- u16 status = 0;
off_t off = 0;
+ u16 status;
- ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
- if (!ns) {
- req->error_loc = offsetof(struct nvme_identify, nsid);
- status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ status = nvmet_req_find_ns(req);
+ if (status)
goto out;
- }
- if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
+ if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
NVME_NIDT_UUID_LEN,
- &ns->uuid, &off);
+ &req->ns->uuid, &off);
if (status)
- goto out_put_ns;
+ goto out;
}
- if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
+ if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
NVME_NIDT_NGUID_LEN,
- &ns->nguid, &off);
+ &req->ns->nguid, &off);
if (status)
- goto out_put_ns;
+ goto out;
}
+ status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
+ NVME_NIDT_CSI_LEN,
+ &req->ns->csi, &off);
+ if (status)
+ goto out;
+
if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
off) != NVME_IDENTIFY_DATA_SIZE - off)
status = NVME_SC_INTERNAL | NVME_SC_DNR;
-out_put_ns:
- nvmet_put_namespace(ns);
+
out:
nvmet_req_complete(req, status);
}
+static bool nvmet_handle_identify_desclist(struct nvmet_req *req)
+{
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_NVM:
+ nvmet_execute_identify_desclist(req);
+ return true;
+ case NVME_CSI_ZNS:
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ nvmet_execute_identify_desclist(req);
+ return true;
+ }
+ return false;
+ default:
+ return false;
+ }
+}
+
static void nvmet_execute_identify(struct nvmet_req *req)
{
- if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE))
+ if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
return;
switch (req->cmd->identify.cns) {
case NVME_ID_CNS_NS:
- return nvmet_execute_identify_ns(req);
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_NVM:
+ return nvmet_execute_identify_ns(req);
+ default:
+ break;
+ }
+ break;
+ case NVME_ID_CNS_CS_NS:
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_ZNS:
+ return nvmet_execute_identify_cns_cs_ns(req);
+ default:
+ break;
+ }
+ }
+ break;
case NVME_ID_CNS_CTRL:
- return nvmet_execute_identify_ctrl(req);
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_NVM:
+ return nvmet_execute_identify_ctrl(req);
+ }
+ break;
+ case NVME_ID_CNS_CS_CTRL:
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_ZNS:
+ return nvmet_execute_identify_cns_cs_ctrl(req);
+ default:
+ break;
+ }
+ }
+ break;
case NVME_ID_CNS_NS_ACTIVE_LIST:
- return nvmet_execute_identify_nslist(req);
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_NVM:
+ return nvmet_execute_identify_nslist(req);
+ default:
+ break;
+ }
+ break;
case NVME_ID_CNS_NS_DESC_LIST:
- return nvmet_execute_identify_desclist(req);
+ if (nvmet_handle_identify_desclist(req))
+ return;
+ break;
}
- pr_err("unhandled identify cns %d on qid %d\n",
- req->cmd->identify.cns, req->sq->qid);
- req->error_loc = offsetof(struct nvme_identify, cns);
- nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+ nvmet_req_cns_error_complete(req);
}
/*
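The nested switch statements in nvmet_execute_identify() boil down to a (CNS, CSI) -> handler lookup. Purely as an illustration of that mapping (the table type and name are hypothetical; the handlers are the ones defined in this file):

struct nvmet_id_handler {
	u8 cns;
	u8 csi;
	void (*execute)(struct nvmet_req *req);
};

static const struct nvmet_id_handler nvmet_id_handlers[] = {
	{ NVME_ID_CNS_NS,             NVME_CSI_NVM, nvmet_execute_identify_ns },
	{ NVME_ID_CNS_CTRL,           NVME_CSI_NVM, nvmet_execute_identify_ctrl },
	{ NVME_ID_CNS_NS_ACTIVE_LIST, NVME_CSI_NVM, nvmet_execute_identify_nslist },
	{ NVME_ID_CNS_NS_DESC_LIST,   NVME_CSI_NVM, nvmet_execute_identify_desclist },
};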
@@ -636,7 +749,7 @@ static void nvmet_execute_identify(struct nvmet_req *req)
*/
static void nvmet_execute_abort(struct nvmet_req *req)
{
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
nvmet_set_result(req, 1);
nvmet_req_complete(req, 0);
@@ -659,14 +772,12 @@ static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
- struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
- u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
+ u16 status;
- req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
- if (unlikely(!req->ns)) {
- req->error_loc = offsetof(struct nvme_common_command, nsid);
+ status = nvmet_req_find_ns(req);
+ if (status)
return status;
- }
mutex_lock(&subsys->lock);
switch (write_protect) {
@@ -694,7 +805,9 @@ u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
+ nvmet_stop_keep_alive_timer(req->sq->ctrl);
req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
+ nvmet_start_keep_alive_timer(req->sq->ctrl);
nvmet_set_result(req, req->sq->ctrl->kato);
@@ -716,17 +829,26 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
return 0;
}
-static void nvmet_execute_set_features(struct nvmet_req *req)
+void nvmet_execute_set_features(struct nvmet_req *req)
{
- struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
+ u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
u16 status = 0;
+ u16 nsqr;
+ u16 ncqr;
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
switch (cdw10 & 0xff) {
case NVME_FEAT_NUM_QUEUES:
+ ncqr = (cdw11 >> 16) & 0xffff;
+ nsqr = cdw11 & 0xffff;
+ if (ncqr == 0xffff || nsqr == 0xffff) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
nvmet_set_result(req,
(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
break;
@@ -753,14 +875,13 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
- struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
u32 result;
- req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
- if (!req->ns) {
- req->error_loc = offsetof(struct nvme_common_command, nsid);
- return NVME_SC_INVALID_NS | NVME_SC_DNR;
- }
+ result = nvmet_req_find_ns(req);
+ if (result)
+ return result;
+
mutex_lock(&subsys->lock);
if (req->ns->readonly == true)
result = NVME_NS_WRITE_PROTECT;
@@ -782,13 +903,13 @@ void nvmet_get_feat_async_event(struct nvmet_req *req)
nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}
-static void nvmet_execute_get_features(struct nvmet_req *req)
+void nvmet_execute_get_features(struct nvmet_req *req)
{
- struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0;
- if (!nvmet_check_data_len(req, nvmet_feat_data_len(req, cdw10)))
+ if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
return;
switch (cdw10 & 0xff) {
@@ -855,7 +976,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
mutex_lock(&ctrl->lock);
@@ -867,21 +988,27 @@ void nvmet_execute_async_event(struct nvmet_req *req)
ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
mutex_unlock(&ctrl->lock);
- schedule_work(&ctrl->async_event_work);
+ queue_work(nvmet_wq, &ctrl->async_event_work);
}
void nvmet_execute_keep_alive(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u16 status = 0;
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
+ if (!ctrl->kato) {
+ status = NVME_SC_KA_TIMEOUT_INVALID;
+ goto out;
+ }
+
pr_debug("ctrl %d update keep-alive timer for %d secs\n",
ctrl->cntlid, ctrl->kato);
-
mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
- nvmet_req_complete(req, 0);
+out:
+ nvmet_req_complete(req, status);
}
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
@@ -890,14 +1017,19 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
u16 ret;
if (nvme_is_fabrics(cmd))
- return nvmet_parse_fabrics_cmd(req);
- if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
+ return nvmet_parse_fabrics_admin_cmd(req);
+ if (unlikely(!nvmet_check_auth_status(req)))
+ return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+ if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
return nvmet_parse_discovery_cmd(req);
- ret = nvmet_check_ctrl_status(req, cmd);
+ ret = nvmet_check_ctrl_status(req);
if (unlikely(ret))
return ret;
+ if (nvmet_is_passthru_req(req))
+ return nvmet_parse_passthru_admin_cmd(req);
+
switch (cmd->common.opcode) {
case nvme_admin_get_log_page:
req->execute = nvmet_execute_get_log_page;
@@ -920,10 +1052,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
case nvme_admin_keep_alive:
req->execute = nvmet_execute_keep_alive;
return 0;
+ default:
+ return nvmet_report_invalid_opcode(req);
}
-
- pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
- req->sq->qid);
- req->error_loc = offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
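A note on the MDTS change earlier in this file: transports that limit the transfer size provide the optional ->get_mdts() callback, which nvmet_execute_identify_ctrl() consumes as shown above. A sketch of such a callback, following that usage and assuming a limit of 2^5 controller pages (the value is illustrative, not taken from any real transport):

static u8 example_get_mdts(const struct nvmet_ctrl *ctrl)
{
	/* MDTS is a power-of-two exponent in units of the controller page size */
	return 5;	/* 32 pages, an assumed example limit */
}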
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
new file mode 100644
index 000000000000..c4113b43dbfe
--- /dev/null
+++ b/drivers/nvme/target/auth.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics DH-HMAC-CHAP authentication.
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
+ * All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <crypto/hash.h>
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <linux/ctype.h>
+#include <linux/random.h>
+#include <linux/nvme-auth.h>
+#include <asm/unaligned.h>
+
+#include "nvmet.h"
+
+int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
+ bool set_ctrl)
+{
+ unsigned char key_hash;
+ char *dhchap_secret;
+
+ if (sscanf(secret, "DHHC-1:%hhd:%*s", &key_hash) != 1)
+ return -EINVAL;
+ if (key_hash > 3) {
+ pr_warn("Invalid DH-HMAC-CHAP hash id %d\n",
+ key_hash);
+ return -EINVAL;
+ }
+ if (key_hash > 0) {
+ /* Validate selected hash algorithm */
+ const char *hmac = nvme_auth_hmac_name(key_hash);
+
+ if (!crypto_has_shash(hmac, 0, 0)) {
+ pr_err("DH-HMAC-CHAP hash %s unsupported\n", hmac);
+ return -ENOTSUPP;
+ }
+ }
+ dhchap_secret = kstrdup(secret, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ if (set_ctrl) {
+ host->dhchap_ctrl_secret = strim(dhchap_secret);
+ host->dhchap_ctrl_key_hash = key_hash;
+ } else {
+ host->dhchap_secret = strim(dhchap_secret);
+ host->dhchap_key_hash = key_hash;
+ }
+ return 0;
+}
+
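For context, the secret strings accepted by nvmet_auth_set_key() use the DH-HMAC-CHAP representation from the NVMe specification. A well-formed value looks like the following (the key material here is made up):

/*
 * DHHC-1:01:TWFkZS11cCBrZXkgbWF0ZXJpYWwgZm9yIGlsbHVzdHJhdGlvbg==:
 *        ^^
 *        key_hash: 00 = use the secret as-is,
 *                  01/02/03 = secret transformed with SHA-256/384/512
 */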
+int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id)
+{
+ const char *dhgroup_kpp;
+ int ret = 0;
+
+ pr_debug("%s: ctrl %d selecting dhgroup %d\n",
+ __func__, ctrl->cntlid, dhgroup_id);
+
+ if (ctrl->dh_tfm) {
+ if (ctrl->dh_gid == dhgroup_id) {
+ pr_debug("%s: ctrl %d reuse existing DH group %d\n",
+ __func__, ctrl->cntlid, dhgroup_id);
+ return 0;
+ }
+ crypto_free_kpp(ctrl->dh_tfm);
+ ctrl->dh_tfm = NULL;
+ ctrl->dh_gid = 0;
+ }
+
+ if (dhgroup_id == NVME_AUTH_DHGROUP_NULL)
+ return 0;
+
+ dhgroup_kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
+ if (!dhgroup_kpp) {
+ pr_debug("%s: ctrl %d invalid DH group %d\n",
+ __func__, ctrl->cntlid, dhgroup_id);
+ return -EINVAL;
+ }
+ ctrl->dh_tfm = crypto_alloc_kpp(dhgroup_kpp, 0, 0);
+ if (IS_ERR(ctrl->dh_tfm)) {
+ pr_debug("%s: ctrl %d failed to setup DH group %d, err %ld\n",
+ __func__, ctrl->cntlid, dhgroup_id,
+ PTR_ERR(ctrl->dh_tfm));
+ ret = PTR_ERR(ctrl->dh_tfm);
+ ctrl->dh_tfm = NULL;
+ ctrl->dh_gid = 0;
+ } else {
+ ctrl->dh_gid = dhgroup_id;
+ pr_debug("%s: ctrl %d setup DH group %d\n",
+ __func__, ctrl->cntlid, ctrl->dh_gid);
+ ret = nvme_auth_gen_privkey(ctrl->dh_tfm, ctrl->dh_gid);
+ if (ret < 0) {
+ pr_debug("%s: ctrl %d failed to generate private key, err %d\n",
+ __func__, ctrl->cntlid, ret);
+ kfree_sensitive(ctrl->dh_key);
+ return ret;
+ }
+ ctrl->dh_keysize = crypto_kpp_maxsize(ctrl->dh_tfm);
+ kfree_sensitive(ctrl->dh_key);
+ ctrl->dh_key = kzalloc(ctrl->dh_keysize, GFP_KERNEL);
+ if (!ctrl->dh_key) {
+ pr_warn("ctrl %d failed to allocate public key\n",
+ ctrl->cntlid);
+ return -ENOMEM;
+ }
+ ret = nvme_auth_gen_pubkey(ctrl->dh_tfm, ctrl->dh_key,
+ ctrl->dh_keysize);
+ if (ret < 0) {
+ pr_warn("ctrl %d failed to generate public key\n",
+ ctrl->cntlid);
+ kfree(ctrl->dh_key);
+ ctrl->dh_key = NULL;
+ }
+ }
+
+ return ret;
+}
+
+int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+{
+ int ret = 0;
+ struct nvmet_host_link *p;
+ struct nvmet_host *host = NULL;
+ const char *hash_name;
+
+ down_read(&nvmet_config_sem);
+ if (nvmet_is_disc_subsys(ctrl->subsys))
+ goto out_unlock;
+
+ if (ctrl->subsys->allow_any_host)
+ goto out_unlock;
+
+ list_for_each_entry(p, &ctrl->subsys->hosts, entry) {
+ pr_debug("check %s\n", nvmet_host_name(p->host));
+ if (strcmp(nvmet_host_name(p->host), ctrl->hostnqn))
+ continue;
+ host = p->host;
+ break;
+ }
+ if (!host) {
+ pr_debug("host %s not found\n", ctrl->hostnqn);
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ ret = nvmet_setup_dhgroup(ctrl, host->dhchap_dhgroup_id);
+ if (ret < 0)
+ pr_warn("Failed to setup DH group");
+
+ if (!host->dhchap_secret) {
+ pr_debug("No authentication provided\n");
+ goto out_unlock;
+ }
+
+ if (host->dhchap_hash_id == ctrl->shash_id) {
+ pr_debug("Re-use existing hash ID %d\n",
+ ctrl->shash_id);
+ } else {
+ hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
+ if (!hash_name) {
+ pr_warn("Hash ID %d invalid\n", host->dhchap_hash_id);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ ctrl->shash_id = host->dhchap_hash_id;
+ }
+
+ /* Skip the 'DHHC-1:XX:' prefix */
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = nvme_auth_extract_key(host->dhchap_secret + 10,
+ host->dhchap_key_hash);
+ if (IS_ERR(ctrl->host_key)) {
+ ret = PTR_ERR(ctrl->host_key);
+ ctrl->host_key = NULL;
+ goto out_free_hash;
+ }
+ pr_debug("%s: using hash %s key %*ph\n", __func__,
+ ctrl->host_key->hash > 0 ?
+ nvme_auth_hmac_name(ctrl->host_key->hash) : "none",
+ (int)ctrl->host_key->len, ctrl->host_key->key);
+
+ nvme_auth_free_key(ctrl->ctrl_key);
+ if (!host->dhchap_ctrl_secret) {
+ ctrl->ctrl_key = NULL;
+ goto out_unlock;
+ }
+
+ ctrl->ctrl_key = nvme_auth_extract_key(host->dhchap_ctrl_secret + 10,
+ host->dhchap_ctrl_key_hash);
+ if (IS_ERR(ctrl->ctrl_key)) {
+ ret = PTR_ERR(ctrl->ctrl_key);
+ ctrl->ctrl_key = NULL;
+ goto out_free_hash;
+ }
+ pr_debug("%s: using ctrl hash %s key %*ph\n", __func__,
+ ctrl->ctrl_key->hash > 0 ?
+ nvme_auth_hmac_name(ctrl->ctrl_key->hash) : "none",
+ (int)ctrl->ctrl_key->len, ctrl->ctrl_key->key);
+
+out_free_hash:
+ if (ret) {
+ if (ctrl->host_key) {
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = NULL;
+ }
+ ctrl->shash_id = 0;
+ }
+out_unlock:
+ up_read(&nvmet_config_sem);
+
+ return ret;
+}
+
+void nvmet_auth_sq_free(struct nvmet_sq *sq)
+{
+ cancel_delayed_work(&sq->auth_expired_work);
+ kfree(sq->dhchap_c1);
+ sq->dhchap_c1 = NULL;
+ kfree(sq->dhchap_c2);
+ sq->dhchap_c2 = NULL;
+ kfree(sq->dhchap_skey);
+ sq->dhchap_skey = NULL;
+}
+
+void nvmet_destroy_auth(struct nvmet_ctrl *ctrl)
+{
+ ctrl->shash_id = 0;
+
+ if (ctrl->dh_tfm) {
+ crypto_free_kpp(ctrl->dh_tfm);
+ ctrl->dh_tfm = NULL;
+ ctrl->dh_gid = 0;
+ }
+ kfree_sensitive(ctrl->dh_key);
+ ctrl->dh_key = NULL;
+
+ if (ctrl->host_key) {
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = NULL;
+ }
+ if (ctrl->ctrl_key) {
+ nvme_auth_free_key(ctrl->ctrl_key);
+ ctrl->ctrl_key = NULL;
+ }
+}
+
+bool nvmet_check_auth_status(struct nvmet_req *req)
+{
+ if (req->sq->ctrl->host_key &&
+ !req->sq->authenticated)
+ return false;
+ return true;
+}
+
+int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ unsigned int shash_len)
+{
+ struct crypto_shash *shash_tfm;
+ struct shash_desc *shash;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ const char *hash_name;
+ u8 *challenge = req->sq->dhchap_c1, *host_response;
+ u8 buf[4];
+ int ret;
+
+ hash_name = nvme_auth_hmac_name(ctrl->shash_id);
+ if (!hash_name) {
+ pr_warn("Hash ID %d invalid\n", ctrl->shash_id);
+ return -EINVAL;
+ }
+
+ shash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(shash_tfm)) {
+ pr_err("failed to allocate shash %s\n", hash_name);
+ return PTR_ERR(shash_tfm);
+ }
+
+ if (shash_len != crypto_shash_digestsize(shash_tfm)) {
+ pr_debug("%s: hash len mismatch (len %d digest %d)\n",
+ __func__, shash_len,
+ crypto_shash_digestsize(shash_tfm));
+ ret = -EINVAL;
+ goto out_free_tfm;
+ }
+
+ host_response = nvme_auth_transform_key(ctrl->host_key, ctrl->hostnqn);
+ if (IS_ERR(host_response)) {
+ ret = PTR_ERR(host_response);
+ goto out_free_tfm;
+ }
+
+ ret = crypto_shash_setkey(shash_tfm, host_response,
+ ctrl->host_key->len);
+ if (ret)
+ goto out_free_response;
+
+ if (ctrl->dh_gid != NVME_AUTH_DHGROUP_NULL) {
+ challenge = kmalloc(shash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ ret = nvme_auth_augmented_challenge(ctrl->shash_id,
+ req->sq->dhchap_skey,
+ req->sq->dhchap_skey_len,
+ req->sq->dhchap_c1,
+ challenge, shash_len);
+ if (ret)
+ goto out_free_response;
+ }
+
+ pr_debug("ctrl %d qid %d host response seq %u transaction %d\n",
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
+ req->sq->dhchap_tid);
+
+ shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ shash->tfm = shash_tfm;
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, shash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(req->sq->dhchap_s1, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(req->sq->dhchap_tid, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, 4);
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "HostHost", 8);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->subsysnqn,
+ strlen(ctrl->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, response);
+out:
+ if (challenge != req->sq->dhchap_c1)
+ kfree(challenge);
+ kfree(shash);
+out_free_response:
+ kfree_sensitive(host_response);
+out_free_tfm:
+ crypto_free_shash(shash_tfm);
+ return ret;
+}
+
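The crypto_shash_update() sequence above computes the DH-HMAC-CHAP host response. Written out as a sketch (C1 is the challenge, replaced by the augmented challenge when a DH group is negotiated):

/*
 * response = HMAC(transformed host key,
 *                 C1 || le32(SeqNum) || le16(TID) || 0x00 ||
 *                 "HostHost" || hostnqn || 0x00 || subsysnqn)
 */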
+int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+ unsigned int shash_len)
+{
+ struct crypto_shash *shash_tfm;
+ struct shash_desc *shash;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ const char *hash_name;
+ u8 *challenge = req->sq->dhchap_c2, *ctrl_response;
+ u8 buf[4];
+ int ret;
+
+ hash_name = nvme_auth_hmac_name(ctrl->shash_id);
+ if (!hash_name) {
+ pr_warn("Hash ID %d invalid\n", ctrl->shash_id);
+ return -EINVAL;
+ }
+
+ shash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(shash_tfm)) {
+ pr_err("failed to allocate shash %s\n", hash_name);
+ return PTR_ERR(shash_tfm);
+ }
+
+ if (shash_len != crypto_shash_digestsize(shash_tfm)) {
+ pr_debug("%s: hash len mismatch (len %d digest %d)\n",
+ __func__, shash_len,
+ crypto_shash_digestsize(shash_tfm));
+ ret = -EINVAL;
+ goto out_free_tfm;
+ }
+
+ ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key,
+ ctrl->subsysnqn);
+ if (IS_ERR(ctrl_response)) {
+ ret = PTR_ERR(ctrl_response);
+ goto out_free_tfm;
+ }
+
+ ret = crypto_shash_setkey(shash_tfm, ctrl_response,
+ ctrl->ctrl_key->len);
+ if (ret)
+ goto out_free_response;
+
+ if (ctrl->dh_gid != NVME_AUTH_DHGROUP_NULL) {
+ challenge = kmalloc(shash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ ret = nvme_auth_augmented_challenge(ctrl->shash_id,
+ req->sq->dhchap_skey,
+ req->sq->dhchap_skey_len,
+ req->sq->dhchap_c2,
+ challenge, shash_len);
+ if (ret)
+ goto out_free_response;
+ }
+
+ shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ shash->tfm = shash_tfm;
+
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, shash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(req->sq->dhchap_s2, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(req->sq->dhchap_tid, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, 4);
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "Controller", 10);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->subsysnqn,
+ strlen(ctrl->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, response);
+out:
+ if (challenge != req->sq->dhchap_c2)
+ kfree(challenge);
+ kfree(shash);
+out_free_response:
+ kfree_sensitive(ctrl_response);
+out_free_tfm:
+ crypto_free_shash(shash_tfm);
+ return ret;
+}
+
+int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
+ u8 *buf, int buf_size)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int ret = 0;
+
+ if (!ctrl->dh_key) {
+ pr_warn("ctrl %d no DH public key!\n", ctrl->cntlid);
+ return -ENOKEY;
+ }
+ if (buf_size != ctrl->dh_keysize) {
+ pr_warn("ctrl %d DH public key size mismatch, need %zu is %d\n",
+ ctrl->cntlid, ctrl->dh_keysize, buf_size);
+ ret = -EINVAL;
+ } else {
+ memcpy(buf, ctrl->dh_key, buf_size);
+ pr_debug("%s: ctrl %d public key %*ph\n", __func__,
+ ctrl->cntlid, (int)buf_size, buf);
+ }
+
+ return ret;
+}
+
+int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
+ u8 *pkey, int pkey_size)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int ret;
+
+ req->sq->dhchap_skey_len = ctrl->dh_keysize;
+ req->sq->dhchap_skey = kzalloc(req->sq->dhchap_skey_len, GFP_KERNEL);
+ if (!req->sq->dhchap_skey)
+ return -ENOMEM;
+ ret = nvme_auth_gen_shared_secret(ctrl->dh_tfm,
+ pkey, pkey_size,
+ req->sq->dhchap_skey,
+ req->sq->dhchap_skey_len);
+ if (ret)
+ pr_debug("failed to compute shared secret, err %d\n", ret);
+ else
+ pr_debug("%s: shared secret %*ph\n", __func__,
+ (int)req->sq->dhchap_skey_len,
+ req->sq->dhchap_skey);
+
+ return ret;
+}
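Taken together, the helpers in this new file cover the target side of the authentication exchange; a rough map of how they fit:

/*
 * nvmet_setup_auth()            - look up the host entry, select hash and keys
 * nvmet_setup_dhgroup()         - allocate the KPP, generate the key pair
 * nvmet_auth_ctrl_exponential() - export the controller public key
 * nvmet_auth_ctrl_sesskey()     - derive the per-queue shared secret from the
 *                                 host public key
 * nvmet_auth_host_hash() /
 * nvmet_auth_ctrl_hash()        - compute the expected HMAC responses, over the
 *                                 augmented challenge when a DH group is in use
 */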
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 98613a45bd3b..6a2816f3b4e8 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -11,6 +11,11 @@
#include <linux/ctype.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
+#ifdef CONFIG_NVME_TARGET_AUTH
+#include <linux/nvme-auth.h>
+#endif
+#include <crypto/hash.h>
+#include <crypto/kpp.h>
#include "nvmet.h"
@@ -20,61 +25,72 @@ static const struct config_item_type nvmet_subsys_type;
static LIST_HEAD(nvmet_ports_list);
struct list_head *nvmet_ports = &nvmet_ports_list;
-static const struct nvmet_transport_name {
+struct nvmet_type_name_map {
u8 type;
const char *name;
-} nvmet_transport_names[] = {
+};
+
+static struct nvmet_type_name_map nvmet_transport[] = {
{ NVMF_TRTYPE_RDMA, "rdma" },
{ NVMF_TRTYPE_FC, "fc" },
{ NVMF_TRTYPE_TCP, "tcp" },
{ NVMF_TRTYPE_LOOP, "loop" },
};
+static const struct nvmet_type_name_map nvmet_addr_family[] = {
+ { NVMF_ADDR_FAMILY_PCI, "pcie" },
+ { NVMF_ADDR_FAMILY_IP4, "ipv4" },
+ { NVMF_ADDR_FAMILY_IP6, "ipv6" },
+ { NVMF_ADDR_FAMILY_IB, "ib" },
+ { NVMF_ADDR_FAMILY_FC, "fc" },
+ { NVMF_ADDR_FAMILY_LOOP, "loop" },
+};
+
+static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
+{
+ if (p->enabled)
+ pr_err("Disable port '%u' before changing attribute in %s\n",
+ le16_to_cpu(p->disc_addr.portid), caller);
+ return p->enabled;
+}
+
/*
* nvmet_port Generic ConfigFS definitions.
* Used in any place in the ConfigFS tree that refers to an address.
*/
-static ssize_t nvmet_addr_adrfam_show(struct config_item *item,
- char *page)
+static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
{
- switch (to_nvmet_port(item)->disc_addr.adrfam) {
- case NVMF_ADDR_FAMILY_IP4:
- return sprintf(page, "ipv4\n");
- case NVMF_ADDR_FAMILY_IP6:
- return sprintf(page, "ipv6\n");
- case NVMF_ADDR_FAMILY_IB:
- return sprintf(page, "ib\n");
- case NVMF_ADDR_FAMILY_FC:
- return sprintf(page, "fc\n");
- default:
- return sprintf(page, "\n");
+ u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
+ int i;
+
+ for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
+ if (nvmet_addr_family[i].type == adrfam)
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ nvmet_addr_family[i].name);
}
+
+ return snprintf(page, PAGE_SIZE, "\n");
}
static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
const char *page, size_t count)
{
struct nvmet_port *port = to_nvmet_port(item);
+ int i;
- if (port->enabled) {
- pr_err("Cannot modify address while enabled\n");
- pr_err("Disable the address before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
- if (sysfs_streq(page, "ipv4")) {
- port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP4;
- } else if (sysfs_streq(page, "ipv6")) {
- port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP6;
- } else if (sysfs_streq(page, "ib")) {
- port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IB;
- } else if (sysfs_streq(page, "fc")) {
- port->disc_addr.adrfam = NVMF_ADDR_FAMILY_FC;
- } else {
- pr_err("Invalid value '%s' for adrfam\n", page);
- return -EINVAL;
+ for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
+ if (sysfs_streq(page, nvmet_addr_family[i].name))
+ goto found;
}
+ pr_err("Invalid value '%s' for adrfam\n", page);
+ return -EINVAL;
+
+found:
+ port->disc_addr.adrfam = nvmet_addr_family[i].type;
return count;
}
@@ -83,10 +99,9 @@ CONFIGFS_ATTR(nvmet_, addr_adrfam);
static ssize_t nvmet_addr_portid_show(struct config_item *item,
char *page)
{
- struct nvmet_port *port = to_nvmet_port(item);
+ __le16 portid = to_nvmet_port(item)->disc_addr.portid;
- return snprintf(page, PAGE_SIZE, "%d\n",
- le16_to_cpu(port->disc_addr.portid));
+ return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid));
}
static ssize_t nvmet_addr_portid_store(struct config_item *item,
@@ -100,11 +115,9 @@ static ssize_t nvmet_addr_portid_store(struct config_item *item,
return -EINVAL;
}
- if (port->enabled) {
- pr_err("Cannot modify address while enabled\n");
- pr_err("Disable the address before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
+
port->disc_addr.portid = cpu_to_le16(portid);
return count;
}
@@ -116,8 +129,7 @@ static ssize_t nvmet_addr_traddr_show(struct config_item *item,
{
struct nvmet_port *port = to_nvmet_port(item);
- return snprintf(page, PAGE_SIZE, "%s\n",
- port->disc_addr.traddr);
+ return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr);
}
static ssize_t nvmet_addr_traddr_store(struct config_item *item,
@@ -130,11 +142,8 @@ static ssize_t nvmet_addr_traddr_store(struct config_item *item,
return -EINVAL;
}
- if (port->enabled) {
- pr_err("Cannot modify address while enabled\n");
- pr_err("Disable the address before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
return -EINVAL;
@@ -143,20 +152,25 @@ static ssize_t nvmet_addr_traddr_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_, addr_traddr);
-static ssize_t nvmet_addr_treq_show(struct config_item *item,
- char *page)
+static const struct nvmet_type_name_map nvmet_addr_treq[] = {
+ { NVMF_TREQ_NOT_SPECIFIED, "not specified" },
+ { NVMF_TREQ_REQUIRED, "required" },
+ { NVMF_TREQ_NOT_REQUIRED, "not required" },
+};
+
+static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
{
- switch (to_nvmet_port(item)->disc_addr.treq &
- NVME_TREQ_SECURE_CHANNEL_MASK) {
- case NVMF_TREQ_NOT_SPECIFIED:
- return sprintf(page, "not specified\n");
- case NVMF_TREQ_REQUIRED:
- return sprintf(page, "required\n");
- case NVMF_TREQ_NOT_REQUIRED:
- return sprintf(page, "not required\n");
- default:
- return sprintf(page, "\n");
+ u8 treq = to_nvmet_port(item)->disc_addr.treq &
+ NVME_TREQ_SECURE_CHANNEL_MASK;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
+ if (treq == nvmet_addr_treq[i].type)
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ nvmet_addr_treq[i].name);
}
+
+ return snprintf(page, PAGE_SIZE, "\n");
}
static ssize_t nvmet_addr_treq_store(struct config_item *item,
@@ -164,25 +178,22 @@ static ssize_t nvmet_addr_treq_store(struct config_item *item,
{
struct nvmet_port *port = to_nvmet_port(item);
u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK;
+ int i;
- if (port->enabled) {
- pr_err("Cannot modify address while enabled\n");
- pr_err("Disable the address before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
- if (sysfs_streq(page, "not specified")) {
- treq |= NVMF_TREQ_NOT_SPECIFIED;
- } else if (sysfs_streq(page, "required")) {
- treq |= NVMF_TREQ_REQUIRED;
- } else if (sysfs_streq(page, "not required")) {
- treq |= NVMF_TREQ_NOT_REQUIRED;
- } else {
- pr_err("Invalid value '%s' for treq\n", page);
- return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
+ if (sysfs_streq(page, nvmet_addr_treq[i].name))
+ goto found;
}
- port->disc_addr.treq = treq;
+ pr_err("Invalid value '%s' for treq\n", page);
+ return -EINVAL;
+
+found:
+ treq |= nvmet_addr_treq[i].type;
+ port->disc_addr.treq = treq;
return count;
}
@@ -193,8 +204,7 @@ static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
{
struct nvmet_port *port = to_nvmet_port(item);
- return snprintf(page, PAGE_SIZE, "%s\n",
- port->disc_addr.trsvcid);
+ return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid);
}
static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
@@ -206,11 +216,8 @@ static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
pr_err("Invalid value '%s' for trsvcid\n", page);
return -EINVAL;
}
- if (port->enabled) {
- pr_err("Cannot modify address while enabled\n");
- pr_err("Disable the address before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
return -EINVAL;
@@ -233,11 +240,8 @@ static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
struct nvmet_port *port = to_nvmet_port(item);
int ret;
- if (port->enabled) {
- pr_err("Cannot modify inline_data_size while port enabled\n");
- pr_err("Disable the port before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
ret = kstrtoint(page, 0, &port->inline_data_size);
if (ret) {
pr_err("Invalid value '%s' for inline_data_size\n", page);
@@ -248,16 +252,44 @@ static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_, param_inline_data_size);
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
+}
+
+static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ bool val;
+
+ if (strtobool(page, &val))
+ return -EINVAL;
+
+ if (nvmet_is_port_enabled(port, __func__))
+ return -EACCES;
+
+ port->pi_enable = val;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_pi_enable);
+#endif
+
static ssize_t nvmet_addr_trtype_show(struct config_item *item,
char *page)
{
struct nvmet_port *port = to_nvmet_port(item);
int i;
- for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) {
- if (port->disc_addr.trtype != nvmet_transport_names[i].type)
- continue;
- return sprintf(page, "%s\n", nvmet_transport_names[i].name);
+ for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
+ if (port->disc_addr.trtype == nvmet_transport[i].type)
+ return snprintf(page, PAGE_SIZE,
+ "%s\n", nvmet_transport[i].name);
}
return sprintf(page, "\n");
@@ -276,22 +308,20 @@ static ssize_t nvmet_addr_trtype_store(struct config_item *item,
struct nvmet_port *port = to_nvmet_port(item);
int i;
- if (port->enabled) {
- pr_err("Cannot modify address while enabled\n");
- pr_err("Disable the address before modifying\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
- for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) {
- if (sysfs_streq(page, nvmet_transport_names[i].name))
+ for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
+ if (sysfs_streq(page, nvmet_transport[i].name))
goto found;
}
pr_err("Invalid value '%s' for trtype\n", page);
return -EINVAL;
+
found:
memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
- port->disc_addr.trtype = nvmet_transport_names[i].type;
+ port->disc_addr.trtype = nvmet_transport[i].type;
if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
nvmet_port_init_tsas_rdma(port);
return count;
@@ -327,7 +357,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
kfree(ns->device_path);
ret = -ENOMEM;
- ns->device_path = kstrndup(page, len, GFP_KERNEL);
+ ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
if (!ns->device_path)
goto out_unlock;
@@ -395,14 +425,12 @@ static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
struct nvmet_subsys *subsys = ns->subsys;
int ret = 0;
-
mutex_lock(&subsys->lock);
if (ns->enabled) {
ret = -EBUSY;
goto out_unlock;
}
-
if (uuid_parse(page, &ns->uuid))
ret = -EINVAL;
@@ -545,6 +573,32 @@ static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_ns_, buffered_io);
+static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ bool val;
+
+ if (strtobool(page, &val))
+ return -EINVAL;
+
+ if (!val)
+ return -EINVAL;
+
+ mutex_lock(&ns->subsys->lock);
+ if (!ns->enabled) {
+ pr_err("enable ns before revalidate.\n");
+ mutex_unlock(&ns->subsys->lock);
+ return -EINVAL;
+ }
+ if (nvmet_ns_revalidate(ns))
+ nvmet_ns_changed(ns->subsys, ns->nsid);
+ mutex_unlock(&ns->subsys->lock);
+ return count;
+}
+
+CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
+
static struct configfs_attribute *nvmet_ns_attrs[] = {
&nvmet_ns_attr_device_path,
&nvmet_ns_attr_device_nguid,
@@ -552,6 +606,7 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
&nvmet_ns_attr_ana_grpid,
&nvmet_ns_attr_enable,
&nvmet_ns_attr_buffered_io,
+ &nvmet_ns_attr_revalidate_size,
#ifdef CONFIG_PCI_P2PDMA
&nvmet_ns_attr_p2pmem,
#endif
@@ -615,6 +670,163 @@ static const struct config_item_type nvmet_namespaces_type = {
.ct_owner = THIS_MODULE,
};
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+
+static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+
+ return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
+}
+
+static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+ size_t len;
+ int ret;
+
+ mutex_lock(&subsys->lock);
+
+ ret = -EBUSY;
+ if (subsys->passthru_ctrl)
+ goto out_unlock;
+
+ ret = -EINVAL;
+ len = strcspn(page, "\n");
+ if (!len)
+ goto out_unlock;
+
+ kfree(subsys->passthru_ctrl_path);
+ ret = -ENOMEM;
+ subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
+ if (!subsys->passthru_ctrl_path)
+ goto out_unlock;
+
+ mutex_unlock(&subsys->lock);
+
+ return count;
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return ret;
+}
+CONFIGFS_ATTR(nvmet_passthru_, device_path);
+
+static ssize_t nvmet_passthru_enable_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+
+ return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
+}
+
+static ssize_t nvmet_passthru_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+ bool enable;
+ int ret = 0;
+
+ if (strtobool(page, &enable))
+ return -EINVAL;
+
+ if (enable)
+ ret = nvmet_passthru_ctrl_enable(subsys);
+ else
+ nvmet_passthru_ctrl_disable(subsys);
+
+ return ret ? ret : count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, enable);
+
+static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout);
+}
+
+static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+ unsigned int timeout;
+
+ if (kstrtouint(page, 0, &timeout))
+ return -EINVAL;
+ subsys->admin_timeout = timeout;
+ return count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, admin_timeout);
+
+static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout);
+}
+
+static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+ unsigned int timeout;
+
+ if (kstrtouint(page, 0, &timeout))
+ return -EINVAL;
+ subsys->io_timeout = timeout;
+ return count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
+
+static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
+}
+
+static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+ unsigned int clear_ids;
+
+ if (kstrtouint(page, 0, &clear_ids))
+ return -EINVAL;
+ subsys->clear_ids = clear_ids;
+ return count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, clear_ids);
+
+static struct configfs_attribute *nvmet_passthru_attrs[] = {
+ &nvmet_passthru_attr_device_path,
+ &nvmet_passthru_attr_enable,
+ &nvmet_passthru_attr_admin_timeout,
+ &nvmet_passthru_attr_io_timeout,
+ &nvmet_passthru_attr_clear_ids,
+ NULL,
+};
+
+static const struct config_item_type nvmet_passthru_type = {
+ .ct_attrs = nvmet_passthru_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
+{
+ config_group_init_type_name(&subsys->passthru_group,
+ "passthru", &nvmet_passthru_type);
+ configfs_add_default_group(&subsys->passthru_group,
+ &subsys->group);
+}
+
+#else /* CONFIG_NVME_TARGET_PASSTHRU */
+
+static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
+{
+}
+
+#endif /* CONFIG_NVME_TARGET_PASSTHRU */
+
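Once the group above is registered, the passthru controls appear under the owning subsystem's configfs directory. Illustrative paths (<subsysnqn> is whatever name the subsystem was created with):

/*
 * /sys/kernel/config/nvmet/subsystems/<subsysnqn>/passthru/device_path
 * /sys/kernel/config/nvmet/subsystems/<subsysnqn>/passthru/enable
 * /sys/kernel/config/nvmet/subsystems/<subsysnqn>/passthru/admin_timeout
 * /sys/kernel/config/nvmet/subsystems/<subsysnqn>/passthru/io_timeout
 * /sys/kernel/config/nvmet/subsystems/<subsysnqn>/passthru/clear_ids
 */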
static int nvmet_port_subsys_allow_link(struct config_item *parent,
struct config_item *target)
{
@@ -811,61 +1023,302 @@ static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
struct nvmet_subsys *subsys = to_subsys(item);
if (NVME_TERTIARY(subsys->ver))
- return snprintf(page, PAGE_SIZE, "%d.%d.%d\n",
- (int)NVME_MAJOR(subsys->ver),
- (int)NVME_MINOR(subsys->ver),
- (int)NVME_TERTIARY(subsys->ver));
- else
- return snprintf(page, PAGE_SIZE, "%d.%d\n",
- (int)NVME_MAJOR(subsys->ver),
- (int)NVME_MINOR(subsys->ver));
+ return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
+ NVME_MAJOR(subsys->ver),
+ NVME_MINOR(subsys->ver),
+ NVME_TERTIARY(subsys->ver));
+
+ return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
+ NVME_MAJOR(subsys->ver),
+ NVME_MINOR(subsys->ver));
}
-static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
- const char *page, size_t count)
+static ssize_t
+nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
+ const char *page, size_t count)
{
- struct nvmet_subsys *subsys = to_subsys(item);
int major, minor, tertiary = 0;
int ret;
+ if (subsys->subsys_discovered) {
+ if (NVME_TERTIARY(subsys->ver))
+ pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
+ NVME_MAJOR(subsys->ver),
+ NVME_MINOR(subsys->ver),
+ NVME_TERTIARY(subsys->ver));
+ else
+ pr_err("Can't set version number. %llu.%llu is already assigned\n",
+ NVME_MAJOR(subsys->ver),
+ NVME_MINOR(subsys->ver));
+ return -EINVAL;
+ }
+
+ /* passthru subsystems use the underlying controller's version */
+ if (nvmet_is_passthru_subsys(subsys))
+ return -EINVAL;
ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
if (ret != 2 && ret != 3)
return -EINVAL;
- down_write(&nvmet_config_sem);
subsys->ver = NVME_VS(major, minor, tertiary);
- up_write(&nvmet_config_sem);
return count;
}
+
+static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ ssize_t ret;
+
+ down_write(&nvmet_config_sem);
+ mutex_lock(&subsys->lock);
+ ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
+ mutex_unlock(&subsys->lock);
+ up_write(&nvmet_config_sem);
+
+ return ret;
+}
CONFIGFS_ATTR(nvmet_subsys_, attr_version);
+/* See Section 1.5 of NVMe 1.4 */
+static bool nvmet_is_ascii(const char c)
+{
+ return c >= 0x20 && c <= 0x7e;
+}
+
static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
char *page)
{
struct nvmet_subsys *subsys = to_subsys(item);
- return snprintf(page, PAGE_SIZE, "%llx\n", subsys->serial);
+ return snprintf(page, PAGE_SIZE, "%.*s\n",
+ NVMET_SN_MAX_SIZE, subsys->serial);
+}
+
+static ssize_t
+nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
+ const char *page, size_t count)
+{
+ int pos, len = strcspn(page, "\n");
+
+ if (subsys->subsys_discovered) {
+ pr_err("Can't set serial number. %s is already assigned\n",
+ subsys->serial);
+ return -EINVAL;
+ }
+
+ if (!len || len > NVMET_SN_MAX_SIZE) {
+ pr_err("Serial Number can not be empty or exceed %d Bytes\n",
+ NVMET_SN_MAX_SIZE);
+ return -EINVAL;
+ }
+
+ for (pos = 0; pos < len; pos++) {
+ if (!nvmet_is_ascii(page[pos])) {
+ pr_err("Serial Number must contain only ASCII strings\n");
+ return -EINVAL;
+ }
+ }
+
+ memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');
+
+ return count;
}
static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
const char *page, size_t count)
{
struct nvmet_subsys *subsys = to_subsys(item);
+ ssize_t ret;
down_write(&nvmet_config_sem);
- sscanf(page, "%llx\n", &subsys->serial);
+ mutex_lock(&subsys->lock);
+ ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
+ mutex_unlock(&subsys->lock);
up_write(&nvmet_config_sem);
- return count;
+ return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
+static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
+}
+
+static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
+ const char *page, size_t cnt)
+{
+ u16 cntlid_min;
+
+ if (sscanf(page, "%hu\n", &cntlid_min) != 1)
+ return -EINVAL;
+
+ if (cntlid_min == 0)
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ if (cntlid_min >= to_subsys(item)->cntlid_max)
+ goto out_unlock;
+ to_subsys(item)->cntlid_min = cntlid_min;
+ up_write(&nvmet_config_sem);
+ return cnt;
+
+out_unlock:
+ up_write(&nvmet_config_sem);
+ return -EINVAL;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);
+
+static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
+}
+
+static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
+ const char *page, size_t cnt)
+{
+ u16 cntlid_max;
+
+ if (sscanf(page, "%hu\n", &cntlid_max) != 1)
+ return -EINVAL;
+
+ if (cntlid_max == 0)
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ if (cntlid_max <= to_subsys(item)->cntlid_min)
+ goto out_unlock;
+ to_subsys(item)->cntlid_max = cntlid_max;
+ up_write(&nvmet_config_sem);
+ return cnt;
+
+out_unlock:
+ up_write(&nvmet_config_sem);
+ return -EINVAL;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
+
+static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+
+ return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
+}
+
+static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
+ const char *page, size_t count)
+{
+ int pos = 0, len;
+ char *val;
+
+ if (subsys->subsys_discovered) {
+ pr_err("Can't set model number. %s is already assigned\n",
+ subsys->model_number);
+ return -EINVAL;
+ }
+
+ len = strcspn(page, "\n");
+ if (!len)
+ return -EINVAL;
+
+ if (len > NVMET_MN_MAX_SIZE) {
+ pr_err("Model number size can not exceed %d Bytes\n",
+ NVMET_MN_MAX_SIZE);
+ return -EINVAL;
+ }
+
+ for (pos = 0; pos < len; pos++) {
+ if (!nvmet_is_ascii(page[pos]))
+ return -EINVAL;
+ }
+
+ val = kmemdup_nul(page, len, GFP_KERNEL);
+ if (!val)
+ return -ENOMEM;
+ kfree(subsys->model_number);
+ subsys->model_number = val;
+ return count;
+}
+
+static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ ssize_t ret;
+
+ down_write(&nvmet_config_sem);
+ mutex_lock(&subsys->lock);
+ ret = nvmet_subsys_attr_model_store_locked(subsys, page, count);
+ mutex_unlock(&subsys->lock);
+ up_write(&nvmet_config_sem);
+
+ return ret;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_model);
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
+}
+
+static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ bool pi_enable;
+
+ if (strtobool(page, &pi_enable))
+ return -EINVAL;
+
+ subsys->pi_support = pi_enable;
+ return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
+#endif
+
+static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid);
+}
+
+static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
+ const char *page, size_t cnt)
+{
+ u16 qid_max;
+
+ if (sscanf(page, "%hu\n", &qid_max) != 1)
+ return -EINVAL;
+
+ if (qid_max < 1 || qid_max > NVMET_NR_QUEUES)
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ to_subsys(item)->max_qid = qid_max;
+ up_write(&nvmet_config_sem);
+ return cnt;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max);
+
static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_allow_any_host,
&nvmet_subsys_attr_attr_version,
&nvmet_subsys_attr_attr_serial,
+ &nvmet_subsys_attr_attr_cntlid_min,
+ &nvmet_subsys_attr_attr_cntlid_max,
+ &nvmet_subsys_attr_attr_model,
+ &nvmet_subsys_attr_attr_qid_max,
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ &nvmet_subsys_attr_attr_pi_enable,
+#endif
NULL,
};
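The subsystem attributes added in this hunk are exposed alongside the existing ones. Illustrative configfs paths:

/*
 * /sys/kernel/config/nvmet/subsystems/<subsysnqn>/attr_cntlid_min
 * /sys/kernel/config/nvmet/subsystems/<subsysnqn>/attr_cntlid_max
 * /sys/kernel/config/nvmet/subsystems/<subsysnqn>/attr_model
 * /sys/kernel/config/nvmet/subsystems/<subsysnqn>/attr_qid_max
 * /sys/kernel/config/nvmet/subsystems/<subsysnqn>/attr_pi_enable  (CONFIG_BLK_DEV_INTEGRITY only)
 */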
@@ -915,6 +1368,8 @@ static struct config_group *nvmet_subsys_make(struct config_group *group,
configfs_add_default_group(&subsys->allowed_hosts_group,
&subsys->group);
+ nvmet_add_passthru_group(subsys);
+
return &subsys->group;
}
@@ -970,12 +1425,19 @@ static struct configfs_attribute *nvmet_referral_attrs[] = {
NULL,
};
-static void nvmet_referral_release(struct config_item *item)
+static void nvmet_referral_notify(struct config_group *group,
+ struct config_item *item)
{
struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
struct nvmet_port *port = to_nvmet_port(item);
nvmet_referral_disable(parent, port);
+}
+
+static void nvmet_referral_release(struct config_item *item)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
kfree(port);
}
@@ -1006,6 +1468,7 @@ static struct config_group *nvmet_referral_make(
static struct configfs_group_operations nvmet_referral_group_ops = {
.make_group = nvmet_referral_make,
+ .disconnect_notify = nvmet_referral_notify,
};
static const struct config_item_type nvmet_referrals_type = {
@@ -1013,10 +1476,7 @@ static const struct config_item_type nvmet_referrals_type = {
.ct_group_ops = &nvmet_referral_group_ops,
};
-static struct {
- enum nvme_ana_state state;
- const char *name;
-} nvmet_ana_state_names[] = {
+static struct nvmet_type_name_map nvmet_ana_state[] = {
{ NVME_ANA_OPTIMIZED, "optimized" },
{ NVME_ANA_NONOPTIMIZED, "non-optimized" },
{ NVME_ANA_INACCESSIBLE, "inaccessible" },
@@ -1031,10 +1491,9 @@ static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
int i;
- for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
- if (state != nvmet_ana_state_names[i].state)
- continue;
- return sprintf(page, "%s\n", nvmet_ana_state_names[i].name);
+ for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
+ if (state == nvmet_ana_state[i].type)
+ return sprintf(page, "%s\n", nvmet_ana_state[i].name);
}
return sprintf(page, "\n");
@@ -1044,10 +1503,11 @@ static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
const char *page, size_t count)
{
struct nvmet_ana_group *grp = to_ana_group(item);
+ enum nvme_ana_state *ana_state = grp->port->ana_state;
int i;
- for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
- if (sysfs_streq(page, nvmet_ana_state_names[i].name))
+ for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
+ if (sysfs_streq(page, nvmet_ana_state[i].name))
goto found;
}
@@ -1056,10 +1516,9 @@ static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
found:
down_write(&nvmet_ana_sem);
- grp->port->ana_state[grp->grpid] = nvmet_ana_state_names[i].state;
+ ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
nvmet_ana_chgcnt++;
up_write(&nvmet_ana_sem);
-
nvmet_port_send_ana_event(grp->port);
return count;
}
@@ -1148,6 +1607,8 @@ static void nvmet_port_release(struct config_item *item)
{
struct nvmet_port *port = to_nvmet_port(item);
+ /* Let any in-flight controller teardown complete */
+ flush_workqueue(nvmet_wq);
list_del(&port->global_entry);
kfree(port->ana_state);
@@ -1161,6 +1622,9 @@ static struct configfs_attribute *nvmet_port_attrs[] = {
&nvmet_attr_addr_trsvcid,
&nvmet_attr_addr_trtype,
&nvmet_attr_param_inline_data_size,
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ &nvmet_attr_param_pi_enable,
+#endif
NULL,
};
@@ -1210,6 +1674,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
port->inline_data_size = -1; /* < 0 == let the transport choose */
port->disc_addr.portid = cpu_to_le16(portid);
+ port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
config_group_init_type_name(&port->group, name, &nvmet_port_type);
@@ -1248,10 +1713,134 @@ static const struct config_item_type nvmet_ports_type = {
static struct config_group nvmet_subsystems_group;
static struct config_group nvmet_ports_group;
+#ifdef CONFIG_NVME_TARGET_AUTH
+static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
+ char *page)
+{
+ u8 *dhchap_secret = to_host(item)->dhchap_secret;
+
+ if (!dhchap_secret)
+ return sprintf(page, "\n");
+ return sprintf(page, "%s\n", dhchap_secret);
+}
+
+static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ int ret;
+
+ ret = nvmet_auth_set_key(host, page, false);
+ /*
+ * Re-authentication is a soft state, so keep the
+ * current authentication valid until the host
+ * requests re-authentication.
+ */
+ return ret < 0 ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_key);
+
+static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
+ char *page)
+{
+ u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;
+
+ if (!dhchap_secret)
+ return sprintf(page, "\n");
+ return sprintf(page, "%s\n", dhchap_secret);
+}
+
+static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ int ret;
+
+ ret = nvmet_auth_set_key(host, page, true);
+ /*
+ * Re-authentication is a soft state, so keep the
+ * current authentication valid until the host
+ * requests re-authentication.
+ */
+ return ret < 0 ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key);
+
+static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_host *host = to_host(item);
+ const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
+
+ return sprintf(page, "%s\n", hash_name ? hash_name : "none");
+}
+
+static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ u8 hmac_id;
+
+ hmac_id = nvme_auth_hmac_id(page);
+ if (hmac_id == NVME_AUTH_HASH_INVALID)
+ return -EINVAL;
+ if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
+ return -ENOTSUPP;
+ host->dhchap_hash_id = hmac_id;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_hash);
+
+static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_host *host = to_host(item);
+ const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id);
+
+ return sprintf(page, "%s\n", dhgroup ? dhgroup : "none");
+}
+
+static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ int dhgroup_id;
+
+ dhgroup_id = nvme_auth_dhgroup_id(page);
+ if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID)
+ return -EINVAL;
+ if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) {
+ const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
+
+ if (!crypto_has_kpp(kpp, 0, 0))
+ return -EINVAL;
+ }
+ host->dhchap_dhgroup_id = dhgroup_id;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup);
+
+static struct configfs_attribute *nvmet_host_attrs[] = {
+ &nvmet_host_attr_dhchap_key,
+ &nvmet_host_attr_dhchap_ctrl_key,
+ &nvmet_host_attr_dhchap_hash,
+ &nvmet_host_attr_dhchap_dhgroup,
+ NULL,
+};
+#endif /* CONFIG_NVME_TARGET_AUTH */
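As a usage sketch (not part of this patch), the new per-host attributes can be driven from userspace with plain file I/O once the host directory exists under configfs; the mount point, host NQN and secret below are illustrative assumptions, and the secret shown is a placeholder rather than a valid DH-CHAP key:

    /* Minimal sketch: set dhchap_key for a host via configfs (assumed paths). */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        /* hypothetical host NQN, created beforehand with mkdir(2) */
        const char *attr = "/sys/kernel/config/nvmet/hosts/"
                "nqn.2014-08.org.nvmexpress:uuid:host-example/dhchap_key";
        /* placeholder secret in DH-HMAC-CHAP transport format */
        const char *secret = "DHHC-1:00:<base64-secret>:\n";
        int fd = open(attr, O_WRONLY);

        if (fd < 0) {
            perror("open dhchap_key");
            return 1;
        }
        if (write(fd, secret, strlen(secret)) != (ssize_t)strlen(secret))
            perror("write dhchap_key");
        close(fd);
        return 0;
    }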
+
static void nvmet_host_release(struct config_item *item)
{
struct nvmet_host *host = to_host(item);
+#ifdef CONFIG_NVME_TARGET_AUTH
+ kfree(host->dhchap_secret);
+ kfree(host->dhchap_ctrl_secret);
+#endif
kfree(host);
}
@@ -1261,6 +1850,9 @@ static struct configfs_item_operations nvmet_host_item_ops = {
static const struct config_item_type nvmet_host_type = {
.ct_item_ops = &nvmet_host_item_ops,
+#ifdef CONFIG_NVME_TARGET_AUTH
+ .ct_attrs = nvmet_host_attrs,
+#endif
.ct_owner = THIS_MODULE,
};
@@ -1273,6 +1865,11 @@ static struct config_group *nvmet_hosts_make_group(struct config_group *group,
if (!host)
return ERR_PTR(-ENOMEM);
+#ifdef CONFIG_NVME_TARGET_AUTH
+ /* Default to SHA256 */
+ host->dhchap_hash_id = NVME_AUTH_HASH_SHA256;
+#endif
+
config_group_init_type_name(&host->group, name, &nvmet_host_type);
return &host->group;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 576de773b4db..aecb5853f8da 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -16,9 +16,13 @@
#include "nvmet.h"
struct workqueue_struct *buffered_io_wq;
+struct workqueue_struct *zbd_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);
+struct workqueue_struct *nvmet_wq;
+EXPORT_SYMBOL_GPL(nvmet_wq);
+
/*
* This read/write semaphore is used to synchronize access to configuration
* information on a target system that will result in discovery log page
@@ -43,43 +47,43 @@ DECLARE_RWSEM(nvmet_ana_sem);
inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
{
- u16 status;
-
switch (errno) {
case 0:
- status = NVME_SC_SUCCESS;
- break;
+ return NVME_SC_SUCCESS;
case -ENOSPC:
req->error_loc = offsetof(struct nvme_rw_command, length);
- status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
- break;
+ return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
case -EREMOTEIO:
req->error_loc = offsetof(struct nvme_rw_command, slba);
- status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
- break;
+ return NVME_SC_LBA_RANGE | NVME_SC_DNR;
case -EOPNOTSUPP:
req->error_loc = offsetof(struct nvme_common_command, opcode);
switch (req->cmd->common.opcode) {
case nvme_cmd_dsm:
case nvme_cmd_write_zeroes:
- status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
- break;
+ return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
default:
- status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
break;
case -ENODATA:
req->error_loc = offsetof(struct nvme_rw_command, nsid);
- status = NVME_SC_ACCESS_DENIED;
- break;
+ return NVME_SC_ACCESS_DENIED;
case -EIO:
- /* FALLTHRU */
+ fallthrough;
default:
req->error_loc = offsetof(struct nvme_common_command, opcode);
- status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ return NVME_SC_INTERNAL | NVME_SC_DNR;
}
+}
- return status;
+u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
+{
+ pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
+ req->sq->qid);
+
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
@@ -113,15 +117,16 @@ u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
return 0;
}
-static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
+static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
{
- struct nvmet_ns *ns;
+ struct nvmet_ns *cur;
+ unsigned long idx;
+ u32 nsid = 0;
- if (list_empty(&subsys->namespaces))
- return 0;
+ xa_for_each(&subsys->namespaces, idx, cur)
+ nsid = cur->nsid;
- ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
- return ns->nsid;
+ return nsid;
}
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
@@ -129,42 +134,51 @@ static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}
-static void nvmet_async_events_process(struct nvmet_ctrl *ctrl, u16 status)
+static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
{
- struct nvmet_async_event *aen;
struct nvmet_req *req;
- while (1) {
+ mutex_lock(&ctrl->lock);
+ while (ctrl->nr_async_event_cmds) {
+ req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
+ mutex_unlock(&ctrl->lock);
+ nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
mutex_lock(&ctrl->lock);
- aen = list_first_entry_or_null(&ctrl->async_events,
- struct nvmet_async_event, entry);
- if (!aen || !ctrl->nr_async_event_cmds) {
- mutex_unlock(&ctrl->lock);
- break;
- }
+ }
+ mutex_unlock(&ctrl->lock);
+}
+static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_async_event *aen;
+ struct nvmet_req *req;
+
+ mutex_lock(&ctrl->lock);
+ while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
+ aen = list_first_entry(&ctrl->async_events,
+ struct nvmet_async_event, entry);
req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
- if (status == 0)
- nvmet_set_result(req, nvmet_async_event_result(aen));
+ nvmet_set_result(req, nvmet_async_event_result(aen));
list_del(&aen->entry);
kfree(aen);
mutex_unlock(&ctrl->lock);
- nvmet_req_complete(req, status);
+ trace_nvmet_async_event(ctrl, req->cqe->result.u32);
+ nvmet_req_complete(req, 0);
+ mutex_lock(&ctrl->lock);
}
+ mutex_unlock(&ctrl->lock);
}
static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
- struct nvmet_req *req;
+ struct nvmet_async_event *aen, *tmp;
mutex_lock(&ctrl->lock);
- while (ctrl->nr_async_event_cmds) {
- req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
- mutex_unlock(&ctrl->lock);
- nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
- mutex_lock(&ctrl->lock);
+ list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
+ list_del(&aen->entry);
+ kfree(aen);
}
mutex_unlock(&ctrl->lock);
}
@@ -174,7 +188,7 @@ static void nvmet_async_event_work(struct work_struct *work)
struct nvmet_ctrl *ctrl =
container_of(work, struct nvmet_ctrl, async_event_work);
- nvmet_async_events_process(ctrl, 0);
+ nvmet_async_events_process(ctrl);
}
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
@@ -194,7 +208,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
list_add_tail(&aen->entry, &ctrl->async_events);
mutex_unlock(&ctrl->lock);
- schedule_work(&ctrl->async_event_work);
+ queue_work(nvmet_wq, &ctrl->async_event_work);
}
static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
@@ -322,12 +336,21 @@ int nvmet_enable_port(struct nvmet_port *port)
if (!try_module_get(ops->owner))
return -EINVAL;
- ret = ops->add_port(port);
- if (ret) {
- module_put(ops->owner);
- return ret;
+ /*
+ * If the user requested PI support and the transport isn't pi capable,
+ * don't enable the port.
+ */
+ if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
+ pr_err("T10-PI is not supported by transport type %d\n",
+ port->disc_addr.trtype);
+ ret = -EINVAL;
+ goto out_put;
}
+ ret = ops->add_port(port);
+ if (ret)
+ goto out_put;
+
/* If the transport didn't set inline_data_size, then disable it. */
if (port->inline_data_size < 0)
port->inline_data_size = 0;
@@ -335,6 +358,10 @@ int nvmet_enable_port(struct nvmet_port *port)
port->enabled = true;
port->tr_ops = ops;
return 0;
+
+out_put:
+ module_put(ops->owner);
+ return ret;
}
void nvmet_disable_port(struct nvmet_port *port)
@@ -355,13 +382,13 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
{
struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
struct nvmet_ctrl, ka_work);
- bool cmd_seen = ctrl->cmd_seen;
+ bool reset_tbkas = ctrl->reset_tbkas;
- ctrl->cmd_seen = false;
- if (cmd_seen) {
+ ctrl->reset_tbkas = false;
+ if (reset_tbkas) {
pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
ctrl->cntlid);
- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
return;
}
@@ -371,46 +398,39 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
nvmet_ctrl_fatal_error(ctrl);
}
-static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
+void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
+ if (unlikely(ctrl->kato == 0))
+ return;
+
pr_debug("ctrl %d start keep-alive timer for %d secs\n",
ctrl->cntlid, ctrl->kato);
- INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}
-static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
+void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
+ if (unlikely(ctrl->kato == 0))
+ return;
+
pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
cancel_delayed_work_sync(&ctrl->ka_work);
}
-static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
- __le32 nsid)
+u16 nvmet_req_find_ns(struct nvmet_req *req)
{
- struct nvmet_ns *ns;
+ u32 nsid = le32_to_cpu(req->cmd->common.nsid);
- list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
- if (ns->nsid == le32_to_cpu(nsid))
- return ns;
+ req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
+ if (unlikely(!req->ns)) {
+ req->error_loc = offsetof(struct nvme_common_command, nsid);
+ return NVME_SC_INVALID_NS | NVME_SC_DNR;
}
- return NULL;
-}
-
-struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
-{
- struct nvmet_ns *ns;
-
- rcu_read_lock();
- ns = __nvmet_find_namespace(ctrl, nsid);
- if (ns)
- percpu_ref_get(&ns->ref);
- rcu_read_unlock();
-
- return ns;
+ percpu_ref_get(&req->ns->ref);
+ return NVME_SC_SUCCESS;
}
static void nvmet_destroy_namespace(struct percpu_ref *ref)
@@ -444,7 +464,7 @@ static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
return -EINVAL;
}
- if (!blk_queue_pci_p2pdma(ns->bdev->bd_queue)) {
+ if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
ns->device_path);
return -EINVAL;
@@ -514,6 +534,18 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
ns->nsid);
}
+bool nvmet_ns_revalidate(struct nvmet_ns *ns)
+{
+ loff_t oldsize = ns->size;
+
+ if (ns->bdev)
+ nvmet_bdev_ns_revalidate(ns);
+ else
+ nvmet_file_ns_revalidate(ns);
+
+ return oldsize != ns->size;
+}
+
int nvmet_ns_enable(struct nvmet_ns *ns)
{
struct nvmet_subsys *subsys = ns->subsys;
@@ -522,6 +554,12 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
mutex_lock(&subsys->lock);
ret = 0;
+
+ if (nvmet_is_passthru_subsys(subsys)) {
+ pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
+ goto out_unlock;
+ }
+
if (ns->enabled)
goto out_unlock;
@@ -550,24 +588,10 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
if (ns->nsid > subsys->max_nsid)
subsys->max_nsid = ns->nsid;
- /*
- * The namespaces list needs to be sorted to simplify the implementation
- * of the Identify Namepace List subcommand.
- */
- if (list_empty(&subsys->namespaces)) {
- list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
- } else {
- struct nvmet_ns *old;
-
- list_for_each_entry_rcu(old, &subsys->namespaces, dev_link,
- lockdep_is_held(&subsys->lock)) {
- BUG_ON(ns->nsid == old->nsid);
- if (ns->nsid < old->nsid)
- break;
- }
+ ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
+ if (ret)
+ goto out_restore_subsys_maxnsid;
- list_add_tail_rcu(&ns->dev_link, &old->dev_link);
- }
subsys->nr_namespaces++;
nvmet_ns_changed(subsys, ns->nsid);
@@ -576,6 +600,10 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
out_unlock:
mutex_unlock(&subsys->lock);
return ret;
+
+out_restore_subsys_maxnsid:
+ subsys->max_nsid = nvmet_max_nsid(subsys);
+ percpu_ref_exit(&ns->ref);
out_dev_put:
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
@@ -594,7 +622,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
goto out_unlock;
ns->enabled = false;
- list_del_rcu(&ns->dev_link);
+ xa_erase(&ns->subsys->namespaces, ns->nsid);
if (ns->nsid == subsys->max_nsid)
subsys->max_nsid = nvmet_max_nsid(subsys);
@@ -645,7 +673,6 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
if (!ns)
return NULL;
- INIT_LIST_HEAD(&ns->dev_link);
init_completion(&ns->disable_done);
ns->nsid = nsid;
@@ -658,6 +685,7 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
uuid_gen(&ns->uuid);
ns->buffered_io = false;
+ ns->csi = NVME_CSI_NVM;
return ns;
}
@@ -707,6 +735,8 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
+ struct nvmet_ns *ns = req->ns;
+
if (!req->sq->sqhd_disabled)
nvmet_update_sq_head(req);
req->cqe->sq_id = cpu_to_le16(req->sq->qid);
@@ -717,9 +747,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
trace_nvmet_req_complete(req);
- if (req->ns)
- nvmet_put_namespace(req->ns);
req->ops->queue_response(req);
+ if (ns)
+ nvmet_put_namespace(ns);
}
void nvmet_req_complete(struct nvmet_req *req, u16 status)
@@ -734,8 +764,6 @@ void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
{
cq->qid = qid;
cq->size = size;
-
- ctrl->cqs[qid] = cq;
}
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
@@ -757,23 +785,29 @@ static void nvmet_confirm_sq(struct percpu_ref *ref)
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
- u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
struct nvmet_ctrl *ctrl = sq->ctrl;
/*
* If this is the admin queue, complete all AERs so that our
* queue doesn't have outstanding requests on it.
*/
- if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq) {
- nvmet_async_events_process(ctrl, status);
- nvmet_async_events_free(ctrl);
- }
+ if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
+ nvmet_async_events_failall(ctrl);
percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
wait_for_completion(&sq->confirm_done);
wait_for_completion(&sq->free_done);
percpu_ref_exit(&sq->ref);
+ nvmet_auth_sq_free(sq);
if (ctrl) {
+ /*
+ * The teardown flow may take some time, and the host may not
+ * send us keep-alive during this period, hence reset the
+ * traffic based keep-alive timer so we don't trigger a
+ * controller teardown as a result of a keep-alive expiration.
+ */
+ ctrl->reset_tbkas = true;
+ sq->ctrl->sqs[sq->qid] = NULL;
nvmet_ctrl_put(ctrl);
sq->ctrl = NULL; /* allows reusing the queue later */
}
@@ -798,6 +832,7 @@ int nvmet_sq_init(struct nvmet_sq *sq)
}
init_completion(&sq->free_done);
init_completion(&sq->confirm_done);
+ nvmet_auth_sq_init(sq);
return 0;
}
@@ -837,15 +872,23 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
struct nvme_command *cmd = req->cmd;
u16 ret;
- ret = nvmet_check_ctrl_status(req, cmd);
+ if (nvme_is_fabrics(cmd))
+ return nvmet_parse_fabrics_io_cmd(req);
+
+ if (unlikely(!nvmet_check_auth_status(req)))
+ return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+
+ ret = nvmet_check_ctrl_status(req);
+ if (unlikely(ret))
+ return ret;
+
+ if (nvmet_is_passthru_req(req))
+ return nvmet_parse_passthru_io_cmd(req);
+
+ ret = nvmet_req_find_ns(req);
if (unlikely(ret))
return ret;
- req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
- if (unlikely(!req->ns)) {
- req->error_loc = offsetof(struct nvme_common_command, nsid);
- return NVME_SC_INVALID_NS | NVME_SC_DNR;
- }
ret = nvmet_check_ana_state(req->port, req->ns);
if (unlikely(ret)) {
req->error_loc = offsetof(struct nvme_common_command, nsid);
@@ -857,10 +900,18 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
return ret;
}
- if (req->ns->file)
- return nvmet_file_parse_io_cmd(req);
- else
+ switch (req->ns->csi) {
+ case NVME_CSI_NVM:
+ if (req->ns->file)
+ return nvmet_file_parse_io_cmd(req);
return nvmet_bdev_parse_io_cmd(req);
+ case NVME_CSI_ZNS:
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
+ return nvmet_bdev_zns_parse_io_cmd(req);
+ return NVME_SC_INVALID_IO_CMD_SET;
+ default:
+ return NVME_SC_INVALID_IO_CMD_SET;
+ }
}
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
@@ -873,16 +924,17 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->sq = sq;
req->ops = ops;
req->sg = NULL;
+ req->metadata_sg = NULL;
req->sg_cnt = 0;
+ req->metadata_sg_cnt = 0;
req->transfer_len = 0;
+ req->metadata_len = 0;
req->cqe->status = 0;
req->cqe->sq_head = 0;
req->ns = NULL;
req->error_loc = NVMET_NO_ERROR_LOC;
req->error_slba = 0;
- trace_nvmet_req_init(req, req->cmd);
-
/* no support for fused commands yet */
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
req->error_loc = offsetof(struct nvme_common_command, flags);
@@ -912,13 +964,15 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
if (status)
goto fail;
+ trace_nvmet_req_init(req, req->cmd);
+
if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto fail;
}
if (sq->ctrl)
- sq->ctrl->cmd_seen = true;
+ sq->ctrl->reset_tbkas = true;
return true;
@@ -936,9 +990,9 @@ void nvmet_req_uninit(struct nvmet_req *req)
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
-bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
+bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
- if (unlikely(data_len != req->transfer_len)) {
+ if (unlikely(len != req->transfer_len)) {
req->error_loc = offsetof(struct nvme_common_command, dptr);
nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
return false;
@@ -946,7 +1000,7 @@ bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
return true;
}
-EXPORT_SYMBOL_GPL(nvmet_check_data_len);
+EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
{
@@ -959,50 +1013,89 @@ bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
return true;
}
-int nvmet_req_alloc_sgl(struct nvmet_req *req)
+static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
{
- struct pci_dev *p2p_dev = NULL;
-
- if (IS_ENABLED(CONFIG_PCI_P2PDMA)) {
- if (req->sq->ctrl && req->ns)
- p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
- req->ns->nsid);
+ return req->transfer_len - req->metadata_len;
+}
- req->p2p_dev = NULL;
- if (req->sq->qid && p2p_dev) {
- req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
- req->transfer_len);
- if (req->sg) {
- req->p2p_dev = p2p_dev;
- return 0;
- }
- }
+static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
+ struct nvmet_req *req)
+{
+ req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
+ nvmet_data_transfer_len(req));
+ if (!req->sg)
+ goto out_err;
- /*
- * If no P2P memory was available we fallback to using
- * regular memory
- */
+ if (req->metadata_len) {
+ req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
+ &req->metadata_sg_cnt, req->metadata_len);
+ if (!req->metadata_sg)
+ goto out_free_sg;
}
- req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
+ req->p2p_dev = p2p_dev;
+
+ return 0;
+out_free_sg:
+ pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+out_err:
+ return -ENOMEM;
+}
+
+static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
+{
+ if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
+ !req->sq->ctrl || !req->sq->qid || !req->ns)
+ return NULL;
+ return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
+}
+
+int nvmet_req_alloc_sgls(struct nvmet_req *req)
+{
+ struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
+
+ if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
+ return 0;
+
+ req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
+ &req->sg_cnt);
if (unlikely(!req->sg))
- return -ENOMEM;
+ goto out;
+
+ if (req->metadata_len) {
+ req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
+ &req->metadata_sg_cnt);
+ if (unlikely(!req->metadata_sg))
+ goto out_free;
+ }
return 0;
+out_free:
+ sgl_free(req->sg);
+out:
+ return -ENOMEM;
}
-EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgl);
+EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);
-void nvmet_req_free_sgl(struct nvmet_req *req)
+void nvmet_req_free_sgls(struct nvmet_req *req)
{
- if (req->p2p_dev)
+ if (req->p2p_dev) {
pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
- else
+ if (req->metadata_sg)
+ pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
+ req->p2p_dev = NULL;
+ } else {
sgl_free(req->sg);
+ if (req->metadata_sg)
+ sgl_free(req->metadata_sg);
+ }
req->sg = NULL;
+ req->metadata_sg = NULL;
req->sg_cnt = 0;
+ req->metadata_sg_cnt = 0;
}
-EXPORT_SYMBOL_GPL(nvmet_req_free_sgl);
+EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
static inline bool nvmet_cc_en(u32 cc)
{
@@ -1039,15 +1132,37 @@ static inline u8 nvmet_cc_iocqes(u32 cc)
return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}
+static inline bool nvmet_css_supported(u8 cc_css)
+{
+ switch (cc_css << NVME_CC_CSS_SHIFT) {
+ case NVME_CC_CSS_NVM:
+ case NVME_CC_CSS_CSI:
+ return true;
+ default:
+ return false;
+ }
+}
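For readers decoding the shift above: nvmet_cc_css() extracts the 3-bit CC.CSS field, and nvmet_css_supported() shifts it back to compare against the NVME_CC_CSS_* encodings. A standalone sketch of that round trip, using local stand-in constants assumed to mirror include/linux/nvme.h:

    /* Sketch of the CC.CSS round trip; constants are local stand-ins. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CC_CSS_SHIFT   4
    #define CC_CSS_NVM     (0 << CC_CSS_SHIFT)  /* NVM command set only */
    #define CC_CSS_CSI     (6 << CC_CSS_SHIFT)  /* all supported I/O command sets */

    static uint8_t cc_css(uint32_t cc)
    {
        return (cc >> CC_CSS_SHIFT) & 0x7;      /* 3-bit CSS field */
    }

    static bool css_supported(uint8_t css)
    {
        switch (css << CC_CSS_SHIFT) {
        case CC_CSS_NVM:
        case CC_CSS_CSI:
            return true;
        default:
            return false;
        }
    }

    int main(void)
    {
        uint32_t cc = 0x61;  /* EN=1, CSS=110b: host selected "all command sets" */

        printf("CSS=%#x supported=%d\n", cc_css(cc), css_supported(cc_css(cc)));
        return 0;
    }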
+
static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
lockdep_assert_held(&ctrl->lock);
- if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
- nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
- nvmet_cc_mps(ctrl->cc) != 0 ||
+ /*
+ * Only I/O controllers should verify iosqes,iocqes.
+ * Strictly speaking, the spec says a discovery controller
+ * should verify iosqes,iocqes are zeroed, however that
+ * would break backwards compatibility, so don't enforce it.
+ */
+ if (!nvmet_is_disc_subsys(ctrl->subsys) &&
+ (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
+ nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
+ ctrl->csts = NVME_CSTS_CFS;
+ return;
+ }
+
+ if (nvmet_cc_mps(ctrl->cc) != 0 ||
nvmet_cc_ams(ctrl->cc) != 0 ||
- nvmet_cc_css(ctrl->cc) != 0) {
+ !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) {
ctrl->csts = NVME_CSTS_CFS;
return;
}
@@ -1060,7 +1175,8 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
* in case a host died before it enabled the controller. Hence, simply
* reset the keep alive timer when the controller is enabled.
*/
- mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ if (ctrl->kato)
+ mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}
static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
@@ -1097,25 +1213,33 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
/* command sets supported: NVMe command set: */
ctrl->cap = (1ULL << 37);
+ /* Controller supports one or more I/O Command Sets */
+ ctrl->cap |= (1ULL << 43);
/* CC.EN timeout in 500msec units: */
ctrl->cap |= (15ULL << 24);
/* maximum queue entries supported: */
- ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+ if (ctrl->ops->get_max_queue_size)
+ ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1;
+ else
+ ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+
+ if (nvmet_is_passthru_subsys(ctrl->subsys))
+ nvmet_passthrough_override_cap(ctrl);
}
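A worked example of the CAP value this now builds when the transport does not override the queue size; NVMET_QUEUE_SIZE is assumed to be 1024 here, so MQES (a 0's based field) comes out as 1023:

    /* Sketch of the CAP composition in nvmet_init_cap(); QUEUE_SIZE is assumed. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define QUEUE_SIZE 1024

    int main(void)
    {
        uint64_t cap = 0;

        cap |= (1ULL << 37);   /* CSS bit 37: NVM command set supported */
        cap |= (1ULL << 43);   /* CSS bit 43: one or more I/O command sets */
        cap |= (15ULL << 24);  /* TO: CC.EN timeout, 15 * 500ms = 7.5s */
        cap |= QUEUE_SIZE - 1; /* MQES: 0's based maximum queue entries */

        printf("CAP=%#" PRIx64 " MQES=%" PRIu64 " TO=%" PRIu64 "\n",
               cap, cap & 0xffff, (cap >> 24) & 0xff);
        return 0;
    }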
-u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
- struct nvmet_req *req, struct nvmet_ctrl **ret)
+struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
+ const char *hostnqn, u16 cntlid,
+ struct nvmet_req *req)
{
+ struct nvmet_ctrl *ctrl = NULL;
struct nvmet_subsys *subsys;
- struct nvmet_ctrl *ctrl;
- u16 status = 0;
subsys = nvmet_find_get_subsys(req->port, subsysnqn);
if (!subsys) {
pr_warn("connect request for invalid subsystem %s!\n",
subsysnqn);
req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
- return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ goto out;
}
mutex_lock(&subsys->lock);
@@ -1128,35 +1252,41 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
if (!kref_get_unless_zero(&ctrl->ref))
continue;
- *ret = ctrl;
- goto out;
+ /* ctrl found */
+ goto found;
}
}
+ ctrl = NULL; /* ctrl not found */
pr_warn("could not find controller %d for subsys %s / host %s\n",
cntlid, subsysnqn, hostnqn);
req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
- status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
-out:
+found:
mutex_unlock(&subsys->lock);
nvmet_subsys_put(subsys);
- return status;
+out:
+ return ctrl;
}
-u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
+u16 nvmet_check_ctrl_status(struct nvmet_req *req)
{
if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
- cmd->common.opcode, req->sq->qid);
+ req->cmd->common.opcode, req->sq->qid);
return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
}
if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
- cmd->common.opcode, req->sq->qid);
+ req->cmd->common.opcode, req->sq->qid);
return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
}
+
+ if (unlikely(!nvmet_check_auth_status(req))) {
+ pr_warn("qid %d not authenticated\n", req->sq->qid);
+ return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+ }
return 0;
}
@@ -1169,7 +1299,7 @@ bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
if (subsys->allow_any_host)
return true;
- if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
+ if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */
return true;
list_for_each_entry(p, &subsys->hosts, entry) {
@@ -1187,14 +1317,14 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
struct nvmet_req *req)
{
struct nvmet_ns *ns;
+ unsigned long idx;
if (!req->p2p_client)
return;
ctrl->p2p_client = get_device(req->p2p_client);
- list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link,
- lockdep_is_held(&ctrl->subsys->lock))
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns)
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}
@@ -1235,10 +1365,10 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
pr_warn("connect request for invalid subsystem %s!\n",
subsysnqn);
req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+ req->error_loc = offsetof(struct nvme_common_command, dptr);
goto out;
}
- status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
down_read(&nvmet_config_sem);
if (!nvmet_host_allowed(subsys, hostnqn)) {
pr_info("connect by host %s for subsystem %s not allowed\n",
@@ -1246,6 +1376,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
up_read(&nvmet_config_sem);
status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
+ req->error_loc = offsetof(struct nvme_common_command, dptr);
goto out_put_subsystem;
}
up_read(&nvmet_config_sem);
@@ -1256,20 +1387,27 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
goto out_put_subsystem;
mutex_init(&ctrl->lock);
- nvmet_init_cap(ctrl);
-
ctrl->port = req->port;
+ ctrl->ops = req->ops;
+
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+ /* Loop targets default to clearing the namespace IDs */
+ if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
+ subsys->clear_ids = 1;
+#endif
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
INIT_LIST_HEAD(&ctrl->async_events);
INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
+ INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
kref_init(&ctrl->ref);
ctrl->subsys = subsys;
+ nvmet_init_cap(ctrl);
WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
@@ -1277,20 +1415,17 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (!ctrl->changed_ns_list)
goto out_free_ctrl;
- ctrl->cqs = kcalloc(subsys->max_qid + 1,
- sizeof(struct nvmet_cq *),
- GFP_KERNEL);
- if (!ctrl->cqs)
- goto out_free_changed_ns_list;
-
ctrl->sqs = kcalloc(subsys->max_qid + 1,
sizeof(struct nvmet_sq *),
GFP_KERNEL);
if (!ctrl->sqs)
- goto out_free_cqs;
+ goto out_free_changed_ns_list;
- ret = ida_simple_get(&cntlid_ida,
- NVME_CNTLID_MIN, NVME_CNTLID_MAX,
+ if (subsys->cntlid_min > subsys->cntlid_max)
+ goto out_free_sqs;
+
+ ret = ida_alloc_range(&cntlid_ida,
+ subsys->cntlid_min, subsys->cntlid_max,
GFP_KERNEL);
if (ret < 0) {
status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
@@ -1298,13 +1433,11 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
}
ctrl->cntlid = ret;
- ctrl->ops = req->ops;
-
/*
* Discovery controllers may use some arbitrary high value
* in order to cleanup stale discovery sessions
*/
- if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
+ if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
kato = NVMET_DISC_KATO_MS;
/* keep-alive timeout in seconds */
@@ -1325,8 +1458,6 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
out_free_sqs:
kfree(ctrl->sqs);
-out_free_cqs:
- kfree(ctrl->cqs);
out_free_changed_ns_list:
kfree(ctrl->changed_ns_list);
out_free_ctrl:
@@ -1352,10 +1483,12 @@ static void nvmet_ctrl_free(struct kref *ref)
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fatal_err_work);
- ida_simple_remove(&cntlid_ida, ctrl->cntlid);
+ nvmet_destroy_auth(ctrl);
+
+ ida_free(&cntlid_ida, ctrl->cntlid);
+ nvmet_async_events_free(ctrl);
kfree(ctrl->sqs);
- kfree(ctrl->cqs);
kfree(ctrl->changed_ns_list);
kfree(ctrl);
@@ -1372,7 +1505,7 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
mutex_lock(&ctrl->lock);
if (!(ctrl->csts & NVME_CSTS_CFS)) {
ctrl->csts |= NVME_CSTS_CFS;
- schedule_work(&ctrl->fatal_err_work);
+ queue_work(nvmet_wq, &ctrl->fatal_err_work);
}
mutex_unlock(&ctrl->lock);
}
@@ -1410,43 +1543,60 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
enum nvme_subsys_type type)
{
struct nvmet_subsys *subsys;
+ char serial[NVMET_SN_MAX_SIZE / 2];
+ int ret;
subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
if (!subsys)
return ERR_PTR(-ENOMEM);
- subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
+ subsys->ver = NVMET_DEFAULT_VS;
/* generate a random serial number as our controllers are ephemeral: */
- get_random_bytes(&subsys->serial, sizeof(subsys->serial));
+ get_random_bytes(&serial, sizeof(serial));
+ bin2hex(subsys->serial, &serial, sizeof(serial));
+
+ subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
+ if (!subsys->model_number) {
+ ret = -ENOMEM;
+ goto free_subsys;
+ }
switch (type) {
case NVME_NQN_NVME:
subsys->max_qid = NVMET_NR_QUEUES;
break;
case NVME_NQN_DISC:
+ case NVME_NQN_CURR:
subsys->max_qid = 0;
break;
default:
pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
- kfree(subsys);
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto free_mn;
}
subsys->type = type;
subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
GFP_KERNEL);
if (!subsys->subsysnqn) {
- kfree(subsys);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto free_mn;
}
-
+ subsys->cntlid_min = NVME_CNTLID_MIN;
+ subsys->cntlid_max = NVME_CNTLID_MAX;
kref_init(&subsys->ref);
mutex_init(&subsys->lock);
- INIT_LIST_HEAD(&subsys->namespaces);
+ xa_init(&subsys->namespaces);
INIT_LIST_HEAD(&subsys->ctrls);
INIT_LIST_HEAD(&subsys->hosts);
return subsys;
+
+free_mn:
+ kfree(subsys->model_number);
+free_subsys:
+ kfree(subsys);
+ return ERR_PTR(ret);
}
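The serial-number handling above can be reproduced in a few lines of userspace C: half of NVMET_SN_MAX_SIZE random bytes are hex-expanded so the stored serial is always NVMET_SN_MAX_SIZE printable characters. NVMET_SN_MAX_SIZE is assumed to be 20 in this sketch, and getrandom() stands in for get_random_bytes():

    /* Sketch of the random hex serial; SN_MAX_SIZE and getrandom() are stand-ins. */
    #include <stdio.h>
    #include <sys/random.h>

    #define SN_MAX_SIZE 20

    int main(void)
    {
        unsigned char raw[SN_MAX_SIZE / 2];
        char serial[SN_MAX_SIZE + 1];
        size_t i;

        if (getrandom(raw, sizeof(raw), 0) != (ssize_t)sizeof(raw)) {
            perror("getrandom");
            return 1;
        }
        for (i = 0; i < sizeof(raw); i++)  /* open-coded bin2hex() */
            snprintf(&serial[2 * i], 3, "%02x", raw[i]);

        printf("serial: %s (%zu chars)\n", serial, sizeof(serial) - 1);
        return 0;
    }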
static void nvmet_subsys_free(struct kref *ref)
@@ -1454,9 +1604,13 @@ static void nvmet_subsys_free(struct kref *ref)
struct nvmet_subsys *subsys =
container_of(ref, struct nvmet_subsys, ref);
- WARN_ON_ONCE(!list_empty(&subsys->namespaces));
+ WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
+
+ xa_destroy(&subsys->namespaces);
+ nvmet_passthru_subsys_free(subsys);
kfree(subsys->subsysnqn);
+ kfree(subsys->model_number);
kfree(subsys);
}
@@ -1481,16 +1635,26 @@ static int __init nvmet_init(void)
nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
+ zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
+ if (!zbd_wq)
+ return -ENOMEM;
+
buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
WQ_MEM_RECLAIM, 0);
if (!buffered_io_wq) {
error = -ENOMEM;
- goto out;
+ goto out_free_zbd_work_queue;
+ }
+
+ nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+ if (!nvmet_wq) {
+ error = -ENOMEM;
+ goto out_free_buffered_work_queue;
}
error = nvmet_init_discovery();
if (error)
- goto out_free_work_queue;
+ goto out_free_nvmet_work_queue;
error = nvmet_init_configfs();
if (error)
@@ -1499,9 +1663,12 @@ static int __init nvmet_init(void)
out_exit_discovery:
nvmet_exit_discovery();
-out_free_work_queue:
+out_free_nvmet_work_queue:
+ destroy_workqueue(nvmet_wq);
+out_free_buffered_work_queue:
destroy_workqueue(buffered_io_wq);
-out:
+out_free_zbd_work_queue:
+ destroy_workqueue(zbd_wq);
return error;
}
@@ -1510,7 +1677,9 @@ static void __exit nvmet_exit(void)
nvmet_exit_configfs();
nvmet_exit_discovery();
ida_destroy(&cntlid_ida);
+ destroy_workqueue(nvmet_wq);
destroy_workqueue(buffered_io_wq);
+ destroy_workqueue(zbd_wq);
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 0c2274b21e15..668d257fa986 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -69,6 +69,7 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
struct nvmet_port *port;
struct nvmet_subsys_link *s;
+ lockdep_assert_held(&nvmet_config_sem);
nvmet_genctr++;
list_for_each_entry(port, nvmet_ports, global_entry)
@@ -145,7 +146,7 @@ static size_t discovery_log_entries(struct nvmet_req *req)
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmet_subsys_link *p;
struct nvmet_port *r;
- size_t entries = 0;
+ size_t entries = 1;
list_for_each_entry(p, &req->port->subsystems, entry) {
if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
@@ -170,19 +171,22 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
u32 numrec = 0;
u16 status = 0;
void *buffer;
+ char traddr[NVMF_TRADDR_SIZE];
- if (!nvmet_check_data_len(req, data_len))
+ if (!nvmet_check_transfer_len(req, data_len))
return;
if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
req->error_loc =
offsetof(struct nvme_get_log_page_command, lid);
- status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto out;
}
/* Spec requires dword aligned offsets */
if (offset & 0x3) {
+ req->error_loc =
+ offsetof(struct nvme_get_log_page_command, lpo);
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto out;
}
@@ -200,15 +204,19 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
status = NVME_SC_INTERNAL;
goto out;
}
-
hdr = buffer;
- list_for_each_entry(p, &req->port->subsystems, entry) {
- char traddr[NVMF_TRADDR_SIZE];
+ nvmet_set_disc_traddr(req, req->port, traddr);
+
+ nvmet_format_discovery_entry(hdr, req->port,
+ nvmet_disc_subsys->subsysnqn,
+ traddr, NVME_NQN_CURR, numrec);
+ numrec++;
+
+ list_for_each_entry(p, &req->port->subsystems, entry) {
if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
continue;
- nvmet_set_disc_traddr(req, req->port, traddr);
nvmet_format_discovery_entry(hdr, req->port,
p->subsys->subsysnqn, traddr,
NVME_NQN_NVME, numrec);
@@ -241,15 +249,14 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_id_ctrl *id;
- const char model[] = "Linux";
u16 status = 0;
- if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE))
+ if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
return;
if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
req->error_loc = offsetof(struct nvme_identify, cns);
- status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto out;
}
@@ -259,14 +266,15 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
goto out;
}
- memset(id->sn, ' ', sizeof(id->sn));
- bin2hex(id->sn, &ctrl->subsys->serial,
- min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
+ memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
memset(id->fr, ' ', sizeof(id->fr));
- memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
+ memcpy_and_pad(id->mn, sizeof(id->mn), ctrl->subsys->model_number,
+ strlen(ctrl->subsys->model_number), ' ');
memcpy_and_pad(id->fr, sizeof(id->fr),
UTS_RELEASE, strlen(UTS_RELEASE), ' ');
+ id->cntrltype = NVME_CTRL_DISC;
+
/* no limit on data transfer sizes for now */
id->mdts = 0;
id->cntlid = cpu_to_le16(ctrl->cntlid);
@@ -277,14 +285,14 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
- if (ctrl->ops->has_keyed_sgls)
+ if (ctrl->ops->flags & NVMF_KEYED_SGLS)
id->sgls |= cpu_to_le32(1 << 2);
if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);
- strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
+ strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
@@ -298,7 +306,7 @@ static void nvmet_execute_disc_set_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 stat;
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
switch (cdw10 & 0xff) {
@@ -324,7 +332,7 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 stat = 0;
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
switch (cdw10 & 0xff) {
@@ -376,7 +384,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
req->execute = nvmet_execute_disc_identify;
return 0;
default:
- pr_err("unhandled cmd %d\n", cmd->common.opcode);
+ pr_debug("unhandled cmd %d\n", cmd->common.opcode);
req->error_loc = offsetof(struct nvme_common_command, opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
@@ -386,7 +394,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
int __init nvmet_init_discovery(void)
{
nvmet_disc_subsys =
- nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
+ nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_CURR);
return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
}
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
new file mode 100644
index 000000000000..7970a7640e58
--- /dev/null
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -0,0 +1,537 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics DH-HMAC-CHAP authentication command handling.
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
+ * All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/blkdev.h>
+#include <linux/random.h>
+#include <linux/nvme-auth.h>
+#include <crypto/hash.h>
+#include <crypto/kpp.h>
+#include "nvmet.h"
+
+static void nvmet_auth_expired_work(struct work_struct *work)
+{
+ struct nvmet_sq *sq = container_of(to_delayed_work(work),
+ struct nvmet_sq, auth_expired_work);
+
+ pr_debug("%s: ctrl %d qid %d transaction %u expired, resetting\n",
+ __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid);
+ sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ sq->dhchap_tid = -1;
+}
+
+void nvmet_auth_sq_init(struct nvmet_sq *sq)
+{
+ /* Initialize in-band authentication */
+ INIT_DELAYED_WORK(&sq->auth_expired_work, nvmet_auth_expired_work);
+ sq->authenticated = false;
+ sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+}
+
+static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_auth_dhchap_negotiate_data *data = d;
+ int i, hash_id = 0, fallback_hash_id = 0, dhgid, fallback_dhgid;
+
+ pr_debug("%s: ctrl %d qid %d: data sc_d %d napd %d authid %d halen %d dhlen %d\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ data->sc_c, data->napd, data->auth_protocol[0].dhchap.authid,
+ data->auth_protocol[0].dhchap.halen,
+ data->auth_protocol[0].dhchap.dhlen);
+ req->sq->dhchap_tid = le16_to_cpu(data->t_id);
+ if (data->sc_c)
+ return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+
+ if (data->napd != 1)
+ return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+
+ if (data->auth_protocol[0].dhchap.authid !=
+ NVME_AUTH_DHCHAP_AUTH_ID)
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+
+ for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) {
+ u8 host_hmac_id = data->auth_protocol[0].dhchap.idlist[i];
+
+ if (!fallback_hash_id &&
+ crypto_has_shash(nvme_auth_hmac_name(host_hmac_id), 0, 0))
+ fallback_hash_id = host_hmac_id;
+ if (ctrl->shash_id != host_hmac_id)
+ continue;
+ hash_id = ctrl->shash_id;
+ break;
+ }
+ if (hash_id == 0) {
+ if (fallback_hash_id == 0) {
+ pr_debug("%s: ctrl %d qid %d: no usable hash found\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ }
+ pr_debug("%s: ctrl %d qid %d: no usable hash found, falling back to %s\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ nvme_auth_hmac_name(fallback_hash_id));
+ ctrl->shash_id = fallback_hash_id;
+ }
+
+ dhgid = -1;
+ fallback_dhgid = -1;
+ for (i = 0; i < data->auth_protocol[0].dhchap.dhlen; i++) {
+ int tmp_dhgid = data->auth_protocol[0].dhchap.idlist[i + 30];
+
+ if (tmp_dhgid == ctrl->dh_gid) {
+ dhgid = tmp_dhgid;
+ break;
+ }
+ if (fallback_dhgid < 0) {
+ const char *kpp = nvme_auth_dhgroup_kpp(tmp_dhgid);
+
+ if (crypto_has_kpp(kpp, 0, 0))
+ fallback_dhgid = tmp_dhgid;
+ }
+ }
+ if (dhgid < 0) {
+ if (fallback_dhgid < 0) {
+ pr_debug("%s: ctrl %d qid %d: no usable DH group found\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ }
+ pr_debug("%s: ctrl %d qid %d: configured DH group %s not found\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ nvme_auth_dhgroup_name(fallback_dhgid));
+ ctrl->dh_gid = fallback_dhgid;
+ }
+ pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid);
+ return 0;
+}
+
+static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_auth_dhchap_reply_data *data = d;
+ u16 dhvlen = le16_to_cpu(data->dhvlen);
+ u8 *response;
+
+ pr_debug("%s: ctrl %d qid %d: data hl %d cvalid %d dhvlen %u\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ data->hl, data->cvalid, dhvlen);
+
+ if (dhvlen) {
+ if (!ctrl->dh_tfm)
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ if (nvmet_auth_ctrl_sesskey(req, data->rval + 2 * data->hl,
+ dhvlen) < 0)
+ return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ }
+
+ response = kmalloc(data->hl, GFP_KERNEL);
+ if (!response)
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+
+ if (!ctrl->host_key) {
+ pr_warn("ctrl %d qid %d no host key\n",
+ ctrl->cntlid, req->sq->qid);
+ kfree(response);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+ if (nvmet_auth_host_hash(req, response, data->hl) < 0) {
+ pr_debug("ctrl %d qid %d host hash failed\n",
+ ctrl->cntlid, req->sq->qid);
+ kfree(response);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+
+ if (memcmp(data->rval, response, data->hl)) {
+ pr_info("ctrl %d qid %d host response mismatch\n",
+ ctrl->cntlid, req->sq->qid);
+ kfree(response);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+ kfree(response);
+ pr_debug("%s: ctrl %d qid %d host authenticated\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ if (data->cvalid) {
+ req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl,
+ GFP_KERNEL);
+ if (!req->sq->dhchap_c2)
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+
+ pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
+ __func__, ctrl->cntlid, req->sq->qid, data->hl,
+ req->sq->dhchap_c2);
+ req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
+ } else {
+ req->sq->authenticated = true;
+ req->sq->dhchap_c2 = NULL;
+ }
+
+ return 0;
+}
+
+static u16 nvmet_auth_failure2(void *d)
+{
+ struct nvmf_auth_dhchap_failure_data *data = d;
+
+ return data->rescode_exp;
+}
+
+void nvmet_execute_auth_send(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_auth_dhchap_success2_data *data;
+ void *d;
+ u32 tl;
+ u16 status = 0;
+
+ if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, secp);
+ goto done;
+ }
+ if (req->cmd->auth_send.spsp0 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, spsp0);
+ goto done;
+ }
+ if (req->cmd->auth_send.spsp1 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, spsp1);
+ goto done;
+ }
+ tl = le32_to_cpu(req->cmd->auth_send.tl);
+ if (!tl) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, tl);
+ goto done;
+ }
+ if (!nvmet_check_transfer_len(req, tl)) {
+ pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);
+ return;
+ }
+
+ d = kmalloc(tl, GFP_KERNEL);
+ if (!d) {
+ status = NVME_SC_INTERNAL;
+ goto done;
+ }
+
+ status = nvmet_copy_from_sgl(req, 0, d, tl);
+ if (status)
+ goto done_kfree;
+
+ data = d;
+ pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
+ ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
+ req->sq->dhchap_step);
+ if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
+ data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
+ goto done_failure1;
+ if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
+ if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
+ /* Restart negotiation */
+ pr_debug("%s: ctrl %d qid %d reset negotiation\n", __func__,
+ ctrl->cntlid, req->sq->qid);
+ if (!req->sq->qid) {
+ if (nvmet_setup_auth(ctrl) < 0) {
+ status = NVME_SC_INTERNAL;
+ pr_err("ctrl %d qid 0 failed to setup"
+ "re-authentication",
+ ctrl->cntlid);
+ goto done_failure1;
+ }
+ }
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ } else if (data->auth_id != req->sq->dhchap_step)
+ goto done_failure1;
+ /* Validate negotiation parameters */
+ status = nvmet_auth_negotiate(req, d);
+ if (status == 0)
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
+ else {
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status = status;
+ status = 0;
+ }
+ goto done_kfree;
+ }
+ if (data->auth_id != req->sq->dhchap_step) {
+ pr_debug("%s: ctrl %d qid %d step mismatch (%d != %d)\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ data->auth_id, req->sq->dhchap_step);
+ goto done_failure1;
+ }
+ if (le16_to_cpu(data->t_id) != req->sq->dhchap_tid) {
+ pr_debug("%s: ctrl %d qid %d invalid transaction %d (expected %d)\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ le16_to_cpu(data->t_id),
+ req->sq->dhchap_tid);
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status =
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ goto done_kfree;
+ }
+
+ switch (data->auth_id) {
+ case NVME_AUTH_DHCHAP_MESSAGE_REPLY:
+ status = nvmet_auth_reply(req, d);
+ if (status == 0)
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
+ else {
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status = status;
+ status = 0;
+ }
+ goto done_kfree;
+ case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
+ req->sq->authenticated = true;
+ pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ goto done_kfree;
+ case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
+ status = nvmet_auth_failure2(d);
+ if (status) {
+ pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
+ ctrl->cntlid, req->sq->qid, status);
+ req->sq->dhchap_status = status;
+ req->sq->authenticated = false;
+ status = 0;
+ }
+ goto done_kfree;
+ default:
+ req->sq->dhchap_status =
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+ req->sq->authenticated = false;
+ goto done_kfree;
+ }
+done_failure1:
+ req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+
+done_kfree:
+ kfree(d);
+done:
+ pr_debug("%s: ctrl %d qid %d dhchap status %x step %x\n", __func__,
+ ctrl->cntlid, req->sq->qid,
+ req->sq->dhchap_status, req->sq->dhchap_step);
+ if (status)
+ pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ status, req->error_loc);
+ req->cqe->result.u64 = 0;
+ nvmet_req_complete(req, status);
+ if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
+ req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
+ unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
+
+ mod_delayed_work(system_wq, &req->sq->auth_expired_work,
+ auth_expire_secs * HZ);
+ return;
+ }
+ /* Final states, clear up variables */
+ nvmet_auth_sq_free(req->sq);
+ if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
+ nvmet_ctrl_fatal_error(ctrl);
+}
+
+static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
+{
+ struct nvmf_auth_dhchap_challenge_data *data = d;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int ret = 0;
+ int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
+ int data_size = sizeof(*d) + hash_len;
+
+ if (ctrl->dh_tfm)
+ data_size += ctrl->dh_keysize;
+ if (al < data_size) {
+ pr_debug("%s: buffer too small (al %d need %d)\n", __func__,
+ al, data_size);
+ return -EINVAL;
+ }
+ memset(data, 0, data_size);
+ req->sq->dhchap_s1 = nvme_auth_get_seqnum();
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
+ data->t_id = cpu_to_le16(req->sq->dhchap_tid);
+ data->hashid = ctrl->shash_id;
+ data->hl = hash_len;
+ data->seqnum = cpu_to_le32(req->sq->dhchap_s1);
+ req->sq->dhchap_c1 = kmalloc(data->hl, GFP_KERNEL);
+ if (!req->sq->dhchap_c1)
+ return -ENOMEM;
+ get_random_bytes(req->sq->dhchap_c1, data->hl);
+ memcpy(data->cval, req->sq->dhchap_c1, data->hl);
+ if (ctrl->dh_tfm) {
+ data->dhgid = ctrl->dh_gid;
+ data->dhvlen = cpu_to_le16(ctrl->dh_keysize);
+ ret = nvmet_auth_ctrl_exponential(req, data->cval + data->hl,
+ ctrl->dh_keysize);
+ }
+ pr_debug("%s: ctrl %d qid %d seq %d transaction %d hl %d dhvlen %zu\n",
+ __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
+ req->sq->dhchap_tid, data->hl, ctrl->dh_keysize);
+ return ret;
+}
+
+static int nvmet_auth_success1(struct nvmet_req *req, void *d, int al)
+{
+ struct nvmf_auth_dhchap_success1_data *data = d;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
+
+ WARN_ON(al < sizeof(*data));
+ memset(data, 0, sizeof(*data));
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
+ data->t_id = cpu_to_le16(req->sq->dhchap_tid);
+ data->hl = hash_len;
+ if (req->sq->dhchap_c2) {
+ if (!ctrl->ctrl_key) {
+ pr_warn("ctrl %d qid %d no ctrl key\n",
+ ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+ if (nvmet_auth_ctrl_hash(req, data->rval, data->hl))
+ return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ data->rvalid = 1;
+ pr_debug("ctrl %d qid %d response %*ph\n",
+ ctrl->cntlid, req->sq->qid, data->hl, data->rval);
+ }
+ return 0;
+}
+
+static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al)
+{
+ struct nvmf_auth_dhchap_failure_data *data = d;
+
+ WARN_ON(al < sizeof(*data));
+ data->auth_type = NVME_AUTH_COMMON_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ data->t_id = cpu_to_le16(req->sq->dhchap_tid);
+ data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
+ data->rescode_exp = req->sq->dhchap_status;
+}
+
+void nvmet_execute_auth_receive(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ void *d;
+ u32 al;
+ u16 status = 0;
+
+ if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, secp);
+ goto done;
+ }
+ if (req->cmd->auth_receive.spsp0 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, spsp0);
+ goto done;
+ }
+ if (req->cmd->auth_receive.spsp1 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, spsp1);
+ goto done;
+ }
+ al = le32_to_cpu(req->cmd->auth_receive.al);
+ if (!al) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, al);
+ goto done;
+ }
+ if (!nvmet_check_transfer_len(req, al)) {
+ pr_debug("%s: transfer length mismatch (%u)\n", __func__, al);
+ return;
+ }
+
+ d = kmalloc(al, GFP_KERNEL);
+ if (!d) {
+ status = NVME_SC_INTERNAL;
+ goto done;
+ }
+ pr_debug("%s: ctrl %d qid %d step %x\n", __func__,
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
+ switch (req->sq->dhchap_step) {
+ case NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE:
+ if (nvmet_auth_challenge(req, d, al) < 0) {
+ pr_warn("ctrl %d qid %d: challenge error (%d)\n",
+ ctrl->cntlid, req->sq->qid, status);
+ status = NVME_SC_INTERNAL;
+ break;
+ }
+ if (status) {
+ req->sq->dhchap_status = status;
+ nvmet_auth_failure1(req, d, al);
+ pr_warn("ctrl %d qid %d: challenge status (%x)\n",
+ ctrl->cntlid, req->sq->qid,
+ req->sq->dhchap_status);
+ status = 0;
+ break;
+ }
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
+ break;
+ case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1:
+ status = nvmet_auth_success1(req, d, al);
+ if (status) {
+ req->sq->dhchap_status = status;
+ req->sq->authenticated = false;
+ nvmet_auth_failure1(req, d, al);
+ pr_warn("ctrl %d qid %d: success1 status (%x)\n",
+ ctrl->cntlid, req->sq->qid,
+ req->sq->dhchap_status);
+ break;
+ }
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
+ break;
+ case NVME_AUTH_DHCHAP_MESSAGE_FAILURE1:
+ req->sq->authenticated = false;
+ nvmet_auth_failure1(req, d, al);
+ pr_warn("ctrl %d qid %d failure1 (%x)\n",
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_status);
+ break;
+ default:
+ pr_warn("ctrl %d qid %d unhandled step (%d)\n",
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ nvmet_auth_failure1(req, d, al);
+ status = 0;
+ break;
+ }
+
+ status = nvmet_copy_to_sgl(req, 0, d, al);
+ kfree(d);
+done:
+ req->cqe->result.u64 = 0;
+ nvmet_req_complete(req, status);
+ if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
+ nvmet_auth_sq_free(req->sq);
+ else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
+ nvmet_auth_sq_free(req->sq);
+ nvmet_ctrl_fatal_error(ctrl);
+ }
+}
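/*
 * Illustrative sketch, not part of this patch: how the AUTH_Receive steps
 * handled above pair up with the host-driven AUTH_Send steps of a
 * DH-HMAC-CHAP exchange.  The table and its names are local to this sketch.
 */
struct dhchap_step_pair_example {
	const char *host_sends;		/* arrives via AUTH_Send    */
	const char *target_returns;	/* fetched via AUTH_Receive */
};

static const struct dhchap_step_pair_example dhchap_flow_example[] = {
	{ "negotiate", "challenge" },	/* dhchap_step CHALLENGE above     */
	{ "reply",     "success1"  },	/* dhchap_step SUCCESS1 above      */
	{ "success2",  NULL        },	/* bidirectional authentication only */
	/* any earlier error makes the next AUTH_Receive return failure1 */
};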
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index feef15c38ec9..43b5bd8bb6a5 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -12,7 +12,7 @@ static void nvmet_execute_prop_set(struct nvmet_req *req)
u64 val = le64_to_cpu(req->cmd->prop_set.value);
u16 status = 0;
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
if (req->cmd->prop_set.attrib & 1) {
@@ -41,7 +41,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
u16 status = 0;
u64 val = 0;
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
if (req->cmd->prop_get.attrib & 1) {
@@ -82,7 +82,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
nvmet_req_complete(req, status);
}
-u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
+u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -93,8 +93,39 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
case nvme_fabrics_type_property_get:
req->execute = nvmet_execute_prop_get;
break;
+#ifdef CONFIG_NVME_TARGET_AUTH
+ case nvme_fabrics_type_auth_send:
+ req->execute = nvmet_execute_auth_send;
+ break;
+ case nvme_fabrics_type_auth_receive:
+ req->execute = nvmet_execute_auth_receive;
+ break;
+#endif
+ default:
+ pr_debug("received unknown capsule type 0x%x\n",
+ cmd->fabrics.fctype);
+ req->error_loc = offsetof(struct nvmf_common_command, fctype);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+ return 0;
+}
+
+u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->fabrics.fctype) {
+#ifdef CONFIG_NVME_TARGET_AUTH
+ case nvme_fabrics_type_auth_send:
+ req->execute = nvmet_execute_auth_send;
+ break;
+ case nvme_fabrics_type_auth_receive:
+ req->execute = nvmet_execute_auth_receive;
+ break;
+#endif
default:
- pr_err("received unknown capsule type 0x%x\n",
+ pr_debug("received unknown capsule type 0x%x\n",
cmd->fabrics.fctype);
req->error_loc = offsetof(struct nvmf_common_command, fctype);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
@@ -109,21 +140,38 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
u16 qid = le16_to_cpu(c->qid);
u16 sqsize = le16_to_cpu(c->sqsize);
struct nvmet_ctrl *old;
+ u16 mqes = NVME_CAP_MQES(ctrl->cap);
u16 ret;
- old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
- if (old) {
- pr_warn("queue already connected!\n");
- req->error_loc = offsetof(struct nvmf_connect_command, opcode);
- return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
- }
if (!sqsize) {
pr_warn("queue size zero!\n");
req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
goto err;
}
+ if (ctrl->sqs[qid] != NULL) {
+ pr_warn("qid %u has already been created\n", qid);
+ req->error_loc = offsetof(struct nvmf_connect_command, qid);
+ return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ }
+
+ if (sqsize > mqes) {
+ pr_warn("sqsize %u is larger than MQES supported %u cntlid %d\n",
+ sqsize, mqes, ctrl->cntlid);
+ req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
+ return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ }
+
+ old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
+ if (old) {
+ pr_warn("queue already connected!\n");
+ req->error_loc = offsetof(struct nvmf_connect_command, opcode);
+ return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
+ }
+
/* note: convert queue size from 0's-based value to 1's-based value */
nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
@@ -138,6 +186,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
if (ret) {
pr_err("failed to install queue %d cntlid %d ret %x\n",
qid, ctrl->cntlid, ret);
+ ctrl->sqs[qid] = NULL;
goto err;
}
}
@@ -149,14 +198,21 @@ err:
return ret;
}
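/*
 * Illustrative sketch, not part of this patch: SQSIZE in the connect
 * command and CAP.MQES are both 0's-based values, which is why the checks
 * above compare them directly and the queue is then set up with
 * sqsize + 1 entries.  Standalone example under that assumption:
 */
#include <stdbool.h>
#include <stdint.h>

static bool example_sqsize_ok(uint16_t sqsize, uint16_t mqes)
{
	/* reject a zero-entry queue and anything beyond MQES */
	return sqsize != 0 && sqsize <= mqes;
}

/* e.g. with mqes = 127 (128 entries): sqsize 127 passes, 128 is rejected */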
+static u32 nvmet_connect_result(struct nvmet_ctrl *ctrl)
+{
+ return (u32)ctrl->cntlid |
+ (nvmet_has_auth(ctrl) ? NVME_CONNECT_AUTHREQ_ATR : 0);
+}
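/*
 * Illustrative sketch, not part of this patch: nvmet_connect_result()
 * above packs the controller ID into the low 16 bits of the connect
 * response result dword and OR's in the AUTHREQ flag when DH-HMAC-CHAP
 * is configured.  A host-side decode could look like the following;
 * EXAMPLE_AUTHREQ_FLAG is a stand-in, not the real
 * NVME_CONNECT_AUTHREQ_ATR definition.
 */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_AUTHREQ_FLAG	(1u << 16)	/* placeholder bit position */

static uint16_t example_connect_cntlid(uint32_t result)
{
	return (uint16_t)(result & 0xffff);
}

static bool example_connect_wants_auth(uint32_t result)
{
	return (result & EXAMPLE_AUTHREQ_FLAG) != 0;
}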
+
static void nvmet_execute_admin_connect(struct nvmet_req *req)
{
struct nvmf_connect_command *c = &req->cmd->connect;
struct nvmf_connect_data *d;
struct nvmet_ctrl *ctrl = NULL;
u16 status = 0;
+ int ret;
- if (!nvmet_check_data_len(req, sizeof(struct nvmf_connect_data)))
+ if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
return;
d = kmalloc(sizeof(*d), GFP_KERNEL);
@@ -190,25 +246,36 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
le32_to_cpu(c->kato), &ctrl);
- if (status) {
- if (status == (NVME_SC_INVALID_FIELD | NVME_SC_DNR))
- req->error_loc =
- offsetof(struct nvme_common_command, opcode);
+ if (status)
goto out;
- }
+
+ ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
uuid_copy(&ctrl->hostid, &d->hostid);
+ ret = nvmet_setup_auth(ctrl);
+ if (ret < 0) {
+ pr_err("Failed to setup authentication, error %d\n", ret);
+ nvmet_ctrl_put(ctrl);
+ if (ret == -EPERM)
+ status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR);
+ else
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
status = nvmet_install_queue(ctrl, req);
if (status) {
nvmet_ctrl_put(ctrl);
goto out;
}
- pr_info("creating controller %d for subsystem %s for NQN %s.\n",
- ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn);
- req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
-
+ pr_info("creating %s controller %d for subsystem %s for NQN %s%s%s.\n",
+ nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
+ ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
+ ctrl->pi_support ? " T10-PI is enabled" : "",
+ nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
+ req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
out:
kfree(d);
complete:
@@ -219,11 +286,11 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
{
struct nvmf_connect_command *c = &req->cmd->connect;
struct nvmf_connect_data *d;
- struct nvmet_ctrl *ctrl = NULL;
+ struct nvmet_ctrl *ctrl;
u16 qid = le16_to_cpu(c->qid);
u16 status = 0;
- if (!nvmet_check_data_len(req, sizeof(struct nvmf_connect_data)))
+ if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
return;
d = kmalloc(sizeof(*d), GFP_KERNEL);
@@ -246,11 +313,12 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
goto out;
}
- status = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
- le16_to_cpu(d->cntlid),
- req, &ctrl);
- if (status)
+ ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
+ le16_to_cpu(d->cntlid), req);
+ if (!ctrl) {
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
goto out;
+ }
if (unlikely(qid > ctrl->subsys->max_qid)) {
pr_warn("invalid queue id (%d)\n", qid);
@@ -260,14 +328,11 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
}
status = nvmet_install_queue(ctrl, req);
- if (status) {
- /* pass back cntlid that had the issue of installing queue */
- req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
+ if (status)
goto out_ctrl_put;
- }
pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
-
+ req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
out:
kfree(d);
complete:
@@ -284,13 +349,13 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
struct nvme_command *cmd = req->cmd;
if (!nvme_is_fabrics(cmd)) {
- pr_err("invalid command 0x%x on unconnected queue.\n",
+ pr_debug("invalid command 0x%x on unconnected queue.\n",
cmd->fabrics.opcode);
req->error_loc = offsetof(struct nvme_common_command, opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
- pr_err("invalid capsule type 0x%x on unconnected queue.\n",
+ pr_debug("invalid capsule type 0x%x on unconnected queue.\n",
cmd->fabrics.fctype);
req->error_loc = offsetof(struct nvmf_common_command, fctype);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index a0db6371b43e..ab2627e17bb9 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -14,6 +14,7 @@
#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
+#include "../host/fc.h"
/* *************************** Data Structures/Defines ****************** */
@@ -21,23 +22,21 @@
#define NVMET_LS_CTX_COUNT 256
-/* for this implementation, assume small single frame rqst/rsp */
-#define NVME_FC_MAX_LS_BUFFER_SIZE 2048
-
struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;
-struct nvmet_fc_ls_iod {
- struct nvmefc_tgt_ls_req *lsreq;
+struct nvmet_fc_ls_iod { /* for an LS RQST RCV */
+ struct nvmefc_ls_rsp *lsrsp;
struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */
- struct list_head ls_list; /* tgtport->ls_list */
+ struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */
struct nvmet_fc_tgtport *tgtport;
struct nvmet_fc_tgt_assoc *assoc;
+ void *hosthandle;
- u8 *rqstbuf;
- u8 *rspbuf;
+ union nvmefc_ls_requests *rqstbuf;
+ union nvmefc_ls_responses *rspbuf;
u16 rqstdatalen;
dma_addr_t rspdma;
@@ -46,6 +45,18 @@ struct nvmet_fc_ls_iod {
struct work_struct work;
} __aligned(sizeof(unsigned long long));
+struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */
+ struct nvmefc_ls_req ls_req;
+
+ struct nvmet_fc_tgtport *tgtport;
+ void *hosthandle;
+
+ int ls_error;
+ struct list_head lsreq_list; /* tgtport->ls_req_list */
+ bool req_queued;
+};
+
+
/* desired maximum for a single sequence - if sg list allows it */
#define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024)
@@ -83,7 +94,6 @@ struct nvmet_fc_fcp_iod {
};
struct nvmet_fc_tgtport {
-
struct nvmet_fc_target_port fc_target_port;
struct list_head tgt_list; /* nvmet_fc_target_list */
@@ -92,9 +102,11 @@ struct nvmet_fc_tgtport {
struct nvmet_fc_ls_iod *iod;
spinlock_t lock;
- struct list_head ls_list;
+ struct list_head ls_rcv_list;
+ struct list_head ls_req_list;
struct list_head ls_busylist;
struct list_head assoc_list;
+ struct list_head host_list;
struct ida assoc_cnt;
struct nvmet_fc_port_entry *pe;
struct kref ref;
@@ -133,17 +145,30 @@ struct nvmet_fc_tgt_queue {
struct list_head avail_defer_list;
struct workqueue_struct *work_q;
struct kref ref;
+ struct rcu_head rcu;
struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */
} __aligned(sizeof(unsigned long long));
+struct nvmet_fc_hostport {
+ struct nvmet_fc_tgtport *tgtport;
+ void *hosthandle;
+ struct list_head host_list;
+ struct kref ref;
+ u8 invalid;
+};
+
struct nvmet_fc_tgt_assoc {
u64 association_id;
u32 a_id;
+ atomic_t terminating;
struct nvmet_fc_tgtport *tgtport;
+ struct nvmet_fc_hostport *hostport;
+ struct nvmet_fc_ls_iod *rcv_disconn;
struct list_head a_list;
- struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
+ struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1];
struct kref ref;
struct work_struct del_work;
+ struct rcu_head rcu;
};
@@ -227,6 +252,8 @@ static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_fcp_iod *fod);
static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
+static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_iod *iod);
/* *********************** FC-NVME DMA Handling **************************** */
@@ -318,6 +345,188 @@ fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
}
+/* ********************** FC-NVME LS XMT Handling ************************* */
+
+
+static void
+__nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
+{
+ struct nvmet_fc_tgtport *tgtport = lsop->tgtport;
+ struct nvmefc_ls_req *lsreq = &lsop->ls_req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+
+ if (!lsop->req_queued) {
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ return;
+ }
+
+ list_del(&lsop->lsreq_list);
+
+ lsop->req_queued = false;
+
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
+ (lsreq->rqstlen + lsreq->rsplen),
+ DMA_BIDIRECTIONAL);
+
+ nvmet_fc_tgtport_put(tgtport);
+}
+
+static int
+__nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_req_op *lsop,
+ void (*done)(struct nvmefc_ls_req *req, int status))
+{
+ struct nvmefc_ls_req *lsreq = &lsop->ls_req;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!tgtport->ops->ls_req)
+ return -EOPNOTSUPP;
+
+ if (!nvmet_fc_tgtport_get(tgtport))
+ return -ESHUTDOWN;
+
+ lsreq->done = done;
+ lsop->req_queued = false;
+ INIT_LIST_HEAD(&lsop->lsreq_list);
+
+ lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
+ lsreq->rqstlen + lsreq->rsplen,
+ DMA_BIDIRECTIONAL);
+ if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
+ ret = -EFAULT;
+ goto out_puttgtport;
+ }
+ lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+
+ list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);
+
+ lsop->req_queued = true;
+
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle,
+ lsreq);
+ if (ret)
+ goto out_unlink;
+
+ return 0;
+
+out_unlink:
+ lsop->ls_error = ret;
+ spin_lock_irqsave(&tgtport->lock, flags);
+ lsop->req_queued = false;
+ list_del(&lsop->lsreq_list);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
+ (lsreq->rqstlen + lsreq->rsplen),
+ DMA_BIDIRECTIONAL);
+out_puttgtport:
+ nvmet_fc_tgtport_put(tgtport);
+
+ return ret;
+}
+
+static int
+nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_ls_req_op *lsop,
+ void (*done)(struct nvmefc_ls_req *req, int status))
+{
+ /* don't wait for completion */
+
+ return __nvmet_fc_send_ls_req(tgtport, lsop, done);
+}
+
+static void
+nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
+{
+ struct nvmet_fc_ls_req_op *lsop =
+ container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req);
+
+ __nvmet_fc_finish_ls_req(lsop);
+
+ /* fc-nvme target doesn't care about success or failure of cmd */
+
+ kfree(lsop);
+}
+
+/*
+ * This routine sends a FC-NVME LS to disconnect (aka terminate)
+ * the FC-NVME Association. Terminating the association also
+ * terminates the FC-NVME connections (per queue, both admin and io
+ * queues) that are part of the association. E.g. things are torn
+ * down, and the related FC-NVME Association ID and Connection IDs
+ * become invalid.
+ *
+ * The behavior of the fc-nvme target is such that its
+ * understanding of the association and connections will implicitly
+ * be torn down. The action is implicit as it may be due to a loss of
+ * connectivity with the fc-nvme host, so the target may never get a
+ * response even if it tried. As such, the action of this routine
+ * is to asynchronously send the LS, ignore any results of the LS, and
+ * continue on with terminating the association. If the fc-nvme host
+ * is present and receives the LS, it too can tear down.
+ */
+static void
+nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+ struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
+ struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
+ struct nvmet_fc_ls_req_op *lsop;
+ struct nvmefc_ls_req *lsreq;
+ int ret;
+
+ /*
+ * If the LLDD provides no ls_req entry point or there is no hosthandle,
+ * it's an older lldd and sending no message is normal. Otherwise, send
+ * unless the hostport has already been invalidated by the lldd.
+ */
+ if (!tgtport->ops->ls_req || !assoc->hostport ||
+ assoc->hostport->invalid)
+ return;
+
+ lsop = kzalloc((sizeof(*lsop) +
+ sizeof(*discon_rqst) + sizeof(*discon_acc) +
+ tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
+ if (!lsop) {
+ dev_info(tgtport->dev,
+ "{%d:%d} send Disconnect Association failed: ENOMEM\n",
+ tgtport->fc_target_port.port_num, assoc->a_id);
+ return;
+ }
+
+ discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
+ discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
+ lsreq = &lsop->ls_req;
+ if (tgtport->ops->lsrqst_priv_sz)
+ lsreq->private = (void *)&discon_acc[1];
+ else
+ lsreq->private = NULL;
+
+ lsop->tgtport = tgtport;
+ lsop->hosthandle = assoc->hostport->hosthandle;
+
+ nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
+ assoc->association_id);
+
+ ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
+ nvmet_fc_disconnect_assoc_done);
+ if (ret) {
+ dev_info(tgtport->dev,
+ "{%d:%d} XMT Disconnect Association failed: %d\n",
+ tgtport->fc_target_port.port_num, assoc->a_id, ret);
+ kfree(lsop);
+ }
+}
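/*
 * Illustrative sketch, not part of this patch: the disconnect path above
 * carves the request op, the LS request buffer, the LS response buffer and
 * the LLDD private area out of a single kzalloc(), which is why it indexes
 * past each piece with &lsop[1], &discon_rqst[1] and &discon_acc[1], and a
 * single kfree() in the done callback releases everything.  The generic
 * shape of that layout (kernel-style, assuming <linux/slab.h>):
 */
struct example_ls_op {
	void *private;		/* stands in for the real op state */
	/* followed in memory by: rqst buffer, rsp buffer, lldd private */
};

static struct example_ls_op *example_ls_op_alloc(size_t rqst_sz,
						 size_t rsp_sz,
						 size_t lldd_priv_sz)
{
	/* one allocation; callers carve it up exactly as the code above */
	return kzalloc(sizeof(struct example_ls_op) +
		       rqst_sz + rsp_sz + lldd_priv_sz, GFP_KERNEL);
}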
+
+
/* *********************** FC-NVME Port Management ************************ */
@@ -337,17 +546,18 @@ nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
iod->tgtport = tgtport;
- list_add_tail(&iod->ls_list, &tgtport->ls_list);
+ list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
- iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
- GFP_KERNEL);
+ iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) +
+ sizeof(union nvmefc_ls_responses),
+ GFP_KERNEL);
if (!iod->rqstbuf)
goto out_fail;
- iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
+ iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1];
iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
- NVME_FC_MAX_LS_BUFFER_SIZE,
+ sizeof(*iod->rspbuf),
DMA_TO_DEVICE);
if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
goto out_fail;
@@ -357,12 +567,12 @@ nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
out_fail:
kfree(iod->rqstbuf);
- list_del(&iod->ls_list);
+ list_del(&iod->ls_rcv_list);
for (iod--, i--; i >= 0; iod--, i--) {
fc_dma_unmap_single(tgtport->dev, iod->rspdma,
- NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+ sizeof(*iod->rspbuf), DMA_TO_DEVICE);
kfree(iod->rqstbuf);
- list_del(&iod->ls_list);
+ list_del(&iod->ls_rcv_list);
}
kfree(iod);
@@ -378,10 +588,10 @@ nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
fc_dma_unmap_single(tgtport->dev,
- iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
+ iod->rspdma, sizeof(*iod->rspbuf),
DMA_TO_DEVICE);
kfree(iod->rqstbuf);
- list_del(&iod->ls_list);
+ list_del(&iod->ls_rcv_list);
}
kfree(tgtport->iod);
}
@@ -393,10 +603,10 @@ nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
unsigned long flags;
spin_lock_irqsave(&tgtport->lock, flags);
- iod = list_first_entry_or_null(&tgtport->ls_list,
- struct nvmet_fc_ls_iod, ls_list);
+ iod = list_first_entry_or_null(&tgtport->ls_rcv_list,
+ struct nvmet_fc_ls_iod, ls_rcv_list);
if (iod)
- list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
+ list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
spin_unlock_irqrestore(&tgtport->lock, flags);
return iod;
}
@@ -409,7 +619,7 @@ nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
unsigned long flags;
spin_lock_irqsave(&tgtport->lock, flags);
- list_move(&iod->ls_list, &tgtport->ls_list);
+ list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
spin_unlock_irqrestore(&tgtport->lock, flags);
}
@@ -582,7 +792,6 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
u16 qid, u16 sqsize)
{
struct nvmet_fc_tgt_queue *queue;
- unsigned long flags;
int ret;
if (qid > NVMET_NR_QUEUES)
@@ -621,9 +830,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
goto out_fail_iodlist;
WARN_ON(assoc->queues[qid]);
- spin_lock_irqsave(&assoc->tgtport->lock, flags);
- assoc->queues[qid] = queue;
- spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
+ rcu_assign_pointer(assoc->queues[qid], queue);
return queue;
@@ -643,11 +850,8 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
{
struct nvmet_fc_tgt_queue *queue =
container_of(ref, struct nvmet_fc_tgt_queue, ref);
- unsigned long flags;
- spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
- queue->assoc->queues[queue->qid] = NULL;
- spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
+ rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
@@ -655,7 +859,7 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
destroy_workqueue(queue->work_q);
- kfree(queue);
+ kfree_rcu(queue, rcu);
}
static void
@@ -678,31 +882,33 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
struct nvmet_fc_fcp_iod *fod = queue->fod;
struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
unsigned long flags;
- int i, writedataactive;
+ int i;
bool disconnect;
disconnect = atomic_xchg(&queue->connected, 0);
+ /* if not connected, nothing to do */
+ if (!disconnect)
+ return;
+
spin_lock_irqsave(&queue->qlock, flags);
- /* about outstanding io's */
+ /* abort outstanding io's */
for (i = 0; i < queue->sqsize; fod++, i++) {
if (fod->active) {
spin_lock(&fod->flock);
fod->abort = true;
- writedataactive = fod->writedataactive;
- spin_unlock(&fod->flock);
/*
* only call lldd abort routine if waiting for
* writedata. other outstanding ops should finish
* on their own.
*/
- if (writedataactive) {
- spin_lock(&fod->flock);
+ if (fod->writedataactive) {
fod->aborted = true;
spin_unlock(&fod->flock);
tgtport->ops->fcp_abort(
&tgtport->fc_target_port, fod->fcpreq);
- }
+ } else
+ spin_unlock(&fod->flock);
}
}
@@ -742,8 +948,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
flush_workqueue(queue->work_q);
- if (disconnect)
- nvmet_sq_destroy(&queue->nvme_sq);
+ nvmet_sq_destroy(&queue->nvme_sq);
nvmet_fc_tgt_q_put(queue);
}
@@ -756,27 +961,137 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_tgt_queue *queue;
u64 association_id = nvmet_fc_getassociationid(connection_id);
u16 qid = nvmet_fc_getqueueid(connection_id);
- unsigned long flags;
if (qid > NVMET_NR_QUEUES)
return NULL;
- spin_lock_irqsave(&tgtport->lock, flags);
- list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (association_id == assoc->association_id) {
- queue = assoc->queues[qid];
+ queue = rcu_dereference(assoc->queues[qid]);
if (queue &&
(!atomic_read(&queue->connected) ||
!nvmet_fc_tgt_q_get(queue)))
queue = NULL;
- spin_unlock_irqrestore(&tgtport->lock, flags);
+ rcu_read_unlock();
return queue;
}
}
+ rcu_read_unlock();
+ return NULL;
+}
+
+static void
+nvmet_fc_hostport_free(struct kref *ref)
+{
+ struct nvmet_fc_hostport *hostport =
+ container_of(ref, struct nvmet_fc_hostport, ref);
+ struct nvmet_fc_tgtport *tgtport = hostport->tgtport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_del(&hostport->host_list);
spin_unlock_irqrestore(&tgtport->lock, flags);
+ if (tgtport->ops->host_release && hostport->invalid)
+ tgtport->ops->host_release(hostport->hosthandle);
+ kfree(hostport);
+ nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport)
+{
+ kref_put(&hostport->ref, nvmet_fc_hostport_free);
+}
+
+static int
+nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
+{
+ return kref_get_unless_zero(&hostport->ref);
+}
+
+static void
+nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
+{
+ /* if LLDD not implemented, leave as NULL */
+ if (!hostport || !hostport->hosthandle)
+ return;
+
+ nvmet_fc_hostport_put(hostport);
+}
+
+static struct nvmet_fc_hostport *
+nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+{
+ struct nvmet_fc_hostport *host;
+
+ lockdep_assert_held(&tgtport->lock);
+
+ list_for_each_entry(host, &tgtport->host_list, host_list) {
+ if (host->hosthandle == hosthandle && !host->invalid) {
+ if (nvmet_fc_hostport_get(host))
+ return (host);
+ }
+ }
+
return NULL;
}
+static struct nvmet_fc_hostport *
+nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+{
+ struct nvmet_fc_hostport *newhost, *match = NULL;
+ unsigned long flags;
+
+ /* if LLDD not implemented, leave as NULL */
+ if (!hosthandle)
+ return NULL;
+
+ /*
+ * take reference for what will be the newly allocated hostport if
+ * we end up using a new allocation
+ */
+ if (!nvmet_fc_tgtport_get(tgtport))
+ return ERR_PTR(-EINVAL);
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ match = nvmet_fc_match_hostport(tgtport, hosthandle);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ if (match) {
+ /* no new allocation - release reference */
+ nvmet_fc_tgtport_put(tgtport);
+ return match;
+ }
+
+ newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
+ if (!newhost) {
+ /* no new allocation - release reference */
+ nvmet_fc_tgtport_put(tgtport);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_irqsave(&tgtport->lock, flags);
+ match = nvmet_fc_match_hostport(tgtport, hosthandle);
+ if (match) {
+ /* new allocation not needed */
+ kfree(newhost);
+ newhost = match;
+ /* no new allocation - release reference */
+ nvmet_fc_tgtport_put(tgtport);
+ } else {
+ newhost->tgtport = tgtport;
+ newhost->hosthandle = hosthandle;
+ INIT_LIST_HEAD(&newhost->host_list);
+ kref_init(&newhost->ref);
+
+ list_add_tail(&newhost->host_list, &tgtport->host_list);
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ return newhost;
+}
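/*
 * Illustrative sketch, not part of this patch: nvmet_fc_alloc_hostport()
 * above uses the usual "look up, allocate unlocked, re-check under the
 * lock" pattern so the GFP_KERNEL allocation never happens with the
 * spinlock held.  Reduced to its essentials; every name below is
 * hypothetical:
 */
static struct example_obj *example_find_or_create(struct example_table *tbl,
						  void *key)
{
	struct example_obj *obj, *newobj;

	spin_lock(&tbl->lock);
	obj = example_lookup_locked(tbl, key);	/* hypothetical helper */
	spin_unlock(&tbl->lock);
	if (obj)
		return obj;

	newobj = kzalloc(sizeof(*newobj), GFP_KERNEL);	/* may sleep */
	if (!newobj)
		return ERR_PTR(-ENOMEM);

	spin_lock(&tbl->lock);
	obj = example_lookup_locked(tbl, key);	/* lost the race? */
	if (obj) {
		kfree(newobj);			/* keep the winner */
	} else {
		example_insert_locked(tbl, newobj, key);
		obj = newobj;
	}
	spin_unlock(&tbl->lock);

	return obj;
}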
+
static void
nvmet_fc_delete_assoc(struct work_struct *work)
{
@@ -788,7 +1103,7 @@ nvmet_fc_delete_assoc(struct work_struct *work)
}
static struct nvmet_fc_tgt_assoc *
-nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
+nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
unsigned long flags;
@@ -800,18 +1115,23 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
if (!assoc)
return NULL;
- idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
+ idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL);
if (idx < 0)
goto out_free_assoc;
if (!nvmet_fc_tgtport_get(tgtport))
- goto out_ida_put;
+ goto out_ida;
+
+ assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
+ if (IS_ERR(assoc->hostport))
+ goto out_put;
assoc->tgtport = tgtport;
assoc->a_id = idx;
INIT_LIST_HEAD(&assoc->a_list);
kref_init(&assoc->ref);
INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
+ atomic_set(&assoc->terminating, 0);
while (needrandom) {
get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
@@ -819,22 +1139,25 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
spin_lock_irqsave(&tgtport->lock, flags);
needrandom = false;
- list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
+ list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
if (ran == tmpassoc->association_id) {
needrandom = true;
break;
}
+ }
if (!needrandom) {
assoc->association_id = ran;
- list_add_tail(&assoc->a_list, &tgtport->assoc_list);
+ list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
}
spin_unlock_irqrestore(&tgtport->lock, flags);
}
return assoc;
-out_ida_put:
- ida_simple_remove(&tgtport->assoc_cnt, idx);
+out_put:
+ nvmet_fc_tgtport_put(tgtport);
+out_ida:
+ ida_free(&tgtport->assoc_cnt, idx);
out_free_assoc:
kfree(assoc);
return NULL;
@@ -846,13 +1169,25 @@ nvmet_fc_target_assoc_free(struct kref *ref)
struct nvmet_fc_tgt_assoc *assoc =
container_of(ref, struct nvmet_fc_tgt_assoc, ref);
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+ struct nvmet_fc_ls_iod *oldls;
unsigned long flags;
+ /* Send Disconnect now that all i/o has completed */
+ nvmet_fc_xmt_disconnect_assoc(assoc);
+
+ nvmet_fc_free_hostport(assoc->hostport);
spin_lock_irqsave(&tgtport->lock, flags);
- list_del(&assoc->a_list);
+ list_del_rcu(&assoc->a_list);
+ oldls = assoc->rcv_disconn;
spin_unlock_irqrestore(&tgtport->lock, flags);
- ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
- kfree(assoc);
+ /* if pending Rcv Disconnect Association LS, send rsp now */
+ if (oldls)
+ nvmet_fc_xmt_ls_rsp(tgtport, oldls);
+ ida_free(&tgtport->assoc_cnt, assoc->a_id);
+ dev_info(tgtport->dev,
+ "{%d:%d} Association freed\n",
+ tgtport->fc_target_port.port_num, assoc->a_id);
+ kfree_rcu(assoc, rcu);
nvmet_fc_tgtport_put(tgtport);
}
@@ -873,22 +1208,35 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
struct nvmet_fc_tgt_queue *queue;
- unsigned long flags;
- int i;
+ int i, terminating;
+
+ terminating = atomic_xchg(&assoc->terminating, 1);
+
+ /* if already terminating, do nothing */
+ if (terminating)
+ return;
+
- spin_lock_irqsave(&tgtport->lock, flags);
for (i = NVMET_NR_QUEUES; i >= 0; i--) {
- queue = assoc->queues[i];
- if (queue) {
- if (!nvmet_fc_tgt_q_get(queue))
- continue;
- spin_unlock_irqrestore(&tgtport->lock, flags);
- nvmet_fc_delete_target_queue(queue);
- nvmet_fc_tgt_q_put(queue);
- spin_lock_irqsave(&tgtport->lock, flags);
+ rcu_read_lock();
+ queue = rcu_dereference(assoc->queues[i]);
+ if (!queue) {
+ rcu_read_unlock();
+ continue;
}
+
+ if (!nvmet_fc_tgt_q_get(queue)) {
+ rcu_read_unlock();
+ continue;
+ }
+ rcu_read_unlock();
+ nvmet_fc_delete_target_queue(queue);
+ nvmet_fc_tgt_q_put(queue);
}
- spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ dev_info(tgtport->dev,
+ "{%d:%d} Association deleted\n",
+ tgtport->fc_target_port.port_num, assoc->a_id);
nvmet_fc_tgt_a_put(assoc);
}
@@ -899,17 +1247,17 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
{
struct nvmet_fc_tgt_assoc *assoc;
struct nvmet_fc_tgt_assoc *ret = NULL;
- unsigned long flags;
- spin_lock_irqsave(&tgtport->lock, flags);
- list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (association_id == assoc->association_id) {
ret = assoc;
- nvmet_fc_tgt_a_get(assoc);
+ if (!nvmet_fc_tgt_a_get(assoc))
+ ret = NULL;
break;
}
}
- spin_unlock_irqrestore(&tgtport->lock, flags);
+ rcu_read_unlock();
return ret;
}
@@ -993,7 +1341,7 @@ nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
}
/**
- * nvme_fc_register_targetport - transport entry point called by an
+ * nvmet_fc_register_targetport - transport entry point called by an
* LLDD to register the existence of a local
 * NVME subsystem FC port.
* @pinfo: pointer to information about the port to be registered
@@ -1035,7 +1383,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
goto out_regtgt_failed;
}
- idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
+ idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
goto out_fail_kfree;
@@ -1048,16 +1396,21 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
newrec->fc_target_port.node_name = pinfo->node_name;
newrec->fc_target_port.port_name = pinfo->port_name;
- newrec->fc_target_port.private = &newrec[1];
+ if (template->target_priv_sz)
+ newrec->fc_target_port.private = &newrec[1];
+ else
+ newrec->fc_target_port.private = NULL;
newrec->fc_target_port.port_id = pinfo->port_id;
newrec->fc_target_port.port_num = idx;
INIT_LIST_HEAD(&newrec->tgt_list);
newrec->dev = dev;
newrec->ops = template;
spin_lock_init(&newrec->lock);
- INIT_LIST_HEAD(&newrec->ls_list);
+ INIT_LIST_HEAD(&newrec->ls_rcv_list);
+ INIT_LIST_HEAD(&newrec->ls_req_list);
INIT_LIST_HEAD(&newrec->ls_busylist);
INIT_LIST_HEAD(&newrec->assoc_list);
+ INIT_LIST_HEAD(&newrec->host_list);
kref_init(&newrec->ref);
ida_init(&newrec->assoc_cnt);
newrec->max_sg_cnt = template->max_sgl_segments;
@@ -1080,7 +1433,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
out_free_newrec:
put_device(dev);
out_ida_put:
- ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
+ ida_free(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
kfree(newrec);
out_regtgt_failed:
@@ -1107,7 +1460,7 @@ nvmet_fc_free_tgtport(struct kref *ref)
/* let the LLDD know we've finished tearing it down */
tgtport->ops->targetport_delete(&tgtport->fc_target_port);
- ida_simple_remove(&nvmet_fc_tgtport_cnt,
+ ida_free(&nvmet_fc_tgtport_cnt,
tgtport->fc_target_port.port_num);
ida_destroy(&tgtport->assoc_cnt);
@@ -1132,19 +1485,78 @@ nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
+ struct nvmet_fc_tgt_assoc *assoc;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+ if (!nvmet_fc_tgt_a_get(assoc))
+ continue;
+ if (!queue_work(nvmet_wq, &assoc->del_work))
+ /* already deleting - release local reference */
+ nvmet_fc_tgt_a_put(assoc);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * nvmet_fc_invalidate_host - transport entry point called by an LLDD
+ * to remove references to a hosthandle for LS's.
+ *
+ * The nvmet-fc layer ensures that any references to the hosthandle
+ * on the targetport are forgotten (set to NULL). The LLDD will
+ * typically call this when a login with a remote host port has been
+ * lost, thus LS's for the remote host port are no longer possible.
+ *
+ * If an LS request is outstanding to the targetport/hosthandle (or
+ * issued concurrently with the call to invalidate the host), the
+ * LLDD is responsible for terminating/aborting the LS and completing
+ * the LS request. It is recommended that these terminations/aborts
+ * occur after the call to invalidate the host handle to avoid additional
+ * retries by the nvmet-fc transport. The nvmet-fc transport may
+ * continue to reference the host handle while it cleans up outstanding
+ * NVME associations. The nvmet-fc transport will call the
+ * ops->host_release() callback to notify the LLDD that all references
+ * are complete and the related host handle can be recovered.
+ * Note: if there are no references, the callback may be called before
+ * the invalidate host call returns.
+ *
+ * @target_port: pointer to the (registered) target port that a prior
+ * LS was received on and which supplied the transport the
+ * hosthandle.
+ * @hosthandle: the handle (pointer) that represents the host port
+ * that no longer has connectivity and that LS's should
+ * no longer be directed to.
+ */
+void
+nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
+ void *hosthandle)
+{
+ struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
struct nvmet_fc_tgt_assoc *assoc, *next;
unsigned long flags;
+ bool noassoc = true;
spin_lock_irqsave(&tgtport->lock, flags);
list_for_each_entry_safe(assoc, next,
&tgtport->assoc_list, a_list) {
+ if (!assoc->hostport ||
+ assoc->hostport->hosthandle != hosthandle)
+ continue;
if (!nvmet_fc_tgt_a_get(assoc))
continue;
- if (!schedule_work(&assoc->del_work))
+ assoc->hostport->invalid = 1;
+ noassoc = false;
+ if (!queue_work(nvmet_wq, &assoc->del_work))
+ /* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
}
spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ /* if there's nothing to wait for - call the callback */
+ if (noassoc && tgtport->ops->host_release)
+ tgtport->ops->host_release(hosthandle);
}
+EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);
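/*
 * Illustrative sketch, not part of this patch: how an LLDD might use the
 * entry point above together with a host_release() callback.  Everything
 * except nvmet_fc_invalidate_host() is hypothetical.
 */
static void example_lldd_host_release(void *hosthandle)
{
	/* nvmet-fc has dropped all references; the handle may be recycled */
	example_free_login_state(hosthandle);		/* hypothetical */
}

static void example_lldd_login_lost(struct example_login *login)
{
	/* stop nvmet-fc from directing further LS's at this host ... */
	nvmet_fc_invalidate_host(login->targetport, login);

	/* ... then terminate/abort any LS already in flight (LLDD's job) */
	example_abort_outstanding_ls(login);		/* hypothetical */
}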
/*
* nvmet layer has called to terminate an association
@@ -1166,21 +1578,22 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
continue;
spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
- spin_lock_irqsave(&tgtport->lock, flags);
- list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
- queue = assoc->queues[0];
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+ queue = rcu_dereference(assoc->queues[0]);
if (queue && queue->nvme_sq.ctrl == ctrl) {
if (nvmet_fc_tgt_a_get(assoc))
found_ctrl = true;
break;
}
}
- spin_unlock_irqrestore(&tgtport->lock, flags);
+ rcu_read_unlock();
nvmet_fc_tgtport_put(tgtport);
if (found_ctrl) {
- if (!schedule_work(&assoc->del_work))
+ if (!queue_work(nvmet_wq, &assoc->del_work))
+ /* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
return;
}
@@ -1191,7 +1604,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
}
/**
- * nvme_fc_unregister_targetport - transport entry point called by an
+ * nvmet_fc_unregister_targetport - transport entry point called by an
* LLDD to deregister/remove a previously
 * registered local NVME subsystem FC port.
* @target_port: pointer to the (registered) target port that is to be
@@ -1211,6 +1624,13 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
/* terminate any outstanding associations */
__nvmet_fc_free_assocs(tgtport);
+ /*
+ * should terminate LS's as well. However, LS's will be generated
+ * at the tail end of association termination, so they likely don't
+ * exist yet. And even if they did, it's worthwhile to just let
+ * them finish and targetport ref counting will clean things up.
+ */
+
nvmet_fc_tgtport_put(tgtport);
return 0;
@@ -1218,113 +1638,15 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
-/* *********************** FC-NVME LS Handling **************************** */
-
-
-static void
-nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
-{
- struct fcnvme_ls_acc_hdr *acc = buf;
-
- acc->w0.ls_cmd = ls_cmd;
- acc->desc_list_len = desc_len;
- acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
- acc->rqst.desc_len =
- fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
- acc->rqst.w0.ls_cmd = rqst_ls_cmd;
-}
-
-static int
-nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
- u8 reason, u8 explanation, u8 vendor)
-{
- struct fcnvme_ls_rjt *rjt = buf;
-
- nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
- fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
- ls_cmd);
- rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
- rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
- rjt->rjt.reason_code = reason;
- rjt->rjt.reason_explanation = explanation;
- rjt->rjt.vendor = vendor;
-
- return sizeof(struct fcnvme_ls_rjt);
-}
-
-/* Validation Error indexes into the string table below */
-enum {
- VERR_NO_ERROR = 0,
- VERR_CR_ASSOC_LEN = 1,
- VERR_CR_ASSOC_RQST_LEN = 2,
- VERR_CR_ASSOC_CMD = 3,
- VERR_CR_ASSOC_CMD_LEN = 4,
- VERR_ERSP_RATIO = 5,
- VERR_ASSOC_ALLOC_FAIL = 6,
- VERR_QUEUE_ALLOC_FAIL = 7,
- VERR_CR_CONN_LEN = 8,
- VERR_CR_CONN_RQST_LEN = 9,
- VERR_ASSOC_ID = 10,
- VERR_ASSOC_ID_LEN = 11,
- VERR_NO_ASSOC = 12,
- VERR_CONN_ID = 13,
- VERR_CONN_ID_LEN = 14,
- VERR_NO_CONN = 15,
- VERR_CR_CONN_CMD = 16,
- VERR_CR_CONN_CMD_LEN = 17,
- VERR_DISCONN_LEN = 18,
- VERR_DISCONN_RQST_LEN = 19,
- VERR_DISCONN_CMD = 20,
- VERR_DISCONN_CMD_LEN = 21,
- VERR_DISCONN_SCOPE = 22,
- VERR_RS_LEN = 23,
- VERR_RS_RQST_LEN = 24,
- VERR_RS_CMD = 25,
- VERR_RS_CMD_LEN = 26,
- VERR_RS_RCTL = 27,
- VERR_RS_RO = 28,
-};
+/* ********************** FC-NVME LS RCV Handling ************************* */
-static char *validation_errors[] = {
- "OK",
- "Bad CR_ASSOC Length",
- "Bad CR_ASSOC Rqst Length",
- "Not CR_ASSOC Cmd",
- "Bad CR_ASSOC Cmd Length",
- "Bad Ersp Ratio",
- "Association Allocation Failed",
- "Queue Allocation Failed",
- "Bad CR_CONN Length",
- "Bad CR_CONN Rqst Length",
- "Not Association ID",
- "Bad Association ID Length",
- "No Association",
- "Not Connection ID",
- "Bad Connection ID Length",
- "No Connection",
- "Not CR_CONN Cmd",
- "Bad CR_CONN Cmd Length",
- "Bad DISCONN Length",
- "Bad DISCONN Rqst Length",
- "Not DISCONN Cmd",
- "Bad DISCONN Cmd Length",
- "Bad Disconnect Scope",
- "Bad RS Length",
- "Bad RS Rqst Length",
- "Not RS Cmd",
- "Bad RS Cmd Length",
- "Bad RS R_CTL",
- "Bad RS Relative Offset",
-};
static void
nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_ls_iod *iod)
{
- struct fcnvme_ls_cr_assoc_rqst *rqst =
- (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
- struct fcnvme_ls_cr_assoc_acc *acc =
- (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
+ struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc;
+ struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc;
struct nvmet_fc_tgt_queue *queue;
int ret = 0;
@@ -1356,7 +1678,8 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
else {
/* new association w/ admin queue */
- iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
+ iod->assoc = nvmet_fc_alloc_target_assoc(
+ tgtport, iod->hosthandle);
if (!iod->assoc)
ret = VERR_ASSOC_ALLOC_FAIL;
else {
@@ -1371,8 +1694,8 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
dev_err(tgtport->dev,
"Create Association LS failed: %s\n",
validation_errors[ret]);
- iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
- NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+ iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
+ sizeof(*acc), rqst->w0.ls_cmd,
FCNVME_RJT_RC_LOGIC,
FCNVME_RJT_EXP_NONE, 0);
return;
@@ -1382,11 +1705,15 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
atomic_set(&queue->connected, 1);
queue->sqhd = 0; /* best place to init value */
+ dev_info(tgtport->dev,
+ "{%d:%d} Association created\n",
+ tgtport->fc_target_port.port_num, iod->assoc->a_id);
+
/* format a response */
- iod->lsreq->rsplen = sizeof(*acc);
+ iod->lsrsp->rsplen = sizeof(*acc);
- nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+ nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
fcnvme_lsdesc_len(
sizeof(struct fcnvme_ls_cr_assoc_acc)),
FCNVME_LS_CREATE_ASSOCIATION);
@@ -1407,10 +1734,8 @@ static void
nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_ls_iod *iod)
{
- struct fcnvme_ls_cr_conn_rqst *rqst =
- (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
- struct fcnvme_ls_cr_conn_acc *acc =
- (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
+ struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn;
+ struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn;
struct nvmet_fc_tgt_queue *queue;
int ret = 0;
@@ -1462,8 +1787,8 @@ nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
dev_err(tgtport->dev,
"Create Connection LS failed: %s\n",
validation_errors[ret]);
- iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
- NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+ iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
+ sizeof(*acc), rqst->w0.ls_cmd,
(ret == VERR_NO_ASSOC) ?
FCNVME_RJT_RC_INV_ASSOC :
FCNVME_RJT_RC_LOGIC,
@@ -1477,9 +1802,9 @@ nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
/* format a response */
- iod->lsreq->rsplen = sizeof(*acc);
+ iod->lsrsp->rsplen = sizeof(*acc);
- nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+ nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
FCNVME_LS_CREATE_CONNECTION);
acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
@@ -1491,46 +1816,28 @@ nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
be16_to_cpu(rqst->connect_cmd.qid)));
}
-static void
+/*
+ * Returns true if the LS response is to be transmitted
+ * Returns false if the LS response is to be delayed
+ */
+static int
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_ls_iod *iod)
{
struct fcnvme_ls_disconnect_assoc_rqst *rqst =
- (struct fcnvme_ls_disconnect_assoc_rqst *)iod->rqstbuf;
+ &iod->rqstbuf->rq_dis_assoc;
struct fcnvme_ls_disconnect_assoc_acc *acc =
- (struct fcnvme_ls_disconnect_assoc_acc *)iod->rspbuf;
- struct nvmet_fc_tgt_assoc *assoc;
+ &iod->rspbuf->rsp_dis_assoc;
+ struct nvmet_fc_tgt_assoc *assoc = NULL;
+ struct nvmet_fc_ls_iod *oldls = NULL;
+ unsigned long flags;
int ret = 0;
memset(acc, 0, sizeof(*acc));
- if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_assoc_rqst))
- ret = VERR_DISCONN_LEN;
- else if (rqst->desc_list_len !=
- fcnvme_lsdesc_len(
- sizeof(struct fcnvme_ls_disconnect_assoc_rqst)))
- ret = VERR_DISCONN_RQST_LEN;
- else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
- ret = VERR_ASSOC_ID;
- else if (rqst->associd.desc_len !=
- fcnvme_lsdesc_len(
- sizeof(struct fcnvme_lsdesc_assoc_id)))
- ret = VERR_ASSOC_ID_LEN;
- else if (rqst->discon_cmd.desc_tag !=
- cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
- ret = VERR_DISCONN_CMD;
- else if (rqst->discon_cmd.desc_len !=
- fcnvme_lsdesc_len(
- sizeof(struct fcnvme_lsdesc_disconn_cmd)))
- ret = VERR_DISCONN_CMD_LEN;
- /*
- * As the standard changed on the LS, check if old format and scope
- * something other than Association (e.g. 0).
- */
- else if (rqst->discon_cmd.rsvd8[0])
- ret = VERR_DISCONN_SCOPE;
- else {
- /* match an active association */
+ ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
+ if (!ret) {
+ /* match an active association - takes an assoc ref if !NULL */
assoc = nvmet_fc_find_target_assoc(tgtport,
be64_to_cpu(rqst->associd.association_id));
iod->assoc = assoc;
@@ -1538,34 +1845,63 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
ret = VERR_NO_ASSOC;
}
- if (ret) {
+ if (ret || !assoc) {
dev_err(tgtport->dev,
"Disconnect LS failed: %s\n",
validation_errors[ret]);
- iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
- NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+ iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
+ sizeof(*acc), rqst->w0.ls_cmd,
(ret == VERR_NO_ASSOC) ?
FCNVME_RJT_RC_INV_ASSOC :
- (ret == VERR_NO_CONN) ?
- FCNVME_RJT_RC_INV_CONN :
- FCNVME_RJT_RC_LOGIC,
+ FCNVME_RJT_RC_LOGIC,
FCNVME_RJT_EXP_NONE, 0);
- return;
+ return true;
}
/* format a response */
- iod->lsreq->rsplen = sizeof(*acc);
+ iod->lsrsp->rsplen = sizeof(*acc);
- nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+ nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
fcnvme_lsdesc_len(
sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
FCNVME_LS_DISCONNECT_ASSOC);
/* release get taken in nvmet_fc_find_target_assoc */
- nvmet_fc_tgt_a_put(iod->assoc);
+ nvmet_fc_tgt_a_put(assoc);
- nvmet_fc_delete_target_assoc(iod->assoc);
+ /*
+ * The rules for LS response says the response cannot
+ * go back until ABTS's have been sent for all outstanding
+ * I/O and a Disconnect Association LS has been sent.
+ * So... save off the Disconnect LS to send the response
+ * later. If there was a prior LS already saved, replace
+ * it with the newer one and send a can't perform reject
+ * on the older one.
+ */
+ spin_lock_irqsave(&tgtport->lock, flags);
+ oldls = assoc->rcv_disconn;
+ assoc->rcv_disconn = iod;
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ nvmet_fc_delete_target_assoc(assoc);
+
+ if (oldls) {
+ dev_info(tgtport->dev,
+ "{%d:%d} Multiple Disconnect Association LS's "
+ "received\n",
+ tgtport->fc_target_port.port_num, assoc->a_id);
+ /* overwrite good response with bogus failure */
+ oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
+ sizeof(*iod->rspbuf),
+ /* ok to use rqst, LS is same */
+ rqst->w0.ls_cmd,
+ FCNVME_RJT_RC_UNAB,
+ FCNVME_RJT_EXP_NONE, 0);
+ nvmet_fc_xmt_ls_rsp(tgtport, oldls);
+ }
+
+ return false;
}
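/*
 * Illustrative sketch, not part of this patch: the two halves of the
 * deferred Disconnect response described in the comment above.  The
 * receive side parks the iod; the association-free side (see
 * nvmet_fc_target_assoc_free() earlier in this patch) transmits it once
 * teardown is complete.  Names below are local to this sketch.
 */
/* receive side: remember the LS, answer it later */
static void example_park_disconnect(struct example_assoc *assoc,
				    struct example_ls *ls)
{
	spin_lock(&assoc->lock);
	assoc->pending_disconnect = ls;		/* response deferred */
	spin_unlock(&assoc->lock);
}

/* teardown side: all I/O aborted, now the saved response may go out */
static void example_flush_disconnect(struct example_assoc *assoc)
{
	struct example_ls *ls;

	spin_lock(&assoc->lock);
	ls = assoc->pending_disconnect;
	assoc->pending_disconnect = NULL;
	spin_unlock(&assoc->lock);

	if (ls)
		example_xmt_ls_rsp(ls);		/* hypothetical transmit */
}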
@@ -1577,13 +1913,13 @@ static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
static void
-nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
+nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
{
- struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
+ struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private;
struct nvmet_fc_tgtport *tgtport = iod->tgtport;
fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
- NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+ sizeof(*iod->rspbuf), DMA_TO_DEVICE);
nvmet_fc_free_ls_iod(tgtport, iod);
nvmet_fc_tgtport_put(tgtport);
}
@@ -1595,11 +1931,11 @@ nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
int ret;
fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
- NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+ sizeof(*iod->rspbuf), DMA_TO_DEVICE);
- ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
+ ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
if (ret)
- nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
+ nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
}
/*
@@ -1609,15 +1945,15 @@ static void
nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_ls_iod *iod)
{
- struct fcnvme_ls_rqst_w0 *w0 =
- (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
+ struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0;
+ bool sendrsp = true;
- iod->lsreq->nvmet_fc_private = iod;
- iod->lsreq->rspbuf = iod->rspbuf;
- iod->lsreq->rspdma = iod->rspdma;
- iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
+ iod->lsrsp->nvme_fc_private = iod;
+ iod->lsrsp->rspbuf = iod->rspbuf;
+ iod->lsrsp->rspdma = iod->rspdma;
+ iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done;
/* Be preventative. handlers will later set to valid length */
- iod->lsreq->rsplen = 0;
+ iod->lsrsp->rsplen = 0;
iod->assoc = NULL;
@@ -1637,15 +1973,16 @@ nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
break;
case FCNVME_LS_DISCONNECT_ASSOC:
/* Terminate a Queue/Connection or the Association */
- nvmet_fc_ls_disconnect(tgtport, iod);
+ sendrsp = nvmet_fc_ls_disconnect(tgtport, iod);
break;
default:
- iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
- NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
+ iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
+ sizeof(*iod->rspbuf), w0->ls_cmd,
FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
}
- nvmet_fc_xmt_ls_rsp(tgtport, iod);
+ if (sendrsp)
+ nvmet_fc_xmt_ls_rsp(tgtport, iod);
}
/*
@@ -1674,37 +2011,56 @@ nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
*
* @target_port: pointer to the (registered) target port the LS was
* received on.
- * @lsreq: pointer to a lsreq request structure to be used to reference
+ * @hosthandle: pointer to the host specific data, gets stored in iod.
+ * @lsrsp: pointer to a lsrsp structure to be used to reference
* the exchange corresponding to the LS.
* @lsreqbuf: pointer to the buffer containing the LS Request
* @lsreqbuf_len: length, in bytes, of the received LS request
*/
int
nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
- struct nvmefc_tgt_ls_req *lsreq,
+ void *hosthandle,
+ struct nvmefc_ls_rsp *lsrsp,
void *lsreqbuf, u32 lsreqbuf_len)
{
struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
struct nvmet_fc_ls_iod *iod;
-
- if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
+ struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
+
+ if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
+ dev_info(tgtport->dev,
+ "RCV %s LS failed: payload too large (%d)\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "",
+ lsreqbuf_len);
return -E2BIG;
+ }
- if (!nvmet_fc_tgtport_get(tgtport))
+ if (!nvmet_fc_tgtport_get(tgtport)) {
+ dev_info(tgtport->dev,
+ "RCV %s LS failed: target deleting\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
return -ESHUTDOWN;
+ }
iod = nvmet_fc_alloc_ls_iod(tgtport);
if (!iod) {
+ dev_info(tgtport->dev,
+ "RCV %s LS failed: context allocation failed\n",
+ (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
+ nvmefc_ls_names[w0->ls_cmd] : "");
nvmet_fc_tgtport_put(tgtport);
return -ENOENT;
}
- iod->lsreq = lsreq;
+ iod->lsrsp = lsrsp;
iod->fcpreq = NULL;
memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
iod->rqstdatalen = lsreqbuf_len;
+ iod->hosthandle = hosthandle;
- schedule_work(&iod->work);
+ queue_work(nvmet_wq, &iod->work);
return 0;
}
@@ -1997,9 +2353,9 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
return;
if (fcpreq->fcp_error ||
fcpreq->transferred_length != fcpreq->transfer_length) {
- spin_lock(&fod->flock);
+ spin_lock_irqsave(&fod->flock, flags);
fod->abort = true;
- spin_unlock(&fod->flock);
+ spin_unlock_irqrestore(&fod->flock, flags);
nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
return;
@@ -2155,13 +2511,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
int ret;
/*
- * if there is no nvmet mapping to the targetport there
- * shouldn't be requests. just terminate them.
- */
- if (!tgtport->pe)
- goto transport_error;
-
- /*
* Fused commands are currently not supported in the linux
* implementation.
*
@@ -2188,7 +2537,8 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
fod->req.cmd = &fod->cmdiubuf.sqe;
fod->req.cqe = &fod->rspiubuf.cqe;
- fod->req.port = tgtport->pe->port;
+ if (tgtport->pe)
+ fod->req.port = tgtport->pe->port;
/* clear any response payload */
memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 1c50af6219f3..5c16372f3b53 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -43,6 +43,17 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_ERR, NULL }
};
+static int fcloop_verify_addr(substring_t *s)
+{
+ size_t blen = s->to - s->from + 1;
+
+ if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 ||
+ strncmp(s->from, "0x", 2))
+ return -EINVAL;
+
+ return 0;
+}
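/*
 * Illustrative sketch, not part of this patch: fcloop_verify_addr() above
 * only accepts a full-width hex WWNN/WWPN, i.e. "0x" followed by exactly
 * NVME_FC_TRADDR_HEXNAMELEN (16) hex digits; match_u64() then does the
 * actual hex conversion.  Standalone check with the same length/prefix
 * rule (userspace-style, names local to this sketch):
 */
#include <stdbool.h>
#include <string.h>

static bool example_wwn_format_ok(const char *s)
{
	return strlen(s) == 2 + 16 && strncmp(s, "0x", 2) == 0;
}

/* "0x20000090fa942a6d" passes; "20000090fa942a6d" and "0x1" do not */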
+
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
const char *buf)
@@ -64,14 +75,16 @@ fcloop_parse_options(struct fcloop_ctrl_options *opts,
opts->mask |= token;
switch (token) {
case NVMF_OPT_WWNN:
- if (match_u64(args, &token64)) {
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
ret = -EINVAL;
goto out_free_options;
}
opts->wwnn = token64;
break;
case NVMF_OPT_WWPN:
- if (match_u64(args, &token64)) {
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
ret = -EINVAL;
goto out_free_options;
}
@@ -92,14 +105,16 @@ fcloop_parse_options(struct fcloop_ctrl_options *opts,
opts->fcaddr = token;
break;
case NVMF_OPT_LPWWNN:
- if (match_u64(args, &token64)) {
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
ret = -EINVAL;
goto out_free_options;
}
opts->lpwwnn = token64;
break;
case NVMF_OPT_LPWWPN:
- if (match_u64(args, &token64)) {
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
ret = -EINVAL;
goto out_free_options;
}
@@ -141,14 +156,16 @@ fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
token = match_token(p, opt_tokens, args);
switch (token) {
case NVMF_OPT_WWNN:
- if (match_u64(args, &token64)) {
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
ret = -EINVAL;
goto out_free_options;
}
*nname = token64;
break;
case NVMF_OPT_WWPN:
- if (match_u64(args, &token64)) {
+ if (fcloop_verify_addr(args) ||
+ match_u64(args, &token64)) {
ret = -EINVAL;
goto out_free_options;
}
@@ -198,17 +215,23 @@ struct fcloop_lport_priv {
};
struct fcloop_rport {
- struct nvme_fc_remote_port *remoteport;
- struct nvmet_fc_target_port *targetport;
- struct fcloop_nport *nport;
- struct fcloop_lport *lport;
+ struct nvme_fc_remote_port *remoteport;
+ struct nvmet_fc_target_port *targetport;
+ struct fcloop_nport *nport;
+ struct fcloop_lport *lport;
+ spinlock_t lock;
+ struct list_head ls_list;
+ struct work_struct ls_work;
};
struct fcloop_tport {
- struct nvmet_fc_target_port *targetport;
- struct nvme_fc_remote_port *remoteport;
- struct fcloop_nport *nport;
- struct fcloop_lport *lport;
+ struct nvmet_fc_target_port *targetport;
+ struct nvme_fc_remote_port *remoteport;
+ struct fcloop_nport *nport;
+ struct fcloop_lport *lport;
+ spinlock_t lock;
+ struct list_head ls_list;
+ struct work_struct ls_work;
};
struct fcloop_nport {
@@ -224,11 +247,11 @@ struct fcloop_nport {
};
struct fcloop_lsreq {
- struct fcloop_tport *tport;
struct nvmefc_ls_req *lsreq;
- struct work_struct work;
- struct nvmefc_tgt_ls_req tgt_ls_req;
+ struct nvmefc_ls_rsp ls_rsp;
+ int lsdir; /* H2T or T2H */
int status;
+ struct list_head ls_list; /* fcloop_rport->ls_list */
};
struct fcloop_rscn {
@@ -265,9 +288,9 @@ struct fcloop_ini_fcpreq {
};
static inline struct fcloop_lsreq *
-tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
+ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
{
- return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
+ return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
}
static inline struct fcloop_fcpreq *
@@ -292,25 +315,36 @@ fcloop_delete_queue(struct nvme_fc_local_port *localport,
{
}
-
-/*
- * Transmit of LS RSP done (e.g. buffers all set). call back up
- * initiator "done" flows.
- */
static void
-fcloop_tgt_lsrqst_done_work(struct work_struct *work)
+fcloop_rport_lsrqst_work(struct work_struct *work)
{
- struct fcloop_lsreq *tls_req =
- container_of(work, struct fcloop_lsreq, work);
- struct fcloop_tport *tport = tls_req->tport;
- struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+ struct fcloop_rport *rport =
+ container_of(work, struct fcloop_rport, ls_work);
+ struct fcloop_lsreq *tls_req;
+
+ spin_lock(&rport->lock);
+ for (;;) {
+ tls_req = list_first_entry_or_null(&rport->ls_list,
+ struct fcloop_lsreq, ls_list);
+ if (!tls_req)
+ break;
- if (!tport || tport->remoteport)
- lsreq->done(lsreq, tls_req->status);
+ list_del(&tls_req->ls_list);
+ spin_unlock(&rport->lock);
+
+ tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
+ /*
+ * callee may free memory containing tls_req.
+ * do not reference lsreq after this.
+ */
+
+ spin_lock(&rport->lock);
+ }
+ spin_unlock(&rport->lock);
}
static int
-fcloop_ls_req(struct nvme_fc_local_port *localport,
+fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
struct nvme_fc_remote_port *remoteport,
struct nvmefc_ls_req *lsreq)
{
@@ -319,40 +353,145 @@ fcloop_ls_req(struct nvme_fc_local_port *localport,
int ret = 0;
tls_req->lsreq = lsreq;
- INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);
+ INIT_LIST_HEAD(&tls_req->ls_list);
if (!rport->targetport) {
tls_req->status = -ECONNREFUSED;
- tls_req->tport = NULL;
- schedule_work(&tls_req->work);
+ spin_lock(&rport->lock);
+ list_add_tail(&tls_req->ls_list, &rport->ls_list);
+ spin_unlock(&rport->lock);
+ queue_work(nvmet_wq, &rport->ls_work);
return ret;
}
tls_req->status = 0;
- tls_req->tport = rport->targetport->private;
- ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
- lsreq->rqstaddr, lsreq->rqstlen);
+ ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
+ &tls_req->ls_rsp,
+ lsreq->rqstaddr, lsreq->rqstlen);
return ret;
}
static int
-fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
- struct nvmefc_tgt_ls_req *tgt_lsreq)
+fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
+ struct nvmefc_ls_rsp *lsrsp)
{
- struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
+ struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+ struct fcloop_tport *tport = targetport->private;
+ struct nvme_fc_remote_port *remoteport = tport->remoteport;
+ struct fcloop_rport *rport;
+
+ memcpy(lsreq->rspaddr, lsrsp->rspbuf,
+ ((lsreq->rsplen < lsrsp->rsplen) ?
+ lsreq->rsplen : lsrsp->rsplen));
+
+ lsrsp->done(lsrsp);
+
+ if (remoteport) {
+ rport = remoteport->private;
+ spin_lock(&rport->lock);
+ list_add_tail(&tls_req->ls_list, &rport->ls_list);
+ spin_unlock(&rport->lock);
+ queue_work(nvmet_wq, &rport->ls_work);
+ }
+
+ return 0;
+}
+
+static void
+fcloop_tport_lsrqst_work(struct work_struct *work)
+{
+ struct fcloop_tport *tport =
+ container_of(work, struct fcloop_tport, ls_work);
+ struct fcloop_lsreq *tls_req;
+
+ spin_lock(&tport->lock);
+ for (;;) {
+ tls_req = list_first_entry_or_null(&tport->ls_list,
+ struct fcloop_lsreq, ls_list);
+ if (!tls_req)
+ break;
+
+ list_del(&tls_req->ls_list);
+ spin_unlock(&tport->lock);
+
+ tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
+ /*
+ * callee may free memory containing tls_req.
+ * do not reference lsreq after this.
+ */
+
+ spin_lock(&tport->lock);
+ }
+ spin_unlock(&tport->lock);
+}
+
+static int
+fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
+ struct nvmefc_ls_req *lsreq)
+{
+ struct fcloop_lsreq *tls_req = lsreq->private;
+ struct fcloop_tport *tport = targetport->private;
+ int ret = 0;
- memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
- ((lsreq->rsplen < tgt_lsreq->rsplen) ?
- lsreq->rsplen : tgt_lsreq->rsplen));
- tgt_lsreq->done(tgt_lsreq);
+ /*
+ * hosthandle should be the dst.rport value.
+ * hosthandle is ignored here as fcloop currently has a
+ * 1:1 tgtport to remoteport mapping
+ */
+ tls_req->lsreq = lsreq;
+ INIT_LIST_HEAD(&tls_req->ls_list);
+
+ if (!tport->remoteport) {
+ tls_req->status = -ECONNREFUSED;
+ spin_lock(&tport->lock);
+ list_add_tail(&tls_req->ls_list, &tport->ls_list);
+ spin_unlock(&tport->lock);
+ queue_work(nvmet_wq, &tport->ls_work);
+ return ret;
+ }
+
+ tls_req->status = 0;
+ ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
+ lsreq->rqstaddr, lsreq->rqstlen);
+
+ return ret;
+}
+
+static int
+fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *remoteport,
+ struct nvmefc_ls_rsp *lsrsp)
+{
+ struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
+ struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+ struct fcloop_rport *rport = remoteport->private;
+ struct nvmet_fc_target_port *targetport = rport->targetport;
+ struct fcloop_tport *tport;
- schedule_work(&tls_req->work);
+ memcpy(lsreq->rspaddr, lsrsp->rspbuf,
+ ((lsreq->rsplen < lsrsp->rsplen) ?
+ lsreq->rsplen : lsrsp->rsplen));
+ lsrsp->done(lsrsp);
+
+ if (targetport) {
+ tport = targetport->private;
+ spin_lock(&tport->lock);
+ list_add_tail(&tls_req->ls_list, &tport->ls_list);
+ spin_unlock(&tport->lock);
+ queue_work(nvmet_wq, &tport->ls_work);
+ }
return 0;
}
+static void
+fcloop_t2h_host_release(void *hosthandle)
+{
+ /* host handle ignored for now */
+}
+
/*
 * Simulate reception of an RSCN and convert it to an initiator transport
 * call to rescan a remote port.
@@ -381,7 +520,7 @@ fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
tgt_rscn->tport = tgtport->private;
INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
- schedule_work(&tgt_rscn->work);
+ queue_work(nvmet_wq, &tgt_rscn->work);
}
static void
@@ -425,6 +564,50 @@ fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
fcloop_tfcp_req_put(tfcp_req);
}
+static bool drop_fabric_opcode;
+#define DROP_OPCODE_MASK 0x00FF
+/* a fabrics opcode will have a bit set above the 1st byte */
+static int drop_opcode = -1;
+static int drop_instance;
+static int drop_amount;
+static int drop_current_cnt;
+
+/*
+ * Routine to parse the io and determine if it is to be dropped.
+ * Returns:
+ * 0 if the io is not to be dropped
+ * 1 if the io was dropped
+ */
+static int check_for_drop(struct fcloop_fcpreq *tfcp_req)
+{
+ struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+ struct nvme_fc_cmd_iu *cmdiu = fcpreq->cmdaddr;
+ struct nvme_command *sqe = &cmdiu->sqe;
+
+ if (drop_opcode == -1)
+ return 0;
+
+ pr_info("%s: seq opcd x%02x fctype x%02x: drop F %s op x%02x "
+ "inst %d start %d amt %d\n",
+ __func__, sqe->common.opcode, sqe->fabrics.fctype,
+ drop_fabric_opcode ? "y" : "n",
+ drop_opcode, drop_current_cnt, drop_instance, drop_amount);
+
+ if ((drop_fabric_opcode &&
+ (sqe->common.opcode != nvme_fabrics_command ||
+ sqe->fabrics.fctype != drop_opcode)) ||
+ (!drop_fabric_opcode && sqe->common.opcode != drop_opcode))
+ return 0;
+
+ if (++drop_current_cnt >= drop_instance) {
+ if (drop_current_cnt >= drop_instance + drop_amount)
+ drop_opcode = -1;
+ return 1;
+ }
+
+ return 0;
+}
+
static void
fcloop_fcp_recv_work(struct work_struct *work)
{
@@ -451,10 +634,14 @@ fcloop_fcp_recv_work(struct work_struct *work)
if (unlikely(aborted))
ret = -ECANCELED;
- else
- ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
+ else {
+ if (likely(!check_for_drop(tfcp_req)))
+ ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
&tfcp_req->tgt_fcp_req,
fcpreq->cmdaddr, fcpreq->cmdlen);
+ else
+ pr_info("%s: dropped command ********\n", __func__);
+ }
if (ret)
fcloop_call_host_done(fcpreq, tfcp_req, ret);
@@ -552,7 +739,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
kref_init(&tfcp_req->ref);
- schedule_work(&tfcp_req->fcp_rcv_work);
+ queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
return 0;
}
@@ -673,7 +860,7 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
break;
/* Fall-Thru to RSP handling */
- /* FALLTHRU */
+ fallthrough;
case NVMET_FCOP_RSP:
if (fcpreq) {
@@ -734,17 +921,23 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
{
struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
- schedule_work(&tfcp_req->tio_done_work);
+ queue_work(nvmet_wq, &tfcp_req->tio_done_work);
}
static void
-fcloop_ls_abort(struct nvme_fc_local_port *localport,
+fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
struct nvme_fc_remote_port *remoteport,
struct nvmefc_ls_req *lsreq)
{
}
static void
+fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
+ void *hosthandle, struct nvmefc_ls_req *lsreq)
+{
+}
+
+static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
struct nvme_fc_remote_port *remoteport,
void *hw_queue_handle,
@@ -783,7 +976,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
if (abortio)
/* leave the reference while the work item is scheduled */
- WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
+ WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
else {
/*
* as the io has already had the done callback made,
@@ -834,6 +1027,7 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
struct fcloop_rport *rport = remoteport->private;
+ flush_work(&rport->ls_work);
fcloop_nport_put(rport->nport);
}
@@ -842,6 +1036,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
struct fcloop_tport *tport = targetport->private;
+ flush_work(&tport->ls_work);
fcloop_nport_put(tport->nport);
}
@@ -850,15 +1045,15 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
#define FCLOOP_DMABOUND_4G 0xFFFFFFFF
static struct nvme_fc_port_template fctemplate = {
- .module = THIS_MODULE,
.localport_delete = fcloop_localport_delete,
.remoteport_delete = fcloop_remoteport_delete,
.create_queue = fcloop_create_queue,
.delete_queue = fcloop_delete_queue,
- .ls_req = fcloop_ls_req,
+ .ls_req = fcloop_h2t_ls_req,
.fcp_io = fcloop_fcp_req,
- .ls_abort = fcloop_ls_abort,
+ .ls_abort = fcloop_h2t_ls_abort,
.fcp_abort = fcloop_fcp_abort,
+ .xmt_ls_rsp = fcloop_t2h_xmt_ls_rsp,
.max_hw_queues = FCLOOP_HW_QUEUES,
.max_sgl_segments = FCLOOP_SGL_SEGS,
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
@@ -872,11 +1067,14 @@ static struct nvme_fc_port_template fctemplate = {
static struct nvmet_fc_target_template tgttemplate = {
.targetport_delete = fcloop_targetport_delete,
- .xmt_ls_rsp = fcloop_xmt_ls_rsp,
+ .xmt_ls_rsp = fcloop_h2t_xmt_ls_rsp,
.fcp_op = fcloop_fcp_op,
.fcp_abort = fcloop_tgt_fcp_abort,
.fcp_req_release = fcloop_fcp_req_release,
.discovery_event = fcloop_tgt_discovery_evt,
+ .ls_req = fcloop_t2h_ls_req,
+ .ls_abort = fcloop_t2h_ls_abort,
+ .host_release = fcloop_t2h_host_release,
.max_hw_queues = FCLOOP_HW_QUEUES,
.max_sgl_segments = FCLOOP_SGL_SEGS,
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
@@ -885,6 +1083,7 @@ static struct nvmet_fc_target_template tgttemplate = {
.target_features = 0,
/* sizes of additional private data for data structures */
.target_priv_sz = sizeof(struct fcloop_tport),
+ .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
};
static ssize_t
@@ -1136,6 +1335,9 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
rport->nport = nport;
rport->lport = nport->lport;
nport->rport = rport;
+ spin_lock_init(&rport->lock);
+ INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
+ INIT_LIST_HEAD(&rport->ls_list);
return count;
}
@@ -1231,6 +1433,9 @@ fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
tport->nport = nport;
tport->lport = nport->lport;
nport->tport = tport;
+ spin_lock_init(&tport->lock);
+ INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
+ INIT_LIST_HEAD(&tport->ls_list);
return count;
}
@@ -1292,6 +1497,34 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
return ret ? ret : count;
}
+static ssize_t
+fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int opcode;
+ int starting, amount;
+
+ if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
+ return -EBADRQC;
+
+ drop_current_cnt = 0;
+ drop_fabric_opcode = (opcode & ~DROP_OPCODE_MASK) ? true : false;
+ drop_opcode = (opcode & DROP_OPCODE_MASK);
+ drop_instance = starting;
+ /* The drop-check routine uses instance + count to know when
+ * to end. Thus, if dropping 1 instance, the count should be 0,
+ * so subtract 1 from the count.
+ */
+ drop_amount = amount - 1;
+
+ pr_info("%s: DROP: Starting at instance %d of%s opcode x%x drop +%d "
+ "instances\n",
+ __func__, drop_instance, drop_fabric_opcode ? " fabric" : "",
+ drop_opcode, drop_amount);
+
+ return count;
+}
+
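
The attribute handler above takes a single "opcode:starting:amount" string. A small userspace sketch of the same sscanf() parse and the derived drop parameters (variable names mirror the driver's, but this program is illustrative only) is:

#include <stdbool.h>
#include <stdio.h>

#define DROP_OPCODE_MASK	0x00FF

int main(void)
{
	/* Illustrative input: drop one instance of fabrics fctype 0x04,
	 * starting with the 2nd matching command seen.
	 */
	const char *buf = "104:2:1";
	unsigned int opcode;
	int starting, amount;

	if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
		return 1;

	bool fabric = (opcode & ~DROP_OPCODE_MASK) ? true : false;
	int drop_opcode = opcode & DROP_OPCODE_MASK;
	int drop_instance = starting;
	int drop_amount = amount - 1;	/* drop check ends at instance + amount */

	printf("fabrics=%d opcode=0x%x start=%d extra=%d\n",
	       fabric, drop_opcode, drop_instance, drop_amount);
	return 0;
}

Assuming the control device is exposed as /sys/class/fcloop/ctl (the path is not shown in this diff), writing "104:2:1" to set_cmd_drop arms the filter so that the second matching fabrics command with fctype 0x04 is dropped, after which check_for_drop() disarms itself.
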
static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
@@ -1299,6 +1532,7 @@ static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
+static DEVICE_ATTR(set_cmd_drop, 0200, NULL, fcloop_set_cmd_drop);
static struct attribute *fcloop_dev_attrs[] = {
&dev_attr_add_local_port.attr,
@@ -1307,10 +1541,11 @@ static struct attribute *fcloop_dev_attrs[] = {
&dev_attr_del_remote_port.attr,
&dev_attr_add_target_port.attr,
&dev_attr_del_target_port.attr,
+ &dev_attr_set_cmd_drop.attr,
NULL
};
-static struct attribute_group fclopp_dev_attrs_group = {
+static const struct attribute_group fclopp_dev_attrs_group = {
.attrs = fcloop_dev_attrs,
};
@@ -1354,8 +1589,8 @@ out_destroy_class:
static void __exit fcloop_exit(void)
{
- struct fcloop_lport *lport;
- struct fcloop_nport *nport;
+ struct fcloop_lport *lport = NULL;
+ struct fcloop_nport *nport = NULL;
struct fcloop_tport *tport;
struct fcloop_rport *rport;
unsigned long flags;
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index ea0e596be15d..c2d6cea0236b 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -5,16 +5,16 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
+#include <linux/memremap.h>
#include <linux/module.h>
#include "nvmet.h"
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
{
- const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
- /* Number of logical blocks per physical block. */
- const u32 lpp = ql->physical_block_size / ql->logical_block_size;
/* Logical blocks per physical block, 0's based. */
- const __le16 lpp0b = to0based(lpp);
+ const __le16 lpp0b = to0based(bdev_physical_block_size(bdev) /
+ bdev_logical_block_size(bdev));
/*
* For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
@@ -40,17 +40,50 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
/* NPWA = Namespace Preferred Write Alignment. 0's based */
id->npwa = id->npwg;
/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
- id->npdg = to0based(ql->discard_granularity / ql->logical_block_size);
+ id->npdg = to0based(bdev_discard_granularity(bdev) /
+ bdev_logical_block_size(bdev));
/* NPDG = Namespace Preferred Deallocate Alignment */
id->npda = id->npdg;
/* NOWS = Namespace Optimal Write Size */
- id->nows = to0based(ql->io_opt / ql->logical_block_size);
+ id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
+}
+
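
As a quick sanity check of the 0's-based limit fields computed above, a standalone program with a local copy of to0based() (defined later in nvmet.h) and illustrative queue limits prints what a device with 512-byte logical / 4096-byte physical blocks, 4096-byte discard granularity and a 1 MiB optimal I/O size would report:

#include <stdint.h>
#include <stdio.h>

/* Local copy of the target's to0based() helper (byte swapping omitted):
 * clamp a 32-bit count to [1, 65536] and convert it to 0's based.
 */
static uint16_t to0based(uint32_t a)
{
	if (a < 1)
		a = 1;
	if (a > 1U << 16)
		a = 1U << 16;
	return (uint16_t)(a - 1);
}

int main(void)
{
	uint32_t logical = 512, physical = 4096;	/* block sizes */
	uint32_t discard_gran = 4096, io_opt = 1U << 20;

	printf("lpp0b = %u\n", (unsigned)to0based(physical / logical));     /* 7 */
	printf("npdg  = %u\n", (unsigned)to0based(discard_gran / logical)); /* 7 */
	printf("nows  = %u\n", (unsigned)to0based(io_opt / logical));       /* 2047 */
	return 0;
}
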
+void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
+{
+ if (ns->bdev) {
+ blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
+ ns->bdev = NULL;
+ }
+}
+
+static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
+{
+ struct blk_integrity *bi = bdev_get_integrity(ns->bdev);
+
+ if (bi) {
+ ns->metadata_size = bi->tuple_size;
+ if (bi->profile == &t10_pi_type1_crc)
+ ns->pi_type = NVME_NS_DPS_PI_TYPE1;
+ else if (bi->profile == &t10_pi_type3_crc)
+ ns->pi_type = NVME_NS_DPS_PI_TYPE3;
+ else
+ /* Unsupported metadata type */
+ ns->metadata_size = 0;
+ }
}
int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
int ret;
+ /*
+ * When the buffered_io namespace attribute is enabled, the user wants
+ * this block device to be used as a file, so the block device can
+ * take advantage of the page cache.
+ */
+ if (ns->buffered_io)
+ return -ENOTBLK;
+
ns->bdev = blkdev_get_by_path(ns->device_path,
FMODE_READ | FMODE_WRITE, NULL);
if (IS_ERR(ns->bdev)) {
@@ -62,20 +95,31 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
ns->bdev = NULL;
return ret;
}
- ns->size = i_size_read(ns->bdev->bd_inode);
+ ns->size = bdev_nr_bytes(ns->bdev);
ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
+
+ ns->pi_type = 0;
+ ns->metadata_size = 0;
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10))
+ nvmet_bdev_ns_enable_integrity(ns);
+
+ if (bdev_is_zoned(ns->bdev)) {
+ if (!nvmet_bdev_zns_enable(ns)) {
+ nvmet_bdev_ns_disable(ns);
+ return -EINVAL;
+ }
+ ns->csi = NVME_CSI_ZNS;
+ }
+
return 0;
}
-void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
+void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
{
- if (ns->bdev) {
- blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
- ns->bdev = NULL;
- }
+ ns->size = bdev_nr_bytes(ns->bdev);
}
-static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
+u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
{
u16 status = NVME_SC_SUCCESS;
@@ -112,7 +156,6 @@ static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
req->error_loc = offsetof(struct nvme_rw_command, nsid);
break;
case BLK_STS_IOERR:
- /* fallthru */
default:
status = NVME_SC_INTERNAL | NVME_SC_DNR;
req->error_loc = offsetof(struct nvme_common_command, opcode);
@@ -138,20 +181,77 @@ static void nvmet_bio_done(struct bio *bio)
struct nvmet_req *req = bio->bi_private;
nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
- if (bio != &req->b.inline_bio)
- bio_put(bio);
+ nvmet_req_bio_put(req, bio);
}
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
+ struct sg_mapping_iter *miter)
+{
+ struct blk_integrity *bi;
+ struct bio_integrity_payload *bip;
+ int rc;
+ size_t resid, len;
+
+ bi = bdev_get_integrity(req->ns->bdev);
+ if (unlikely(!bi)) {
+ pr_err("Unable to locate bio_integrity\n");
+ return -ENODEV;
+ }
+
+ bip = bio_integrity_alloc(bio, GFP_NOIO,
+ bio_max_segs(req->metadata_sg_cnt));
+ if (IS_ERR(bip)) {
+ pr_err("Unable to allocate bio_integrity_payload\n");
+ return PTR_ERR(bip);
+ }
+
+ bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
+ /* virtual start sector must be in integrity interval units */
+ bip_set_seed(bip, bio->bi_iter.bi_sector >>
+ (bi->interval_exp - SECTOR_SHIFT));
+
+ resid = bip->bip_iter.bi_size;
+ while (resid > 0 && sg_miter_next(miter)) {
+ len = min_t(size_t, miter->length, resid);
+ rc = bio_integrity_add_page(bio, miter->page, len,
+ offset_in_page(miter->addr));
+ if (unlikely(rc != len)) {
+ pr_err("bio_integrity_add_page() failed; %d\n", rc);
+ sg_miter_stop(miter);
+ return -ENOMEM;
+ }
+
+ resid -= len;
+ if (len < miter->length)
+ miter->consumed -= miter->length - len;
+ }
+ sg_miter_stop(miter);
+
+ return 0;
+}
+#else
+static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
+ struct sg_mapping_iter *miter)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
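
For orientation, the amount of protection information attached to a bio by the helper above scales with the number of integrity intervals the bio covers. With an assumed 4096-byte integrity interval and an 8-byte tuple, a 64 KiB bio needs 16 tuples, i.e. 128 bytes of PI:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bio_bytes = 64 * 1024;	/* illustrative bio size */
	uint32_t interval = 4096;	/* integrity interval (assumed) */
	uint32_t tuple_size = 8;	/* 8-byte T10 PI tuple */

	uint32_t intervals = bio_bytes / interval;
	uint32_t pi_bytes = intervals * tuple_size;

	printf("%u intervals -> %u bytes of PI\n", intervals, pi_bytes);
	return 0;
}
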
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
- int sg_cnt = req->sg_cnt;
+ unsigned int sg_cnt = req->sg_cnt;
struct bio *bio;
struct scatterlist *sg;
struct blk_plug plug;
sector_t sector;
- int op, i;
+ blk_opf_t opf;
+ int i, rc;
+ struct sg_mapping_iter prot_miter;
+ unsigned int iter_flags;
+ unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len;
- if (!nvmet_check_data_len(req, nvmet_rw_len(req)))
+ if (!nvmet_check_transfer_len(req, total_len))
return;
if (!req->sg_cnt) {
@@ -160,41 +260,54 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
}
if (req->cmd->rw.opcode == nvme_cmd_write) {
- op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
+ opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
- op |= REQ_FUA;
+ opf |= REQ_FUA;
+ iter_flags = SG_MITER_TO_SG;
} else {
- op = REQ_OP_READ;
+ opf = REQ_OP_READ;
+ iter_flags = SG_MITER_FROM_SG;
}
if (is_pci_p2pdma_page(sg_page(req->sg)))
- op |= REQ_NOMERGE;
+ opf |= REQ_NOMERGE;
- sector = le64_to_cpu(req->cmd->rw.slba);
- sector <<= (req->ns->blksize_shift - 9);
+ sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
- if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
+ if (nvmet_use_inline_bvec(req)) {
bio = &req->b.inline_bio;
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ bio_init(bio, req->ns->bdev, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), opf);
} else {
- bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+ bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf,
+ GFP_KERNEL);
}
- bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
- bio->bi_opf = op;
blk_start_plug(&plug);
+ if (req->metadata_len)
+ sg_miter_start(&prot_miter, req->metadata_sg,
+ req->metadata_sg_cnt, iter_flags);
+
for_each_sg(req->sg, sg, req->sg_cnt, i) {
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
struct bio *prev = bio;
- bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
- bio_set_dev(bio, req->ns->bdev);
+ if (req->metadata_len) {
+ rc = nvmet_bdev_alloc_bip(req, bio,
+ &prot_miter);
+ if (unlikely(rc)) {
+ bio_io_error(bio);
+ return;
+ }
+ }
+
+ bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt),
+ opf, GFP_KERNEL);
bio->bi_iter.bi_sector = sector;
- bio->bi_opf = op;
bio_chain(bio, prev);
submit_bio(prev);
@@ -204,6 +317,14 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
sg_cnt--;
}
+ if (req->metadata_len) {
+ rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter);
+ if (unlikely(rc)) {
+ bio_io_error(bio);
+ return;
+ }
+ }
+
submit_bio(bio);
blk_finish_plug(&plug);
}
@@ -212,21 +333,28 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
struct bio *bio = &req->b.inline_bio;
- if (!nvmet_check_data_len(req, 0))
+ if (!bdev_write_cache(req->ns->bdev)) {
+ nvmet_req_complete(req, NVME_SC_SUCCESS);
return;
+ }
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
- bio_set_dev(bio, req->ns->bdev);
+ if (!nvmet_check_transfer_len(req, 0))
+ return;
+
+ bio_init(bio, req->ns->bdev, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), REQ_OP_WRITE | REQ_PREFLUSH);
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
submit_bio(bio);
}
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
- if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
+ if (!bdev_write_cache(req->ns->bdev))
+ return 0;
+
+ if (blkdev_issue_flush(req->ns->bdev))
return NVME_SC_INTERNAL | NVME_SC_DNR;
return 0;
}
@@ -238,9 +366,9 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
int ret;
ret = __blkdev_issue_discard(ns->bdev,
- le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
+ nvmet_lba_to_sect(ns, range->slba),
le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
- GFP_KERNEL, 0, bio);
+ GFP_KERNEL, bio);
if (ret && ret != -EOPNOTSUPP) {
req->error_slba = le64_to_cpu(range->slba);
return errno_to_nvme_status(req, ret);
@@ -304,11 +432,10 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
sector_t nr_sector;
int ret;
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
- sector = le64_to_cpu(write_zeroes->slba) <<
- (req->ns->blksize_shift - 9);
+ sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
(req->ns->blksize_shift - 9));
@@ -325,12 +452,12 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
- struct nvme_command *cmd = req->cmd;
-
- switch (cmd->common.opcode) {
+ switch (req->cmd->common.opcode) {
case nvme_cmd_read:
case nvme_cmd_write:
req->execute = nvmet_bdev_execute_rw;
+ if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))
+ req->metadata_len = nvmet_rw_metadata_len(req);
return 0;
case nvme_cmd_flush:
req->execute = nvmet_bdev_execute_flush;
@@ -342,9 +469,6 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
req->execute = nvmet_bdev_execute_write_zeroes;
return 0;
default:
- pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
- req->sq->qid);
- req->error_loc = offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return nvmet_report_invalid_opcode(req);
}
}
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index cd5670b83118..64b47e2a4633 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -8,11 +8,17 @@
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
+#include <linux/fs.h>
#include "nvmet.h"
#define NVMET_MAX_MPOOL_BVEC 16
#define NVMET_MIN_MPOOL_OBJ 16
+void nvmet_file_ns_revalidate(struct nvmet_ns *ns)
+{
+ ns->size = i_size_read(ns->file->f_mapping->host);
+}
+
void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
if (ns->file) {
@@ -30,25 +36,22 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns)
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
int flags = O_RDWR | O_LARGEFILE;
- struct kstat stat;
- int ret;
+ int ret = 0;
if (!ns->buffered_io)
flags |= O_DIRECT;
ns->file = filp_open(ns->device_path, flags, 0);
if (IS_ERR(ns->file)) {
- pr_err("failed to open file %s: (%ld)\n",
- ns->device_path, PTR_ERR(ns->file));
- return PTR_ERR(ns->file);
+ ret = PTR_ERR(ns->file);
+ pr_err("failed to open file %s: (%d)\n",
+ ns->device_path, ret);
+ ns->file = NULL;
+ return ret;
}
- ret = vfs_getattr(&ns->file->f_path,
- &stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
- if (ret)
- goto err;
+ nvmet_file_ns_revalidate(ns);
- ns->size = stat.size;
/*
* i_blkbits can be greater than the universally accepted upper bound,
* so make sure we export a sane namespace lba_shift.
@@ -109,12 +112,12 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
iocb->ki_pos = pos;
iocb->ki_filp = req->ns->file;
- iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);
+ iocb->ki_flags = ki_flags | iocb->ki_filp->f_iocb_flags;
return call_iter(iocb, &iter);
}
-static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
+static void nvmet_file_io_done(struct kiocb *iocb, long ret)
{
struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
u16 status = NVME_SC_SUCCESS;
@@ -211,7 +214,7 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
}
complete:
- nvmet_file_io_done(&req->f.iocb, ret, 0);
+ nvmet_file_io_done(&req->f.iocb, ret);
return true;
}
@@ -232,7 +235,7 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
{
ssize_t nr_bvec = req->sg_cnt;
- if (!nvmet_check_data_len(req, nvmet_rw_len(req)))
+ if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
return;
if (!req->sg_cnt || !nr_bvec) {
@@ -255,7 +258,8 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
if (req->ns->buffered_io) {
if (likely(!req->f.mpool_alloc) &&
- nvmet_file_execute_io(req, IOCB_NOWAIT))
+ (req->ns->file->f_mode & FMODE_NOWAIT) &&
+ nvmet_file_execute_io(req, IOCB_NOWAIT))
return;
nvmet_file_submit_buffered_io(req);
} else
@@ -276,10 +280,10 @@ static void nvmet_file_flush_work(struct work_struct *w)
static void nvmet_file_execute_flush(struct nvmet_req *req)
{
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
INIT_WORK(&req->f.work, nvmet_file_flush_work);
- schedule_work(&req->f.work);
+ queue_work(nvmet_wq, &req->f.work);
}
static void nvmet_file_execute_discard(struct nvmet_req *req)
@@ -339,7 +343,7 @@ static void nvmet_file_execute_dsm(struct nvmet_req *req)
if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
return;
INIT_WORK(&req->f.work, nvmet_file_dsm_work);
- schedule_work(&req->f.work);
+ queue_work(nvmet_wq, &req->f.work);
}
static void nvmet_file_write_zeroes_work(struct work_struct *w)
@@ -366,17 +370,15 @@ static void nvmet_file_write_zeroes_work(struct work_struct *w)
static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
- if (!nvmet_check_data_len(req, 0))
+ if (!nvmet_check_transfer_len(req, 0))
return;
INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
- schedule_work(&req->f.work);
+ queue_work(nvmet_wq, &req->f.work);
}
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
- struct nvme_command *cmd = req->cmd;
-
- switch (cmd->common.opcode) {
+ switch (req->cmd->common.opcode) {
case nvme_cmd_read:
case nvme_cmd_write:
req->execute = nvmet_file_execute_rw;
@@ -391,9 +393,6 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
req->execute = nvmet_file_execute_write_zeroes;
return 0;
default:
- pr_err("unhandled cmd for file ns %d on qid %d\n",
- cmd->common.opcode, req->sq->qid);
- req->error_loc = offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return nvmet_report_invalid_opcode(req);
}
}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 4df4ebde208a..b45fe3adf015 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -36,7 +36,6 @@ struct nvme_loop_ctrl {
struct nvme_loop_iod async_event_iod;
struct nvme_ctrl ctrl;
- struct nvmet_ctrl *target_ctrl;
struct nvmet_port *port;
};
@@ -108,15 +107,16 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
} else {
struct request *rq;
- rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
+ rq = nvme_find_rq(nvme_loop_tagset(queue), cqe->command_id);
if (!rq) {
dev_err(queue->ctrl->ctrl.device,
- "tag 0x%x on queue %d not found\n",
+ "got bad command_id %#x on queue %d\n",
cqe->command_id, nvme_loop_queue_idx(queue));
return;
}
- nvme_end_request(rq, cqe->status, cqe->result);
+ if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
+ nvme_loop_complete_rq(rq);
}
}
@@ -138,10 +138,10 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
blk_status_t ret;
- if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
- return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
+ if (!nvme_check_ready(&queue->ctrl->ctrl, req, queue_ready))
+ return nvme_fail_nonready_command(&queue->ctrl->ctrl, req);
- ret = nvme_setup_cmd(ns, req, &iod->cmd);
+ ret = nvme_setup_cmd(ns, req);
if (ret)
return ret;
@@ -166,7 +166,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
iod->req.transfer_len = blk_rq_payload_bytes(req);
}
- schedule_work(&iod->work);
+ queue_work(nvmet_wq, &iod->work);
return BLK_STS_OK;
}
@@ -187,7 +187,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
return;
}
- schedule_work(&iod->work);
+ queue_work(nvmet_wq, &iod->work);
}
static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
@@ -204,21 +204,33 @@ static int nvme_loop_init_request(struct blk_mq_tag_set *set,
struct request *req, unsigned int hctx_idx,
unsigned int numa_node)
{
- struct nvme_loop_ctrl *ctrl = set->driver_data;
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(set->driver_data);
+ struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
nvme_req(req)->ctrl = &ctrl->ctrl;
+ nvme_req(req)->cmd = &iod->cmd;
return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}
+static struct lock_class_key loop_hctx_fq_lock_key;
+
static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_loop_ctrl *ctrl = data;
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];
BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
+ /*
+ * flush_end_io() can be called recursively for us, so use our own
+ * lock class key to avoid a false lockdep report of possible recursive
+ * locking; this also lets us drop the dynamically allocated lock class
+ * for each flush queue, which could otherwise cause a horrible boot delay.
+ */
+ blk_mq_hctx_set_fq_lock_class(hctx, &loop_hctx_fq_lock_key);
+
hctx->driver_data = queue;
return 0;
}
@@ -226,7 +238,7 @@ static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_loop_ctrl *ctrl = data;
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
struct nvme_loop_queue *queue = &ctrl->queues[0];
BUG_ON(hctx_idx != 0);
@@ -251,11 +263,10 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
- clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+ if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
+ return;
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
- blk_cleanup_queue(ctrl->ctrl.admin_q);
- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
- blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
}
static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
@@ -269,10 +280,8 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
list_del(&ctrl->list);
mutex_unlock(&nvme_loop_ctrl_mutex);
- if (nctrl->tagset) {
- blk_cleanup_queue(ctrl->ctrl.connect_q);
- blk_mq_free_tag_set(&ctrl->tag_set);
- }
+ if (nctrl->tagset)
+ nvme_remove_io_tag_set(nctrl);
kfree(ctrl->queues);
nvmf_free_options(nctrl->opts);
free_ctrl:
@@ -287,6 +296,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
}
+ ctrl->ctrl.queue_count = 1;
}
static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -323,7 +333,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
int i, ret;
for (i = 1; i < ctrl->ctrl.queue_count; i++) {
- ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
if (ret)
return ret;
set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
@@ -336,68 +346,46 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
int error;
- memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
- ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
- ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
- ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
- ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
- NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
- ctrl->admin_tag_set.driver_data = ctrl;
- ctrl->admin_tag_set.nr_hw_queues = 1;
- ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
- ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
-
ctrl->queues[0].ctrl = ctrl;
error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
if (error)
return error;
ctrl->ctrl.queue_count = 1;
- error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+ error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+ &nvme_loop_admin_mq_ops, BLK_MQ_F_NO_SCHED,
+ sizeof(struct nvme_loop_iod) +
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
if (error)
goto out_free_sq;
- ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
- ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.fabrics_q)) {
- error = PTR_ERR(ctrl->ctrl.fabrics_q);
- goto out_free_tagset;
- }
-
- ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.admin_q)) {
- error = PTR_ERR(ctrl->ctrl.admin_q);
- goto out_cleanup_fabrics_q;
- }
+ /* reset stopped state for the fresh admin queue */
+ clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags);
error = nvmf_connect_admin_queue(&ctrl->ctrl);
if (error)
- goto out_cleanup_queue;
+ goto out_cleanup_tagset;
set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
error = nvme_enable_ctrl(&ctrl->ctrl);
if (error)
- goto out_cleanup_queue;
+ goto out_cleanup_tagset;
ctrl->ctrl.max_hw_sectors =
(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_start_admin_queue(&ctrl->ctrl);
- error = nvme_init_identify(&ctrl->ctrl);
+ error = nvme_init_ctrl_finish(&ctrl->ctrl);
if (error)
- goto out_cleanup_queue;
+ goto out_cleanup_tagset;
return 0;
-out_cleanup_queue:
- blk_cleanup_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
- blk_cleanup_queue(ctrl->ctrl.fabrics_q);
-out_free_tagset:
- blk_mq_free_tag_set(&ctrl->admin_tag_set);
+out_cleanup_tagset:
+ clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
out_free_sq:
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
return error;
@@ -407,19 +395,15 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
if (ctrl->ctrl.queue_count > 1) {
nvme_stop_queues(&ctrl->ctrl);
- blk_mq_tagset_busy_iter(&ctrl->tag_set,
- nvme_cancel_request, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
+ nvme_cancel_tagset(&ctrl->ctrl);
nvme_loop_destroy_io_queues(ctrl);
}
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_stop_admin_queue(&ctrl->ctrl);
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
nvme_shutdown_ctrl(&ctrl->ctrl);
- blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
- nvme_cancel_request, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
nvme_loop_destroy_admin_queue(ctrl);
}
@@ -444,15 +428,16 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
struct nvme_loop_ctrl *ctrl =
container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
- bool changed;
int ret;
nvme_stop_ctrl(&ctrl->ctrl);
nvme_loop_shutdown_ctrl(ctrl);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
- /* state change failure should never happen */
- WARN_ON_ONCE(1);
+ if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
+ ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
+ /* state change failure for non-deleted ctrl? */
+ WARN_ON_ONCE(1);
return;
}
@@ -471,8 +456,8 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
blk_mq_update_nr_hw_queues(&ctrl->tag_set,
ctrl->ctrl.queue_count - 1);
- changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
- WARN_ON_ONCE(!changed);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
+ WARN_ON_ONCE(1);
nvme_start_ctrl(&ctrl->ctrl);
@@ -485,7 +470,6 @@ out_destroy_admin:
out_disable:
dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
nvme_uninit_ctrl(&ctrl->ctrl);
- nvme_put_ctrl(&ctrl->ctrl);
}
static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
@@ -509,39 +493,21 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
if (ret)
return ret;
- memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
- ctrl->tag_set.ops = &nvme_loop_mq_ops;
- ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
- ctrl->tag_set.reserved_tags = 1; /* fabric connect */
- ctrl->tag_set.numa_node = NUMA_NO_NODE;
- ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
- NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
- ctrl->tag_set.driver_data = ctrl;
- ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
- ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
- ctrl->ctrl.tagset = &ctrl->tag_set;
-
- ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+ ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+ &nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+ sizeof(struct nvme_loop_iod) +
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
if (ret)
goto out_destroy_queues;
- ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
- if (IS_ERR(ctrl->ctrl.connect_q)) {
- ret = PTR_ERR(ctrl->ctrl.connect_q);
- goto out_free_tagset;
- }
-
ret = nvme_loop_connect_io_queues(ctrl);
if (ret)
- goto out_cleanup_connect_q;
+ goto out_cleanup_tagset;
return 0;
-out_cleanup_connect_q:
- blk_cleanup_queue(ctrl->ctrl.connect_q);
-out_free_tagset:
- blk_mq_free_tag_set(&ctrl->tag_set);
+out_cleanup_tagset:
+ nvme_remove_io_tag_set(&ctrl->ctrl);
out_destroy_queues:
nvme_loop_destroy_io_queues(ctrl);
return ret;
@@ -568,7 +534,6 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
struct nvmf_ctrl_options *opts)
{
struct nvme_loop_ctrl *ctrl;
- bool changed;
int ret;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
@@ -581,12 +546,16 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
0 /* no quirks, we're perfect! */);
- if (ret)
- goto out_put_ctrl;
+ if (ret) {
+ kfree(ctrl);
+ goto out;
+ }
+
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
+ WARN_ON_ONCE(1);
ret = -ENOMEM;
- ctrl->ctrl.sqsize = opts->queue_size - 1;
ctrl->ctrl.kato = opts->kato;
ctrl->port = nvme_loop_find_port(&ctrl->ctrl);
@@ -606,6 +575,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
opts->queue_size, ctrl->ctrl.maxcmd);
opts->queue_size = ctrl->ctrl.maxcmd;
}
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
if (opts->nr_io_queues) {
ret = nvme_loop_create_io_queues(ctrl);
@@ -618,10 +588,8 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
dev_info(ctrl->ctrl.device,
"new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);
- nvme_get_ctrl(&ctrl->ctrl);
-
- changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
- WARN_ON_ONCE(!changed);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
+ WARN_ON_ONCE(1);
mutex_lock(&nvme_loop_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
@@ -637,8 +605,8 @@ out_free_queues:
kfree(ctrl->queues);
out_uninit_ctrl:
nvme_uninit_ctrl(&ctrl->ctrl);
-out_put_ctrl:
nvme_put_ctrl(&ctrl->ctrl);
+out:
if (ret > 0)
ret = -EIO;
return ERR_PTR(ret);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index eda28b22a2c8..dfe3894205aa 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -19,10 +19,16 @@
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
+#include <linux/t10-pi.h>
+
+#define NVMET_DEFAULT_VS NVME_VS(1, 3, 0)
#define NVMET_ASYNC_EVENTS 4
#define NVMET_ERROR_LOG_SLOTS 128
#define NVMET_NO_ERROR_LOC ((u16)-1)
+#define NVMET_DEFAULT_CTRL_MODEL "Linux"
+#define NVMET_MN_MAX_SIZE 40
+#define NVMET_SN_MAX_SIZE 20
/*
* Supported optional AENs:
@@ -50,7 +56,6 @@
(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
struct nvmet_ns {
- struct list_head dev_link;
struct percpu_ref ref;
struct block_device *bdev;
struct file *file;
@@ -76,6 +81,9 @@ struct nvmet_ns {
int use_p2pmem;
struct pci_dev *p2p_dev;
+ int pi_type;
+ int metadata_size;
+ u8 csi;
};
static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
@@ -100,6 +108,19 @@ struct nvmet_sq {
u16 size;
u32 sqhd;
bool sqhd_disabled;
+#ifdef CONFIG_NVME_TARGET_AUTH
+ struct delayed_work auth_expired_work;
+ bool authenticated;
+ u16 dhchap_tid;
+ u16 dhchap_status;
+ int dhchap_step;
+ u8 *dhchap_c1;
+ u8 *dhchap_c2;
+ u32 dhchap_s1;
+ u32 dhchap_s2;
+ u8 *dhchap_skey;
+ int dhchap_skey_len;
+#endif
struct completion free_done;
struct completion confirm_done;
};
@@ -141,6 +162,7 @@ struct nvmet_port {
bool enabled;
int inline_data_size;
const struct nvmet_fabrics_ops *tr_ops;
+ bool pi_enable;
};
static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
@@ -158,10 +180,9 @@ static inline struct nvmet_port *ana_groups_to_port(
struct nvmet_ctrl {
struct nvmet_subsys *subsys;
- struct nvmet_cq **cqs;
struct nvmet_sq **sqs;
- bool cmd_seen;
+ bool reset_tbkas;
struct mutex lock;
u64 cap;
@@ -200,6 +221,16 @@ struct nvmet_ctrl {
spinlock_t error_lock;
u64 err_counter;
struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS];
+ bool pi_support;
+#ifdef CONFIG_NVME_TARGET_AUTH
+ struct nvme_dhchap_key *host_key;
+ struct nvme_dhchap_key *ctrl_key;
+ u8 shash_id;
+ struct crypto_kpp *dh_tfm;
+ u8 dh_gid;
+ u8 *dh_key;
+ size_t dh_keysize;
+#endif
};
struct nvmet_subsys {
@@ -208,9 +239,11 @@ struct nvmet_subsys {
struct mutex lock;
struct kref ref;
- struct list_head namespaces;
+ struct xarray namespaces;
unsigned int nr_namespaces;
- unsigned int max_nsid;
+ u32 max_nsid;
+ u16 cntlid_min;
+ u16 cntlid_max;
struct list_head ctrls;
@@ -220,13 +253,30 @@ struct nvmet_subsys {
u16 max_qid;
u64 ver;
- u64 serial;
+ char serial[NVMET_SN_MAX_SIZE];
+ bool subsys_discovered;
char *subsysnqn;
+ bool pi_support;
struct config_group group;
struct config_group namespaces_group;
struct config_group allowed_hosts_group;
+
+ char *model_number;
+
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+ struct nvme_ctrl *passthru_ctrl;
+ char *passthru_ctrl_path;
+ struct config_group passthru_group;
+ unsigned int admin_timeout;
+ unsigned int io_timeout;
+ unsigned int clear_ids;
+#endif /* CONFIG_NVME_TARGET_PASSTHRU */
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ u8 zasl;
+#endif /* CONFIG_BLK_DEV_ZONED */
};
static inline struct nvmet_subsys *to_subsys(struct config_item *item)
@@ -243,6 +293,12 @@ static inline struct nvmet_subsys *namespaces_to_subsys(
struct nvmet_host {
struct config_group group;
+ u8 *dhchap_secret;
+ u8 *dhchap_ctrl_secret;
+ u8 dhchap_key_hash;
+ u8 dhchap_ctrl_key_hash;
+ u8 dhchap_hash_id;
+ u8 dhchap_dhgroup_id;
};
static inline struct nvmet_host *to_host(struct config_item *item)
@@ -270,7 +326,9 @@ struct nvmet_fabrics_ops {
struct module *owner;
unsigned int type;
unsigned int msdbd;
- bool has_keyed_sgls : 1;
+ unsigned int flags;
+#define NVMF_KEYED_SGLS (1 << 0)
+#define NVMF_METADATA_SUPPORTED (1 << 1)
void (*queue_response)(struct nvmet_req *req);
int (*add_port)(struct nvmet_port *port);
void (*remove_port)(struct nvmet_port *port);
@@ -279,6 +337,8 @@ struct nvmet_fabrics_ops {
struct nvmet_port *port, char *traddr);
u16 (*install_queue)(struct nvmet_sq *nvme_sq);
void (*discovery_chg)(struct nvmet_port *port);
+ u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
+ u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
};
#define NVMET_MAX_INLINE_BIOVEC 8
@@ -291,6 +351,7 @@ struct nvmet_req {
struct nvmet_cq *cq;
struct nvmet_ns *ns;
struct scatterlist *sg;
+ struct scatterlist *metadata_sg;
struct bio_vec inline_bvec[NVMET_MAX_INLINE_BIOVEC];
union {
struct {
@@ -302,10 +363,24 @@ struct nvmet_req {
struct bio_vec *bvec;
struct work_struct work;
} f;
+ struct {
+ struct bio inline_bio;
+ struct request *rq;
+ struct work_struct work;
+ bool use_workqueue;
+ } p;
+#ifdef CONFIG_BLK_DEV_ZONED
+ struct {
+ struct bio inline_bio;
+ struct work_struct zmgmt_work;
+ } z;
+#endif /* CONFIG_BLK_DEV_ZONED */
};
int sg_cnt;
+ int metadata_sg_cnt;
/* data length as parsed from the SGL descriptor: */
size_t transfer_len;
+ size_t metadata_len;
struct nvmet_port *port;
@@ -319,6 +394,8 @@ struct nvmet_req {
};
extern struct workqueue_struct *buffered_io_wq;
+extern struct workqueue_struct *zbd_wq;
+extern struct workqueue_struct *nvmet_wq;
static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
@@ -361,24 +438,30 @@ void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
+void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
+void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);
u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
+u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
-u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
+u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
+u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
-bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len);
+bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
-int nvmet_req_alloc_sgl(struct nvmet_req *req);
-void nvmet_req_free_sgl(struct nvmet_req *req);
+int nvmet_req_alloc_sgls(struct nvmet_req *req);
+void nvmet_req_free_sgls(struct nvmet_req *req);
+void nvmet_execute_set_features(struct nvmet_req *req);
+void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
@@ -393,17 +476,18 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
-u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
- struct nvmet_req *req, struct nvmet_ctrl **ret);
+struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
+ const char *hostnqn, u16 cntlid,
+ struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
-u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd);
+u16 nvmet_check_ctrl_status(struct nvmet_req *req);
struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
-struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
+u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
@@ -487,20 +571,88 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
-
-static inline u32 nvmet_rw_len(struct nvmet_req *req)
+void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
+void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
+bool nvmet_ns_revalidate(struct nvmet_ns *ns);
+u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);
+
+bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
+void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req);
+void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req);
+void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
+void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
+void nvmet_bdev_execute_zone_append(struct nvmet_req *req);
+
+static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
req->ns->blksize_shift;
}
+static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
+{
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
+ return 0;
+ return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
+ req->ns->metadata_size;
+}
+
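
nvmet_rw_metadata_len() scales the 0's-based rw.length by the per-block metadata size, just as nvmet_rw_data_len() above scales it by the block size. With illustrative values (4 KiB blocks, 8-byte T10 PI tuples), an 8-block read carries 32768 bytes of data plus 64 bytes of metadata:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t rw_length = 7;		/* 0's based: 8 logical blocks */
	uint32_t blksize_shift = 12;	/* 4096-byte blocks (illustrative) */
	uint32_t metadata_size = 8;	/* 8-byte T10 PI tuple */

	uint32_t data_len = ((uint32_t)rw_length + 1) << blksize_shift;
	uint32_t md_len = ((uint32_t)rw_length + 1) * metadata_size;

	printf("data=%u metadata=%u\n", data_len, md_len);	/* 32768 64 */
	return 0;
}
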
static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
sizeof(struct nvme_dsm_range);
}
+static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
+{
+ return req->sq->ctrl->subsys;
+}
+
+static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
+{
+ return subsys->type != NVME_NQN_NVME;
+}
+
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
+int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
+void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
+u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
+u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
+static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
+{
+ return subsys->passthru_ctrl;
+}
+#else /* CONFIG_NVME_TARGET_PASSTHRU */
+static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
+{
+}
+static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
+{
+}
+static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
+{
+ return 0;
+}
+static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
+{
+ return 0;
+}
+static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
+{
+ return false;
+}
+#endif /* CONFIG_NVME_TARGET_PASSTHRU */
+
+static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
+{
+ return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
+}
+
+void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);
+
u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
+u16 nvmet_report_invalid_opcode(struct nvmet_req *req);
/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
@@ -508,4 +660,86 @@ static inline __le16 to0based(u32 a)
return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}
+static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
+{
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
+ return false;
+ return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
+}
+
+static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
+{
+ return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
+}
+
+static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
+{
+ return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
+}
+
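
The two conversion helpers above only shift by the difference between the namespace block-size shift and SECTOR_SHIFT (9). A standalone round-trip with illustrative values:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT	9

int main(void)
{
	uint32_t blksize_shift = 12;	/* 4096-byte logical blocks (illustrative) */
	uint64_t lba = 16;

	uint64_t sect = lba << (blksize_shift - SECTOR_SHIFT);	/* 128 */
	uint64_t back = sect >> (blksize_shift - SECTOR_SHIFT);	/* 16 */

	printf("lba %llu -> sector %llu -> lba %llu\n",
	       (unsigned long long)lba, (unsigned long long)sect,
	       (unsigned long long)back);
	return 0;
}
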
+static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
+{
+ return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
+ req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
+}
+
+static inline void nvmet_req_cns_error_complete(struct nvmet_req *req)
+{
+ pr_debug("unhandled identify cns %d on qid %d\n",
+ req->cmd->identify.cns, req->sq->qid);
+ req->error_loc = offsetof(struct nvme_identify, cns);
+ nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+}
+
+static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
+{
+ if (bio != &req->b.inline_bio)
+ bio_put(bio);
+}
+
+#ifdef CONFIG_NVME_TARGET_AUTH
+void nvmet_execute_auth_send(struct nvmet_req *req);
+void nvmet_execute_auth_receive(struct nvmet_req *req);
+int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
+ bool set_ctrl);
+int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
+int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
+void nvmet_auth_sq_init(struct nvmet_sq *sq);
+void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
+void nvmet_auth_sq_free(struct nvmet_sq *sq);
+int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
+bool nvmet_check_auth_status(struct nvmet_req *req);
+int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ unsigned int hash_len);
+int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+ unsigned int hash_len);
+static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
+{
+ return ctrl->host_key != NULL;
+}
+int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
+ u8 *buf, int buf_size);
+int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
+ u8 *buf, int buf_size);
+#else
+static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+{
+ return 0;
+}
+static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
+{
+}
+static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {};
+static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {};
+static inline bool nvmet_check_auth_status(struct nvmet_req *req)
+{
+ return true;
+}
+static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
+{
+ return false;
+}
+static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
+#endif
+
#endif /* _NVMET_H */
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
new file mode 100644
index 000000000000..79af5140af8b
--- /dev/null
+++ b/drivers/nvme/target/passthru.c
@@ -0,0 +1,659 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe Over Fabrics Target Passthrough command implementation.
+ *
+ * Copyright (c) 2017-2018 Western Digital Corporation or its
+ * affiliates.
+ * Copyright (c) 2019-2020, Eideticom Inc.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+
+#include "../host/nvme.h"
+#include "nvmet.h"
+
+MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);
+
+/*
+ * xarray to maintain one passthru subsystem per nvme controller.
+ */
+static DEFINE_XARRAY(passthru_subsystems);
+
+void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
+{
+ /*
+ * Multiple command set support can only be declared if the underlying
+ * controller actually supports it.
+ */
+ if (!nvme_multi_css(ctrl->subsys->passthru_ctrl))
+ ctrl->cap &= ~(1ULL << 43);
+}
+
+static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u16 status = NVME_SC_SUCCESS;
+ int pos, len;
+ bool csi_seen = false;
+ void *data;
+ u8 csi;
+
+ if (!ctrl->subsys->clear_ids)
+ return status;
+
+ data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
+ if (!data)
+ return NVME_SC_INTERNAL;
+
+ status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+ if (status)
+ goto out_free;
+
+ for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
+ struct nvme_ns_id_desc *cur = data + pos;
+
+ if (cur->nidl == 0)
+ break;
+ if (cur->nidt == NVME_NIDT_CSI) {
+ memcpy(&csi, cur + 1, NVME_NIDT_CSI_LEN);
+ csi_seen = true;
+ break;
+ }
+ len = sizeof(struct nvme_ns_id_desc) + cur->nidl;
+ }
+
+ memset(data, 0, NVME_IDENTIFY_DATA_SIZE);
+ if (csi_seen) {
+ struct nvme_ns_id_desc *cur = data;
+
+ cur->nidt = NVME_NIDT_CSI;
+ cur->nidl = NVME_NIDT_CSI_LEN;
+ memcpy(cur + 1, &csi, NVME_NIDT_CSI_LEN);
+ }
+ status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+out_free:
+ kfree(data);
+ return status;
+}
+
+static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
+ u16 status = NVME_SC_SUCCESS;
+ struct nvme_id_ctrl *id;
+ unsigned int max_hw_sectors;
+ int page_shift;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return NVME_SC_INTERNAL;
+
+ status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id));
+ if (status)
+ goto out_free;
+
+ id->cntlid = cpu_to_le16(ctrl->cntlid);
+ id->ver = cpu_to_le32(ctrl->subsys->ver);
+
+	/*
+	 * The passthru NVMe driver may have a limit on the number of segments
+	 * which depends on the host's memory fragmentation. To solve this,
+	 * cap mdts so that a maximally sized transfer never needs more pages
+	 * than the driver can map as segments.
+	 */
+ max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),
+ pctrl->max_hw_sectors);
+
+	/*
+	 * nvmet_passthru_map_sg is limited to using a single bio, so limit
+	 * the mdts based on BIO_MAX_VECS as well.
+	 */
+ max_hw_sectors = min_not_zero(BIO_MAX_VECS << (PAGE_SHIFT - 9),
+ max_hw_sectors);
+
+ page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
+
+ id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;
+
+ id->acl = 3;
+	/*
+	 * We export the fabrics controller's aerl limit; update this when
+	 * passthru-based aerl support is added.
+	 */
+ id->aerl = NVMET_ASYNC_EVENTS - 1;
+
+	/* emulate kas, as most PCIe ctrls don't support it */
+ id->kas = cpu_to_le16(NVMET_KAS);
+
+ /* don't support host memory buffer */
+ id->hmpre = 0;
+ id->hmmin = 0;
+
+ id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
+ id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
+ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+
+ /* don't support fuse commands */
+ id->fuses = 0;
+
+ id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
+ if (ctrl->ops->flags & NVMF_KEYED_SGLS)
+ id->sgls |= cpu_to_le32(1 << 2);
+ if (req->port->inline_data_size)
+ id->sgls |= cpu_to_le32(1 << 20);
+
+	/*
+	 * When a passthru controller is set up using the nvme-loop transport,
+	 * it would export the passthru ctrl's subsysnqn (that of the PCIe NVMe
+	 * ctrl) and then fail in the nvme/host/core.c
+	 * nvme_init_subsystem()->nvme_active_ctrl() code path with a duplicate
+	 * ctrl subsysnqn. To prevent that, mask the passthru ctrl's subsysnqn
+	 * with the target ctrl's subsysnqn.
+	 */
+ memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));
+
+ /* use fabric id-ctrl values */
+ id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
+ req->port->inline_data_size) / 16);
+ id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
+
+ id->msdbd = ctrl->ops->msdbd;
+
+ /* Support multipath connections with fabrics */
+ id->cmic |= 1 << 1;
+
+ /* Disable reservations, see nvmet_parse_passthru_io_cmd() */
+ id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS);
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl));
+
+out_free:
+ kfree(id);
+ return status;
+}
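
A rough worked example of the mdts clamping above (all values assumed for illustration): with a passthru controller limited to 128 segments, 4KB pages (MPSMIN = 0, so page_shift = 12) and a single-bio limit of 256 vectors, max_hw_sectors ends up at 128 << 3 = 1024 512-byte sectors, and mdts = ilog2(1024) + 9 - 12 = 7, i.e. 512KB per command:

#include <stdint.h>
#include <stdio.h>

static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	/* Assumed example values, not taken from any specific device. */
	unsigned int max_segments = 128;	/* passthru ctrl segment limit */
	unsigned int bio_max_vecs = 256;	/* single-bio vector limit */
	unsigned int page_shift = 12;		/* MPSMIN == 0 -> 4KB pages */
	unsigned int max_hw_sectors, mdts;

	max_hw_sectors = max_segments << (page_shift - 9);
	if (bio_max_vecs << (page_shift - 9) < max_hw_sectors)
		max_hw_sectors = bio_max_vecs << (page_shift - 9);

	mdts = ilog2_u32(max_hw_sectors) + 9 - page_shift;
	printf("mdts=%u -> max transfer %u KB\n", mdts, (1u << mdts) * 4);
	return 0;
}
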
+
+static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
+{
+ u16 status = NVME_SC_SUCCESS;
+ struct nvme_id_ns *id;
+ int i;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ return NVME_SC_INTERNAL;
+
+ status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns));
+ if (status)
+ goto out_free;
+
+ for (i = 0; i < (id->nlbaf + 1); i++)
+ if (id->lbaf[i].ms)
+ memset(&id->lbaf[i], 0, sizeof(id->lbaf[i]));
+
+ id->flbas = id->flbas & ~(1 << 4);
+
+	/*
+	 * Presently the NVMe-oF target code does not support sending
+	 * metadata, so we must disable it here. This should be updated
+	 * once the target starts supporting metadata.
+	 */
+ id->mc = 0;
+
+ if (req->sq->ctrl->subsys->clear_ids) {
+ memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
+ memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
+ }
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
+out_free:
+ kfree(id);
+ return status;
+}
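
The override above zeroes every LBA format that carries metadata and clears bit 4 of FLBAS (the "metadata transferred at the end of the data LBA" flag), since the fabrics target cannot carry per-block metadata yet. A tiny standalone illustration of that bit manipulation (the FLBAS value is made up):

#include <stdint.h>
#include <stdio.h>

#define FLBAS_META_EXT	(1 << 4)	/* metadata at the end of the data LBA */

int main(void)
{
	uint8_t flbas = 0x13;		/* assumed: format index 3, extended-LBA metadata */

	flbas &= ~FLBAS_META_EXT;	/* keep only the format index bits */
	printf("flbas: 0x13 -> 0x%02x\n", flbas);	/* prints 0x03 */
	return 0;
}
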
+
+static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
+ struct request *rq = req->p.rq;
+ struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
+ u32 effects;
+ int status;
+
+ status = nvme_execute_passthru_rq(rq, &effects);
+
+ if (status == NVME_SC_SUCCESS &&
+ req->cmd->common.opcode == nvme_admin_identify) {
+ switch (req->cmd->identify.cns) {
+ case NVME_ID_CNS_CTRL:
+ nvmet_passthru_override_id_ctrl(req);
+ break;
+ case NVME_ID_CNS_NS:
+ nvmet_passthru_override_id_ns(req);
+ break;
+ case NVME_ID_CNS_NS_DESC_LIST:
+ nvmet_passthru_override_id_descs(req);
+ break;
+ }
+ } else if (status < 0)
+ status = NVME_SC_INTERNAL;
+
+ req->cqe->result = nvme_req(rq)->result;
+ nvmet_req_complete(req, status);
+ blk_mq_free_request(rq);
+
+ if (effects)
+ nvme_passthru_end(ctrl, effects, req->cmd, status);
+}
+
+static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
+ blk_status_t blk_status)
+{
+ struct nvmet_req *req = rq->end_io_data;
+
+ req->cqe->result = nvme_req(rq)->result;
+ nvmet_req_complete(req, nvme_req(rq)->status);
+ blk_mq_free_request(rq);
+ return RQ_END_IO_NONE;
+}
+
+static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
+{
+ struct scatterlist *sg;
+ struct bio *bio;
+ int i;
+
+ if (req->sg_cnt > BIO_MAX_VECS)
+ return -EINVAL;
+
+ if (nvmet_use_inline_bvec(req)) {
+ bio = &req->p.inline_bio;
+ bio_init(bio, NULL, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), req_op(rq));
+ } else {
+ bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq),
+ GFP_KERNEL);
+ bio->bi_end_io = bio_put;
+ }
+
+ for_each_sg(req->sg, sg, req->sg_cnt, i) {
+ if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
+ sg->offset) < sg->length) {
+ nvmet_req_bio_put(req, bio);
+ return -EINVAL;
+ }
+ }
+
+ blk_rq_bio_prep(rq, bio, req->sg_cnt);
+
+ return 0;
+}
+
+static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
+{
+ struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
+ struct request_queue *q = ctrl->admin_q;
+ struct nvme_ns *ns = NULL;
+ struct request *rq = NULL;
+ unsigned int timeout;
+ u32 effects;
+ u16 status;
+ int ret;
+
+ if (likely(req->sq->qid != 0)) {
+ u32 nsid = le32_to_cpu(req->cmd->common.nsid);
+
+ ns = nvme_find_get_ns(ctrl, nsid);
+ if (unlikely(!ns)) {
+ pr_err("failed to get passthru ns nsid:%u\n", nsid);
+ status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ goto out;
+ }
+
+ q = ns->queue;
+ timeout = nvmet_req_subsys(req)->io_timeout;
+ } else {
+ timeout = nvmet_req_subsys(req)->admin_timeout;
+ }
+
+ rq = blk_mq_alloc_request(q, nvme_req_op(req->cmd), 0);
+ if (IS_ERR(rq)) {
+ status = NVME_SC_INTERNAL;
+ goto out_put_ns;
+ }
+ nvme_init_request(rq, req->cmd);
+
+ if (timeout)
+ rq->timeout = timeout;
+
+ if (req->sg_cnt) {
+ ret = nvmet_passthru_map_sg(req, rq);
+ if (unlikely(ret)) {
+ status = NVME_SC_INTERNAL;
+ goto out_put_req;
+ }
+ }
+
+	/*
+	 * If the command we are about to execute has effects, or needs an
+	 * end_req function, use nvme_execute_passthru_rq() synchronously in
+	 * a work item, since the end_req function and nvme_passthru_end()
+	 * can't be called from the request done callback, which typically
+	 * runs in interrupt context.
+	 */
+ effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
+ if (req->p.use_workqueue || effects) {
+ INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
+ req->p.rq = rq;
+ queue_work(nvmet_wq, &req->p.work);
+ } else {
+ rq->end_io = nvmet_passthru_req_done;
+ rq->end_io_data = req;
+ blk_execute_rq_nowait(rq, false);
+ }
+
+ if (ns)
+ nvme_put_ns(ns);
+
+ return;
+
+out_put_req:
+ blk_mq_free_request(rq);
+out_put_ns:
+ if (ns)
+ nvme_put_ns(ns);
+out:
+ nvmet_req_complete(req, status);
+}
+
+/*
+ * We need to emulate set host behaviour to ensure that the behaviour
+ * requested by the target's host matches the behaviour already set on
+ * the underlying device's host, and fail otherwise.
+ */
+static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
+{
+ struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
+ struct nvme_feat_host_behavior *host;
+ u16 status = NVME_SC_INTERNAL;
+ int ret;
+
+	host = kcalloc(2, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ goto out_complete_req;
+
+ ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
+ host, sizeof(*host), NULL);
+ if (ret)
+ goto out_free_host;
+
+ status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host));
+ if (status)
+ goto out_free_host;
+
+ if (memcmp(&host[0], &host[1], sizeof(host[0]))) {
+ pr_warn("target host has requested different behaviour from the local host\n");
+ status = NVME_SC_INTERNAL;
+ }
+
+out_free_host:
+ kfree(host);
+out_complete_req:
+ nvmet_req_complete(req, status);
+}
+
+static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
+{
+ req->p.use_workqueue = false;
+ req->execute = nvmet_passthru_execute_cmd;
+ return NVME_SC_SUCCESS;
+}
+
+u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
+{
+ /* Reject any commands with non-sgl flags set (ie. fused commands) */
+ if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
+ return NVME_SC_INVALID_FIELD;
+
+ switch (req->cmd->common.opcode) {
+ case nvme_cmd_resv_register:
+ case nvme_cmd_resv_report:
+ case nvme_cmd_resv_acquire:
+ case nvme_cmd_resv_release:
+ /*
+ * Reservations cannot be supported properly because the
+ * underlying device has no way of differentiating different
+ * hosts that connect via fabrics. This could potentially be
+ * emulated in the future if regular targets grow support for
+ * this feature.
+ */
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+ return nvmet_setup_passthru_command(req);
+}
+
+/*
+ * Only features that are emulated or specifically allowed in the list are
+ * passed down to the controller. This function implements the allow list for
+ * both get and set features.
+ */
+static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
+{
+ switch (le32_to_cpu(req->cmd->features.fid)) {
+ case NVME_FEAT_ARBITRATION:
+ case NVME_FEAT_POWER_MGMT:
+ case NVME_FEAT_LBA_RANGE:
+ case NVME_FEAT_TEMP_THRESH:
+ case NVME_FEAT_ERR_RECOVERY:
+ case NVME_FEAT_VOLATILE_WC:
+ case NVME_FEAT_WRITE_ATOMIC:
+ case NVME_FEAT_AUTO_PST:
+ case NVME_FEAT_TIMESTAMP:
+ case NVME_FEAT_HCTM:
+ case NVME_FEAT_NOPSC:
+ case NVME_FEAT_RRL:
+ case NVME_FEAT_PLM_CONFIG:
+ case NVME_FEAT_PLM_WINDOW:
+ case NVME_FEAT_HOST_BEHAVIOR:
+ case NVME_FEAT_SANITIZE:
+ case NVME_FEAT_VENDOR_START ... NVME_FEAT_VENDOR_END:
+ return nvmet_setup_passthru_command(req);
+
+ case NVME_FEAT_ASYNC_EVENT:
+ /* There is no support for forwarding ASYNC events */
+ case NVME_FEAT_IRQ_COALESCE:
+ case NVME_FEAT_IRQ_CONFIG:
+ /* The IRQ settings will not apply to the target controller */
+ case NVME_FEAT_HOST_MEM_BUF:
+ /*
+ * Any HMB that's set will not be passed through and will
+ * not work as expected
+ */
+ case NVME_FEAT_SW_PROGRESS:
+ /*
+ * The Pre-Boot Software Load Count doesn't make much
+ * sense for a target to export
+ */
+ case NVME_FEAT_RESV_MASK:
+ case NVME_FEAT_RESV_PERSIST:
+ /* No reservations, see nvmet_parse_passthru_io_cmd() */
+ default:
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+}
+
+u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
+{
+ /* Reject any commands with non-sgl flags set (ie. fused commands) */
+ if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
+ return NVME_SC_INVALID_FIELD;
+
+	/*
+	 * Pass through all vendor-specific commands.
+	 */
+ if (req->cmd->common.opcode >= nvme_admin_vendor_start)
+ return nvmet_setup_passthru_command(req);
+
+ switch (req->cmd->common.opcode) {
+ case nvme_admin_async_event:
+ req->execute = nvmet_execute_async_event;
+ return NVME_SC_SUCCESS;
+ case nvme_admin_keep_alive:
+		/*
+		 * Most PCIe ctrls don't support the keep alive cmd, so route
+		 * keep alive to the non-passthru path. Revisit this once PCIe
+		 * ctrls with keep alive support become available.
+		 */
+ req->execute = nvmet_execute_keep_alive;
+ return NVME_SC_SUCCESS;
+ case nvme_admin_set_features:
+ switch (le32_to_cpu(req->cmd->features.fid)) {
+ case NVME_FEAT_ASYNC_EVENT:
+ case NVME_FEAT_KATO:
+ case NVME_FEAT_NUM_QUEUES:
+ case NVME_FEAT_HOST_ID:
+ req->execute = nvmet_execute_set_features;
+ return NVME_SC_SUCCESS;
+ case NVME_FEAT_HOST_BEHAVIOR:
+ req->execute = nvmet_passthru_set_host_behaviour;
+ return NVME_SC_SUCCESS;
+ default:
+ return nvmet_passthru_get_set_features(req);
+ }
+ break;
+ case nvme_admin_get_features:
+ switch (le32_to_cpu(req->cmd->features.fid)) {
+ case NVME_FEAT_ASYNC_EVENT:
+ case NVME_FEAT_KATO:
+ case NVME_FEAT_NUM_QUEUES:
+ case NVME_FEAT_HOST_ID:
+ req->execute = nvmet_execute_get_features;
+ return NVME_SC_SUCCESS;
+ default:
+ return nvmet_passthru_get_set_features(req);
+ }
+ break;
+ case nvme_admin_identify:
+ switch (req->cmd->identify.cns) {
+ case NVME_ID_CNS_CTRL:
+ req->execute = nvmet_passthru_execute_cmd;
+ req->p.use_workqueue = true;
+ return NVME_SC_SUCCESS;
+ case NVME_ID_CNS_CS_CTRL:
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_ZNS:
+ req->execute = nvmet_passthru_execute_cmd;
+ req->p.use_workqueue = true;
+ return NVME_SC_SUCCESS;
+ }
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ case NVME_ID_CNS_NS:
+ req->execute = nvmet_passthru_execute_cmd;
+ req->p.use_workqueue = true;
+ return NVME_SC_SUCCESS;
+ case NVME_ID_CNS_CS_NS:
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_ZNS:
+ req->execute = nvmet_passthru_execute_cmd;
+ req->p.use_workqueue = true;
+ return NVME_SC_SUCCESS;
+ }
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ default:
+ return nvmet_setup_passthru_command(req);
+ }
+ case nvme_admin_get_log_page:
+ return nvmet_setup_passthru_command(req);
+ default:
+ /* Reject commands not in the allowlist above */
+ return nvmet_report_invalid_opcode(req);
+ }
+}
+
+int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
+{
+ struct nvme_ctrl *ctrl;
+ struct file *file;
+ int ret = -EINVAL;
+ void *old;
+
+ mutex_lock(&subsys->lock);
+ if (!subsys->passthru_ctrl_path)
+ goto out_unlock;
+ if (subsys->passthru_ctrl)
+ goto out_unlock;
+
+ if (subsys->nr_namespaces) {
+		pr_info("cannot enable both passthru and regular namespaces for a single subsystem\n");
+ goto out_unlock;
+ }
+
+ file = filp_open(subsys->passthru_ctrl_path, O_RDWR, 0);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto out_unlock;
+ }
+
+ ctrl = nvme_ctrl_from_file(file);
+ if (!ctrl) {
+ pr_err("failed to open nvme controller %s\n",
+ subsys->passthru_ctrl_path);
+
+ goto out_put_file;
+ }
+
+ old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
+ subsys, GFP_KERNEL);
+ if (xa_is_err(old)) {
+ ret = xa_err(old);
+ goto out_put_file;
+ }
+
+ if (old)
+ goto out_put_file;
+
+ subsys->passthru_ctrl = ctrl;
+ subsys->ver = ctrl->vs;
+
+ if (subsys->ver < NVME_VS(1, 2, 1)) {
+ pr_warn("nvme controller version is too old: %llu.%llu.%llu, advertising 1.2.1\n",
+ NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver),
+ NVME_TERTIARY(subsys->ver));
+ subsys->ver = NVME_VS(1, 2, 1);
+ }
+ nvme_get_ctrl(ctrl);
+ __module_get(subsys->passthru_ctrl->ops->module);
+ ret = 0;
+
+out_put_file:
+ filp_close(file, NULL);
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return ret;
+}
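
The xa_cmpxchg() above is what enforces the one-passthru-subsystem-per-controller rule noted at the top of the file: the slot indexed by the controller's cntlid is claimed only while it is still NULL, and a non-NULL return means another subsystem already owns that controller. A standalone toy model of that claim-or-fail semantic (toy_cmpxchg and claim_slot are made-up names, not the kernel API):

#include <stdio.h>
#include <errno.h>

/* Toy stand-in modelling xa_cmpxchg(): store 'entry' at 'index' only if the
 * slot still holds 'old', and return the previous contents either way.
 */
static void *slots[256];

static void *toy_cmpxchg(unsigned int index, void *old, void *entry)
{
	void *cur = slots[index];

	if (cur == old)
		slots[index] = entry;
	return cur;
}

static int claim_slot(unsigned int cntlid, void *subsys)
{
	void *old = toy_cmpxchg(cntlid, NULL, subsys);

	return old ? -EBUSY : 0;	/* non-NULL: someone else owns this ctrl */
}

int main(void)
{
	int a, b;

	a = claim_slot(7, &a);	/* first claim succeeds */
	b = claim_slot(7, &b);	/* second claim is rejected */
	printf("%d %d\n", a, b);	/* 0 -16 */
	return 0;
}
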
+
+static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
+{
+ if (subsys->passthru_ctrl) {
+ xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid);
+ module_put(subsys->passthru_ctrl->ops->module);
+ nvme_put_ctrl(subsys->passthru_ctrl);
+ }
+ subsys->passthru_ctrl = NULL;
+ subsys->ver = NVMET_DEFAULT_VS;
+}
+
+void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
+{
+ mutex_lock(&subsys->lock);
+ __nvmet_passthru_ctrl_disable(subsys);
+ mutex_unlock(&subsys->lock);
+}
+
+void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
+{
+ mutex_lock(&subsys->lock);
+ __nvmet_passthru_ctrl_disable(subsys);
+ mutex_unlock(&subsys->lock);
+ kfree(subsys->passthru_ctrl_path);
+}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 37d262a65877..4597bca43a6d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -5,6 +5,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
+#include <linux/blk-integrity.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -20,6 +21,7 @@
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>
+#include <rdma/ib_cm.h>
#include <linux/nvme-rdma.h>
#include "nvmet.h"
@@ -31,6 +33,12 @@
#define NVMET_RDMA_MAX_INLINE_SGE 4
#define NVMET_RDMA_MAX_INLINE_DATA_SIZE max_t(int, SZ_16K, PAGE_SIZE)
+/* Assume mpsmin == device_page_size == 4KB */
+#define NVMET_RDMA_MAX_MDTS 8
+#define NVMET_RDMA_MAX_METADATA_MDTS 5
+
+struct nvmet_rdma_srq;
+
struct nvmet_rdma_cmd {
struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
struct ib_cqe cqe;
@@ -38,6 +46,7 @@ struct nvmet_rdma_cmd {
struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
struct nvme_command *nvme_cmd;
struct nvmet_rdma_queue *queue;
+ struct nvmet_rdma_srq *nsrq;
};
enum {
@@ -54,6 +63,7 @@ struct nvmet_rdma_rsp {
struct nvmet_rdma_queue *queue;
struct ib_cqe read_cqe;
+ struct ib_cqe write_cqe;
struct rdma_rw_ctx rw;
struct nvmet_req req;
@@ -75,10 +85,12 @@ enum nvmet_rdma_queue_state {
struct nvmet_rdma_queue {
struct rdma_cm_id *cm_id;
+ struct ib_qp *qp;
struct nvmet_port *port;
struct ib_cq *cq;
atomic_t sq_wr_avail;
struct nvmet_rdma_device *dev;
+ struct nvmet_rdma_srq *nsrq;
spinlock_t state_lock;
enum nvmet_rdma_queue_state state;
struct nvmet_cq nvme_cq;
@@ -96,17 +108,31 @@ struct nvmet_rdma_queue {
int idx;
int host_qid;
+ int comp_vector;
int recv_queue_size;
int send_queue_size;
struct list_head queue_list;
};
+struct nvmet_rdma_port {
+ struct nvmet_port *nport;
+ struct sockaddr_storage addr;
+ struct rdma_cm_id *cm_id;
+ struct delayed_work repair_work;
+};
+
+struct nvmet_rdma_srq {
+ struct ib_srq *srq;
+ struct nvmet_rdma_cmd *cmds;
+ struct nvmet_rdma_device *ndev;
+};
+
struct nvmet_rdma_device {
struct ib_device *device;
struct ib_pd *pd;
- struct ib_srq *srq;
- struct nvmet_rdma_cmd *srq_cmds;
+ struct nvmet_rdma_srq **srqs;
+ int srq_count;
size_t srq_size;
struct kref ref;
struct list_head entry;
@@ -118,6 +144,16 @@ static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
+static int srq_size_set(const char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops srq_size_ops = {
+ .set = srq_size_set,
+ .get = param_get_int,
+};
+
+static int nvmet_rdma_srq_size = 1024;
+module_param_cb(srq_size, &srq_size_ops, &nvmet_rdma_srq_size, 0644);
+MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, must be >= 256 (default: 1024)");
+
static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);
@@ -129,6 +165,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
@@ -138,15 +175,20 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
static const struct nvmet_fabrics_ops nvmet_rdma_ops;
-static int num_pages(int len)
+static int srq_size_set(const char *val, const struct kernel_param *kp)
{
- return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
+ int n = 0, ret;
+
+ ret = kstrtoint(val, 10, &n);
+ if (ret != 0 || n < 256)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
}
-/* XXX: really should move to a generic header sooner or later.. */
-static inline u32 get_unaligned_le24(const u8 *p)
+static int num_pages(int len)
{
- return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
+ return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
}
static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
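
For positive lengths, num_pages() above is just DIV_ROUND_UP(len, PAGE_SIZE) written with a mask; a quick standalone check (PAGE_SIZE assumed to be 4KB):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static int num_pages(int len)
{
	return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
}

int main(void)
{
	int len;

	for (len = 1; len <= 4 * PAGE_SIZE; len++)
		if (num_pages(len) != (len + PAGE_SIZE - 1) / PAGE_SIZE)
			printf("mismatch at %d\n", len);
	printf("num_pages(1)=%d num_pages(4096)=%d num_pages(4097)=%d\n",
	       num_pages(1), num_pages(4096), num_pages(4097));
	return 0;
}
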
@@ -373,7 +415,8 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
goto out_free_rsp;
- r->req.p2p_client = &ndev->device->dev;
+ if (ib_dma_pci_p2p_dma_supported(ndev->device))
+ r->req.p2p_client = &ndev->device->dev;
r->send_sge.length = sizeof(*r->req.cqe);
r->send_sge.lkey = ndev->pd->local_dma_lkey;
@@ -386,6 +429,9 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
/* Data In / RDMA READ */
r->read_cqe.done = nvmet_rdma_read_data_done;
+ /* Data Out / RDMA WRITE */
+ r->write_cqe.done = nvmet_rdma_write_data_done;
+
return 0;
out_free_rsp:
@@ -461,10 +507,10 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
cmd->sge[0].addr, cmd->sge[0].length,
DMA_FROM_DEVICE);
- if (ndev->srq)
- ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
+ if (cmd->nsrq)
+ ret = ib_post_srq_recv(cmd->nsrq->srq, &cmd->wr, NULL);
else
- ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
+ ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);
if (unlikely(ret))
pr_err("post_recv cmd failed\n");
@@ -495,6 +541,129 @@ static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
spin_unlock(&queue->rsp_wr_wait_lock);
}
+static u16 nvmet_rdma_check_pi_status(struct ib_mr *sig_mr)
+{
+ struct ib_mr_status mr_status;
+ int ret;
+ u16 status = 0;
+
+ ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
+ if (ret) {
+ pr_err("ib_check_mr_status failed, ret %d\n", ret);
+ return NVME_SC_INVALID_PI;
+ }
+
+ if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+ switch (mr_status.sig_err.err_type) {
+ case IB_SIG_BAD_GUARD:
+ status = NVME_SC_GUARD_CHECK;
+ break;
+ case IB_SIG_BAD_REFTAG:
+ status = NVME_SC_REFTAG_CHECK;
+ break;
+ case IB_SIG_BAD_APPTAG:
+ status = NVME_SC_APPTAG_CHECK;
+ break;
+ }
+ pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
+ mr_status.sig_err.err_type,
+ mr_status.sig_err.expected,
+ mr_status.sig_err.actual);
+ }
+
+ return status;
+}
+
+static void nvmet_rdma_set_sig_domain(struct blk_integrity *bi,
+ struct nvme_command *cmd, struct ib_sig_domain *domain,
+ u16 control, u8 pi_type)
+{
+ domain->sig_type = IB_SIG_TYPE_T10_DIF;
+ domain->sig.dif.bg_type = IB_T10DIF_CRC;
+ domain->sig.dif.pi_interval = 1 << bi->interval_exp;
+ domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag);
+ if (control & NVME_RW_PRINFO_PRCHK_REF)
+ domain->sig.dif.ref_remap = true;
+
+ domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
+ domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
+ domain->sig.dif.app_escape = true;
+ if (pi_type == NVME_NS_DPS_PI_TYPE3)
+ domain->sig.dif.ref_escape = true;
+}
+
+static void nvmet_rdma_set_sig_attrs(struct nvmet_req *req,
+ struct ib_sig_attrs *sig_attrs)
+{
+ struct nvme_command *cmd = req->cmd;
+ u16 control = le16_to_cpu(cmd->rw.control);
+ u8 pi_type = req->ns->pi_type;
+ struct blk_integrity *bi;
+
+ bi = bdev_get_integrity(req->ns->bdev);
+
+ memset(sig_attrs, 0, sizeof(*sig_attrs));
+
+ if (control & NVME_RW_PRINFO_PRACT) {
+ /* for WRITE_INSERT/READ_STRIP no wire domain */
+ sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
+ nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
+ pi_type);
+ /* Clear the PRACT bit since HCA will generate/verify the PI */
+ control &= ~NVME_RW_PRINFO_PRACT;
+ cmd->rw.control = cpu_to_le16(control);
+ /* PI is added by the HW */
+ req->transfer_len += req->metadata_len;
+ } else {
+ /* for WRITE_PASS/READ_PASS both wire/memory domains exist */
+ nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
+ pi_type);
+ nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
+ pi_type);
+ }
+
+ if (control & NVME_RW_PRINFO_PRCHK_REF)
+ sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;
+ if (control & NVME_RW_PRINFO_PRCHK_GUARD)
+ sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
+ if (control & NVME_RW_PRINFO_PRCHK_APP)
+ sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
+}
+
+static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr, u32 key,
+ struct ib_sig_attrs *sig_attrs)
+{
+ struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+ struct nvmet_req *req = &rsp->req;
+ int ret;
+
+ if (req->metadata_len)
+ ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp,
+ cm_id->port_num, req->sg, req->sg_cnt,
+ req->metadata_sg, req->metadata_sg_cnt, sig_attrs,
+ addr, key, nvmet_data_dir(req));
+ else
+ ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
+ req->sg, req->sg_cnt, 0, addr, key,
+ nvmet_data_dir(req));
+
+ return ret;
+}
+
+static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp)
+{
+ struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+ struct nvmet_req *req = &rsp->req;
+
+ if (req->metadata_len)
+ rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp,
+ cm_id->port_num, req->sg, req->sg_cnt,
+ req->metadata_sg, req->metadata_sg_cnt,
+ nvmet_data_dir(req));
+ else
+ rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,
+ req->sg, req->sg_cnt, nvmet_data_dir(req));
+}
static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
@@ -502,14 +671,11 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
- if (rsp->n_rdma) {
- rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
- queue->cm_id->port_num, rsp->req.sg,
- rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
- }
+ if (rsp->n_rdma)
+ nvmet_rdma_rw_ctx_destroy(rsp);
if (rsp->req.sg != rsp->cmd->inline_sg)
- nvmet_req_free_sgl(&rsp->req);
+ nvmet_req_free_sgls(&rsp->req);
if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
nvmet_rdma_process_wr_wait_list(queue);
@@ -535,7 +701,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct nvmet_rdma_rsp *rsp =
container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
- struct nvmet_rdma_queue *queue = cq->cq_context;
+ struct nvmet_rdma_queue *queue = wc->qp->qp_context;
nvmet_rdma_release_rsp(rsp);
@@ -561,11 +727,16 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
rsp->send_wr.opcode = IB_WR_SEND;
}
- if (nvmet_rdma_need_data_out(rsp))
- first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
- cm_id->port_num, NULL, &rsp->send_wr);
- else
+ if (nvmet_rdma_need_data_out(rsp)) {
+ if (rsp->req.metadata_len)
+ first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
+ cm_id->port_num, &rsp->write_cqe, NULL);
+ else
+ first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
+ cm_id->port_num, NULL, &rsp->send_wr);
+ } else {
first_wr = &rsp->send_wr;
+ }
nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
@@ -583,16 +754,15 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct nvmet_rdma_rsp *rsp =
container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
- struct nvmet_rdma_queue *queue = cq->cq_context;
+ struct nvmet_rdma_queue *queue = wc->qp->qp_context;
+ u16 status = 0;
WARN_ON(rsp->n_rdma <= 0);
atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
- rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
- queue->cm_id->port_num, rsp->req.sg,
- rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
rsp->n_rdma = 0;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ nvmet_rdma_rw_ctx_destroy(rsp);
nvmet_req_uninit(&rsp->req);
nvmet_rdma_release_rsp(rsp);
if (wc->status != IB_WC_WR_FLUSH_ERR) {
@@ -603,7 +773,57 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
return;
}
- rsp->req.execute(&rsp->req);
+ if (rsp->req.metadata_len)
+ status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
+ nvmet_rdma_rw_ctx_destroy(rsp);
+
+ if (unlikely(status))
+ nvmet_req_complete(&rsp->req, status);
+ else
+ rsp->req.execute(&rsp->req);
+}
+
+static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct nvmet_rdma_rsp *rsp =
+ container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
+ struct nvmet_rdma_queue *queue = wc->qp->qp_context;
+ struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+ u16 status;
+
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
+ return;
+
+ WARN_ON(rsp->n_rdma <= 0);
+ atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
+ rsp->n_rdma = 0;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ nvmet_rdma_rw_ctx_destroy(rsp);
+ nvmet_req_uninit(&rsp->req);
+ nvmet_rdma_release_rsp(rsp);
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ pr_info("RDMA WRITE for CQE failed with status %s (%d).\n",
+ ib_wc_status_msg(wc->status), wc->status);
+ nvmet_rdma_error_comp(queue);
+ }
+ return;
+ }
+
+ /*
+ * Upon RDMA completion check the signature status
+ * - if succeeded send good NVMe response
+ * - if failed send bad NVMe response with appropriate error
+ */
+ status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
+ if (unlikely(status))
+ rsp->req.cqe->status = cpu_to_le16(status << 1);
+ nvmet_rdma_rw_ctx_destroy(rsp);
+
+ if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) {
+ pr_err("sending cmd response failed\n");
+ nvmet_rdma_release_rsp(rsp);
+ }
}
static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
@@ -660,9 +880,9 @@ static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
- struct rdma_cm_id *cm_id = rsp->queue->cm_id;
u64 addr = le64_to_cpu(sgl->addr);
u32 key = get_unaligned_le32(sgl->key);
+ struct ib_sig_attrs sig_attrs;
int ret;
rsp->req.transfer_len = get_unaligned_le24(sgl->length);
@@ -671,13 +891,14 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
if (!rsp->req.transfer_len)
return 0;
- ret = nvmet_req_alloc_sgl(&rsp->req);
+ if (rsp->req.metadata_len)
+ nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs);
+
+ ret = nvmet_req_alloc_sgls(&rsp->req);
if (unlikely(ret < 0))
goto error_out;
- ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
- rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
- nvmet_data_dir(&rsp->req));
+ ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs);
if (unlikely(ret < 0))
goto error_out;
rsp->n_rdma += ret;
@@ -742,7 +963,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
}
if (nvmet_rdma_need_data_in(rsp)) {
- if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
+ if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
queue->cm_id->port_num, &rsp->read_cqe, NULL))
nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
} else {
@@ -788,7 +1009,7 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct nvmet_rdma_cmd *cmd =
container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
- struct nvmet_rdma_queue *queue = cq->cq_context;
+ struct nvmet_rdma_queue *queue = wc->qp->qp_context;
struct nvmet_rdma_rsp *rsp;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
@@ -840,23 +1061,40 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
nvmet_rdma_handle_command(queue, rsp);
}
-static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
+static void nvmet_rdma_destroy_srq(struct nvmet_rdma_srq *nsrq)
{
- if (!ndev->srq)
+ nvmet_rdma_free_cmds(nsrq->ndev, nsrq->cmds, nsrq->ndev->srq_size,
+ false);
+ ib_destroy_srq(nsrq->srq);
+
+ kfree(nsrq);
+}
+
+static void nvmet_rdma_destroy_srqs(struct nvmet_rdma_device *ndev)
+{
+ int i;
+
+ if (!ndev->srqs)
return;
- nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
- ib_destroy_srq(ndev->srq);
+ for (i = 0; i < ndev->srq_count; i++)
+ nvmet_rdma_destroy_srq(ndev->srqs[i]);
+
+ kfree(ndev->srqs);
}
-static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
+static struct nvmet_rdma_srq *
+nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
struct ib_srq_init_attr srq_attr = { NULL, };
+ size_t srq_size = ndev->srq_size;
+ struct nvmet_rdma_srq *nsrq;
struct ib_srq *srq;
- size_t srq_size;
int ret, i;
- srq_size = 4095; /* XXX: tune */
+ nsrq = kzalloc(sizeof(*nsrq), GFP_KERNEL);
+ if (!nsrq)
+ return ERR_PTR(-ENOMEM);
srq_attr.attr.max_wr = srq_size;
srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
@@ -864,35 +1102,73 @@ static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
srq_attr.srq_type = IB_SRQT_BASIC;
srq = ib_create_srq(ndev->pd, &srq_attr);
if (IS_ERR(srq)) {
- /*
- * If SRQs aren't supported we just go ahead and use normal
- * non-shared receive queues.
- */
- pr_info("SRQ requested but not supported.\n");
- return 0;
+ ret = PTR_ERR(srq);
+ goto out_free;
}
- ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
- if (IS_ERR(ndev->srq_cmds)) {
- ret = PTR_ERR(ndev->srq_cmds);
+ nsrq->cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
+ if (IS_ERR(nsrq->cmds)) {
+ ret = PTR_ERR(nsrq->cmds);
goto out_destroy_srq;
}
- ndev->srq = srq;
- ndev->srq_size = srq_size;
+ nsrq->srq = srq;
+ nsrq->ndev = ndev;
for (i = 0; i < srq_size; i++) {
- ret = nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
+ nsrq->cmds[i].nsrq = nsrq;
+ ret = nvmet_rdma_post_recv(ndev, &nsrq->cmds[i]);
if (ret)
goto out_free_cmds;
}
- return 0;
+ return nsrq;
out_free_cmds:
- nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
+ nvmet_rdma_free_cmds(ndev, nsrq->cmds, srq_size, false);
out_destroy_srq:
ib_destroy_srq(srq);
+out_free:
+ kfree(nsrq);
+ return ERR_PTR(ret);
+}
+
+static int nvmet_rdma_init_srqs(struct nvmet_rdma_device *ndev)
+{
+ int i, ret;
+
+ if (!ndev->device->attrs.max_srq_wr || !ndev->device->attrs.max_srq) {
+ /*
+ * If SRQs aren't supported we just go ahead and use normal
+ * non-shared receive queues.
+ */
+ pr_info("SRQ requested but not supported.\n");
+ return 0;
+ }
+
+ ndev->srq_size = min(ndev->device->attrs.max_srq_wr,
+ nvmet_rdma_srq_size);
+ ndev->srq_count = min(ndev->device->num_comp_vectors,
+ ndev->device->attrs.max_srq);
+
+ ndev->srqs = kcalloc(ndev->srq_count, sizeof(*ndev->srqs), GFP_KERNEL);
+ if (!ndev->srqs)
+ return -ENOMEM;
+
+ for (i = 0; i < ndev->srq_count; i++) {
+ ndev->srqs[i] = nvmet_rdma_init_srq(ndev);
+ if (IS_ERR(ndev->srqs[i])) {
+ ret = PTR_ERR(ndev->srqs[i]);
+ goto err_srq;
+ }
+ }
+
+ return 0;
+
+err_srq:
+ while (--i >= 0)
+ nvmet_rdma_destroy_srq(ndev->srqs[i]);
+ kfree(ndev->srqs);
return ret;
}
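
With the sizing rules above, the SRQ layout follows directly from the device attributes. For an assumed device with max_srq_wr = 4096, max_srq = 64 and 16 completion vectors, the default module parameter yields 16 SRQs of 1024 entries each:

#include <stdio.h>

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Assumed device attributes, for illustration only. */
	int max_srq_wr = 4096, max_srq = 64, num_comp_vectors = 16;
	int srq_size_param = 1024;	/* module default */
	int srq_size = min_int(max_srq_wr, srq_size_param);
	int srq_count = min_int(num_comp_vectors, max_srq);

	printf("%d SRQs x %d entries\n", srq_count, srq_size);	/* 16 x 1024 */
	return 0;
}
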
@@ -905,7 +1181,7 @@ static void nvmet_rdma_free_dev(struct kref *ref)
list_del(&ndev->entry);
mutex_unlock(&device_list_mutex);
- nvmet_rdma_destroy_srq(ndev);
+ nvmet_rdma_destroy_srqs(ndev);
ib_dealloc_pd(ndev->pd);
kfree(ndev);
@@ -914,7 +1190,8 @@ static void nvmet_rdma_free_dev(struct kref *ref)
static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
- struct nvmet_port *port = cm_id->context;
+ struct nvmet_rdma_port *port = cm_id->context;
+ struct nvmet_port *nport = port->nport;
struct nvmet_rdma_device *ndev;
int inline_page_count;
int inline_sge_count;
@@ -931,18 +1208,26 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
if (!ndev)
goto out_err;
- inline_page_count = num_pages(port->inline_data_size);
+ inline_page_count = num_pages(nport->inline_data_size);
inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
cm_id->device->attrs.max_recv_sge) - 1;
if (inline_page_count > inline_sge_count) {
pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
- port->inline_data_size, cm_id->device->name,
+ nport->inline_data_size, cm_id->device->name,
inline_sge_count * PAGE_SIZE);
- port->inline_data_size = inline_sge_count * PAGE_SIZE;
+ nport->inline_data_size = inline_sge_count * PAGE_SIZE;
inline_page_count = inline_sge_count;
}
- ndev->inline_data_size = port->inline_data_size;
+ ndev->inline_data_size = nport->inline_data_size;
ndev->inline_page_count = inline_page_count;
+
+ if (nport->pi_enable && !(cm_id->device->attrs.kernel_cap_flags &
+ IBK_INTEGRITY_HANDOVER)) {
+ pr_warn("T10-PI is not supported by device %s. Disabling it\n",
+ cm_id->device->name);
+ nport->pi_enable = false;
+ }
+
ndev->device = cm_id->device;
kref_init(&ndev->ref);
@@ -951,7 +1236,7 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
goto out_free_dev;
if (nvmet_rdma_use_srq) {
- ret = nvmet_rdma_init_srq(ndev);
+ ret = nvmet_rdma_init_srqs(ndev);
if (ret)
goto out_free_pd;
}
@@ -973,25 +1258,17 @@ out_err:
static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
- struct ib_qp_init_attr qp_attr;
+ struct ib_qp_init_attr qp_attr = { };
struct nvmet_rdma_device *ndev = queue->dev;
- int comp_vector, nr_cqe, ret, i;
-
- /*
- * Spread the io queues across completion vectors,
- * but still keep all admin queues on vector 0.
- */
- comp_vector = !queue->host_qid ? 0 :
- queue->idx % ndev->device->num_comp_vectors;
+ int nr_cqe, ret, i, factor;
/*
* Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
*/
nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
- queue->cq = ib_alloc_cq(ndev->device, queue,
- nr_cqe + 1, comp_vector,
- IB_POLL_WORKQUEUE);
+ queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1,
+ queue->comp_vector, IB_POLL_WORKQUEUE);
if (IS_ERR(queue->cq)) {
ret = PTR_ERR(queue->cq);
pr_err("failed to create CQ cqe= %d ret= %d\n",
@@ -999,7 +1276,6 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
goto out;
}
- memset(&qp_attr, 0, sizeof(qp_attr));
qp_attr.qp_context = queue;
qp_attr.event_handler = nvmet_rdma_qp_event;
qp_attr.send_cq = queue->cq;
@@ -1008,23 +1284,29 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
qp_attr.qp_type = IB_QPT_RC;
/* +1 for drain */
qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
- qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
+ factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num,
+ 1 << NVMET_RDMA_MAX_MDTS);
+ qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor;
qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
ndev->device->attrs.max_send_sge);
- if (ndev->srq) {
- qp_attr.srq = ndev->srq;
+ if (queue->nsrq) {
+ qp_attr.srq = queue->nsrq->srq;
} else {
/* +1 for drain */
qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
}
+ if (queue->port->pi_enable && queue->host_qid)
+ qp_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
+
ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
if (ret) {
pr_err("failed to create_qp ret= %d\n", ret);
goto err_destroy_cq;
}
+ queue->qp = queue->cm_id->qp;
atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
@@ -1032,7 +1314,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
__func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
qp_attr.cap.max_send_wr, queue->cm_id);
- if (!ndev->srq) {
+ if (!queue->nsrq) {
for (i = 0; i < queue->recv_queue_size; i++) {
queue->cmds[i].queue = queue;
ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
@@ -1047,18 +1329,18 @@ out:
err_destroy_qp:
rdma_destroy_qp(queue->cm_id);
err_destroy_cq:
- ib_free_cq(queue->cq);
+ ib_cq_pool_put(queue->cq, nr_cqe + 1);
goto out;
}
static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
- struct ib_qp *qp = queue->cm_id->qp;
-
- ib_drain_qp(qp);
- rdma_destroy_id(queue->cm_id);
- ib_destroy_qp(qp);
- ib_free_cq(queue->cq);
+ ib_drain_qp(queue->qp);
+ if (queue->cm_id)
+ rdma_destroy_id(queue->cm_id);
+ ib_destroy_qp(queue->qp);
+ ib_cq_pool_put(queue->cq, queue->recv_queue_size + 2 *
+ queue->send_queue_size + 1);
}
static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
@@ -1068,13 +1350,13 @@ static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
nvmet_sq_destroy(&queue->nvme_sq);
nvmet_rdma_destroy_queue_ib(queue);
- if (!queue->dev->srq) {
+ if (!queue->nsrq) {
nvmet_rdma_free_cmds(queue->dev, queue->cmds,
queue->recv_queue_size,
!queue->host_qid);
}
nvmet_rdma_free_rsps(queue);
- ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
+ ida_free(&nvmet_rdma_queue_ida, queue->idx);
kfree(queue);
}
@@ -1130,7 +1412,8 @@ static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
rej.sts = cpu_to_le16(status);
- return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
+ return rdma_reject(cm_id, (void *)&rej, sizeof(rej),
+ IB_CM_REJ_CONSUMER_DEFINED);
}
static struct nvmet_rdma_queue *
@@ -1138,6 +1421,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
+ struct nvmet_rdma_port *port = cm_id->context;
struct nvmet_rdma_queue *queue;
int ret;
@@ -1164,6 +1448,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
queue->dev = ndev;
queue->cm_id = cm_id;
+ queue->port = port->nport;
spin_lock_init(&queue->state_lock);
queue->state = NVMET_RDMA_Q_CONNECTING;
@@ -1174,19 +1459,29 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
spin_lock_init(&queue->rsps_lock);
INIT_LIST_HEAD(&queue->queue_list);
- queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
+ queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL);
if (queue->idx < 0) {
ret = NVME_RDMA_CM_NO_RSC;
goto out_destroy_sq;
}
+ /*
+ * Spread the io queues across completion vectors,
+ * but still keep all admin queues on vector 0.
+ */
+ queue->comp_vector = !queue->host_qid ? 0 :
+ queue->idx % ndev->device->num_comp_vectors;
+
ret = nvmet_rdma_alloc_rsps(queue);
if (ret) {
ret = NVME_RDMA_CM_NO_RSC;
goto out_ida_remove;
}
- if (!ndev->srq) {
+ if (ndev->srqs) {
+ queue->nsrq = ndev->srqs[queue->comp_vector % ndev->srq_count];
+ } else {
queue->cmds = nvmet_rdma_alloc_cmds(ndev,
queue->recv_queue_size,
!queue->host_qid);
@@ -1207,7 +1502,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
return queue;
out_free_cmds:
- if (!ndev->srq) {
+ if (!queue->nsrq) {
nvmet_rdma_free_cmds(queue->dev, queue->cmds,
queue->recv_queue_size,
!queue->host_qid);
@@ -1215,7 +1510,7 @@ out_free_cmds:
out_free_responses:
nvmet_rdma_free_rsps(queue);
out_ida_remove:
- ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
+ ida_free(&nvmet_rdma_queue_ida, queue->idx);
out_destroy_sq:
nvmet_sq_destroy(&queue->nvme_sq);
out_free_queue:
@@ -1233,6 +1528,10 @@ static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
case IB_EVENT_COMM_EST:
rdma_notify(queue->cm_id, event->event);
break;
+ case IB_EVENT_QP_LAST_WQE_REACHED:
+ pr_debug("received last WQE reached event for queue=0x%p\n",
+ queue);
+ break;
default:
pr_err("received IB QP event: %s (%d)\n",
ib_event_msg(event->event), event->event);
@@ -1282,18 +1581,20 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
ret = -ENOMEM;
goto put_device;
}
- queue->port = cm_id->context;
if (queue->host_qid == 0) {
/* Let inflight controller teardown complete */
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
}
ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
if (ret) {
- schedule_work(&queue->release_work);
- /* Destroying rdma_cm id is not needed here */
- return 0;
+ /*
+ * Don't destroy the cm_id in free path, as we implicitly
+ * destroy the cm_id here with non-zero ret code.
+ */
+ queue->cm_id = NULL;
+ goto free_queue;
}
mutex_lock(&nvmet_rdma_queue_mutex);
@@ -1302,6 +1603,8 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
return 0;
+free_queue:
+ nvmet_rdma_free_queue(queue);
put_device:
kref_put(&ndev->ref, nvmet_rdma_free_dev);
@@ -1345,6 +1648,16 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
spin_lock_irqsave(&queue->state_lock, flags);
switch (queue->state) {
case NVMET_RDMA_Q_CONNECTING:
+ while (!list_empty(&queue->rsp_wait_list)) {
+ struct nvmet_rdma_rsp *rsp;
+
+ rsp = list_first_entry(&queue->rsp_wait_list,
+ struct nvmet_rdma_rsp,
+ wait_list);
+ list_del(&rsp->wait_list);
+ nvmet_rdma_put_rsp(rsp);
+ }
+ fallthrough;
case NVMET_RDMA_Q_LIVE:
queue->state = NVMET_RDMA_Q_DISCONNECTING;
disconnect = true;
@@ -1356,7 +1669,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
if (disconnect) {
rdma_disconnect(queue->cm_id);
- schedule_work(&queue->release_work);
+ queue_work(nvmet_wq, &queue->release_work);
}
}
@@ -1386,11 +1699,11 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
mutex_unlock(&nvmet_rdma_queue_mutex);
pr_err("failed to connect queue %d\n", queue->idx);
- schedule_work(&queue->release_work);
+ queue_work(nvmet_wq, &queue->release_work);
}
/**
- * nvme_rdma_device_removal() - Handle RDMA device removal
+ * nvmet_rdma_device_removal() - Handle RDMA device removal
* @cm_id: rdma_cm id, used for nvmet port
* @queue: nvmet rdma queue (cm id qp_context)
*
@@ -1407,7 +1720,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
struct nvmet_rdma_queue *queue)
{
- struct nvmet_port *port;
+ struct nvmet_rdma_port *port;
if (queue) {
/*
@@ -1426,7 +1739,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
* cm_id destroy. use atomic xchg to make sure
* we don't compete with remove_port.
*/
- if (xchg(&port->priv, NULL) != cm_id)
+ if (xchg(&port->cm_id, NULL) != cm_id)
return 0;
/*
@@ -1457,6 +1770,13 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
nvmet_rdma_queue_established(queue);
break;
case RDMA_CM_EVENT_ADDR_CHANGE:
+ if (!queue) {
+ struct nvmet_rdma_port *port = cm_id->context;
+
+ queue_delayed_work(nvmet_wq, &port->repair_work, 0);
+ break;
+ }
+ fallthrough;
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
nvmet_rdma_queue_disconnect(queue);
@@ -1467,7 +1787,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
case RDMA_CM_EVENT_REJECTED:
pr_debug("Connection rejected: %s\n",
rdma_reject_msg(cm_id, event->status));
- /* FALLTHROUGH */
+ fallthrough;
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_CONNECT_ERROR:
nvmet_rdma_queue_connect_fail(cm_id, queue);
@@ -1499,42 +1819,43 @@ restart:
mutex_unlock(&nvmet_rdma_queue_mutex);
}
-static int nvmet_rdma_add_port(struct nvmet_port *port)
+static void nvmet_rdma_destroy_port_queues(struct nvmet_rdma_port *port)
{
- struct rdma_cm_id *cm_id;
- struct sockaddr_storage addr = { };
- __kernel_sa_family_t af;
- int ret;
+ struct nvmet_rdma_queue *queue, *tmp;
+ struct nvmet_port *nport = port->nport;
- switch (port->disc_addr.adrfam) {
- case NVMF_ADDR_FAMILY_IP4:
- af = AF_INET;
- break;
- case NVMF_ADDR_FAMILY_IP6:
- af = AF_INET6;
- break;
- default:
- pr_err("address family %d not supported\n",
- port->disc_addr.adrfam);
- return -EINVAL;
- }
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
+ queue_list) {
+ if (queue->port != nport)
+ continue;
- if (port->inline_data_size < 0) {
- port->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
- } else if (port->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
- pr_warn("inline_data_size %u is too large, reducing to %u\n",
- port->inline_data_size,
- NVMET_RDMA_MAX_INLINE_DATA_SIZE);
- port->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
+ list_del_init(&queue->queue_list);
+ __nvmet_rdma_queue_disconnect(queue);
}
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+}
- ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
- port->disc_addr.trsvcid, &addr);
- if (ret) {
- pr_err("malformed ip/port passed: %s:%s\n",
- port->disc_addr.traddr, port->disc_addr.trsvcid);
- return ret;
- }
+static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port)
+{
+ struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);
+
+ if (cm_id)
+ rdma_destroy_id(cm_id);
+
+	/*
+	 * Destroy the remaining queues, which do not belong to any
+	 * controller yet. Doing it here, after the RDMA-CM ID has been
+	 * destroyed, guarantees that no new queue will be created.
+	 */
+ */
+ nvmet_rdma_destroy_port_queues(port);
+}
+
+static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
+{
+ struct sockaddr *addr = (struct sockaddr *)&port->addr;
+ struct rdma_cm_id *cm_id;
+ int ret;
cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
RDMA_PS_TCP, IB_QPT_RC);
@@ -1553,23 +1874,19 @@ static int nvmet_rdma_add_port(struct nvmet_port *port)
goto out_destroy_id;
}
- ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr);
+ ret = rdma_bind_addr(cm_id, addr);
if (ret) {
- pr_err("binding CM ID to %pISpcs failed (%d)\n",
- (struct sockaddr *)&addr, ret);
+ pr_err("binding CM ID to %pISpcs failed (%d)\n", addr, ret);
goto out_destroy_id;
}
ret = rdma_listen(cm_id, 128);
if (ret) {
- pr_err("listening to %pISpcs failed (%d)\n",
- (struct sockaddr *)&addr, ret);
+ pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
goto out_destroy_id;
}
- pr_info("enabling port %d (%pISpcs)\n",
- le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr);
- port->priv = cm_id;
+ port->cm_id = cm_id;
return 0;
out_destroy_id:
@@ -1577,18 +1894,92 @@ out_destroy_id:
return ret;
}
-static void nvmet_rdma_remove_port(struct nvmet_port *port)
+static void nvmet_rdma_repair_port_work(struct work_struct *w)
{
- struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
+ struct nvmet_rdma_port *port = container_of(to_delayed_work(w),
+ struct nvmet_rdma_port, repair_work);
+ int ret;
- if (cm_id)
- rdma_destroy_id(cm_id);
+ nvmet_rdma_disable_port(port);
+ ret = nvmet_rdma_enable_port(port);
+ if (ret)
+ queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ);
+}
+
+static int nvmet_rdma_add_port(struct nvmet_port *nport)
+{
+ struct nvmet_rdma_port *port;
+ __kernel_sa_family_t af;
+ int ret;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ nport->priv = port;
+ port->nport = nport;
+ INIT_DELAYED_WORK(&port->repair_work, nvmet_rdma_repair_port_work);
+
+ switch (nport->disc_addr.adrfam) {
+ case NVMF_ADDR_FAMILY_IP4:
+ af = AF_INET;
+ break;
+ case NVMF_ADDR_FAMILY_IP6:
+ af = AF_INET6;
+ break;
+ default:
+ pr_err("address family %d not supported\n",
+ nport->disc_addr.adrfam);
+ ret = -EINVAL;
+ goto out_free_port;
+ }
+
+ if (nport->inline_data_size < 0) {
+ nport->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
+ } else if (nport->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
+ pr_warn("inline_data_size %u is too large, reducing to %u\n",
+ nport->inline_data_size,
+ NVMET_RDMA_MAX_INLINE_DATA_SIZE);
+ nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
+ }
+
+ ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
+ nport->disc_addr.trsvcid, &port->addr);
+ if (ret) {
+ pr_err("malformed ip/port passed: %s:%s\n",
+ nport->disc_addr.traddr, nport->disc_addr.trsvcid);
+ goto out_free_port;
+ }
+
+ ret = nvmet_rdma_enable_port(port);
+ if (ret)
+ goto out_free_port;
+
+ pr_info("enabling port %d (%pISpcs)\n",
+ le16_to_cpu(nport->disc_addr.portid),
+ (struct sockaddr *)&port->addr);
+
+ return 0;
+
+out_free_port:
+ kfree(port);
+ return ret;
+}
+
+static void nvmet_rdma_remove_port(struct nvmet_port *nport)
+{
+ struct nvmet_rdma_port *port = nport->priv;
+
+ cancel_delayed_work_sync(&port->repair_work);
+ nvmet_rdma_disable_port(port);
+ kfree(port);
}
static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
- struct nvmet_port *port, char *traddr)
+ struct nvmet_port *nport, char *traddr)
{
- struct rdma_cm_id *cm_id = port->priv;
+ struct nvmet_rdma_port *port = nport->priv;
+ struct rdma_cm_id *cm_id = port->cm_id;
if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
struct nvmet_rdma_rsp *rsp =
@@ -1598,20 +1989,34 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
sprintf(traddr, "%pISc", addr);
} else {
- memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
+ memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
}
}
+static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
+{
+ if (ctrl->pi_support)
+ return NVMET_RDMA_MAX_METADATA_MDTS;
+ return NVMET_RDMA_MAX_MDTS;
+}
+
+static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl)
+{
+ return NVME_RDMA_MAX_QUEUE_SIZE;
+}
+
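
MDTS is an exponent on the minimum memory page size, so with the assumed 4KB MPSMIN the caps above translate to 2^8 * 4KB = 1MB per command without metadata and 2^5 * 4KB = 128KB when T10-PI is enabled:

#include <stdio.h>

int main(void)
{
	unsigned int mpsmin_bytes = 4096;	/* assumed MPSMIN == 0 -> 4KB */
	unsigned int max_mdts = 8, max_md_mdts = 5;

	printf("mdts=%u -> %u KB\n", max_mdts,
	       (mpsmin_bytes << max_mdts) / 1024);	/* 1024 KB */
	printf("metadata mdts=%u -> %u KB\n", max_md_mdts,
	       (mpsmin_bytes << max_md_mdts) / 1024);	/* 128 KB */
	return 0;
}
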
static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_RDMA,
.msdbd = 1,
- .has_keyed_sgls = 1,
+ .flags = NVMF_KEYED_SGLS | NVMF_METADATA_SUPPORTED,
.add_port = nvmet_rdma_add_port,
.remove_port = nvmet_rdma_remove_port,
.queue_response = nvmet_rdma_queue_response,
.delete_ctrl = nvmet_rdma_delete_ctrl,
.disc_traddr = nvmet_rdma_disc_port_addr,
+ .get_mdts = nvmet_rdma_get_mdts,
+ .get_max_queue_size = nvmet_rdma_get_max_queue_size,
};
static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
@@ -1648,7 +2053,7 @@ static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data
}
mutex_unlock(&nvmet_rdma_queue_mutex);
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
}
static struct ib_client nvmet_rdma_ib_client = {
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 5bb5342b8d0c..6c1476e086ef 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -19,6 +19,26 @@
#define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
+/* Define the socket priority to use for connections where it is desirable
+ * that the NIC consider performing optimized packet processing or filtering.
+ * Any non-zero value is sufficient to indicate general consideration of
+ * possible optimizations. Making it a module param allows for alternative
+ * values that may be unique to some NIC implementations.
+ */
+static int so_priority;
+module_param(so_priority, int, 0644);
+MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimization priority");
+
+/* Define a time period (in usecs) for which io_work() will keep sampling an
+ * activated queue before deciding that it is idle. This optional module
+ * behavior can enable NIC solutions that support socket-optimized packet
+ * processing using advanced interrupt moderation techniques.
+ */
+static int idle_poll_period_usecs;
+module_param(idle_poll_period_usecs, int, 0644);
+MODULE_PARM_DESC(idle_poll_period_usecs,
+		"nvmet tcp io_work poll-until-idle time period in usecs");
+
#define NVMET_TCP_RECV_BUDGET 8
#define NVMET_TCP_SEND_BUDGET 8
#define NVMET_TCP_IO_WORK_BUDGET 64
@@ -57,9 +77,8 @@ struct nvmet_tcp_cmd {
u32 pdu_len;
u32 pdu_recv;
int sg_idx;
- int nr_mapped;
struct msghdr recv_msg;
- struct kvec *iov;
+ struct bio_vec *iov;
u32 flags;
struct list_head entry;
@@ -84,7 +103,6 @@ struct nvmet_tcp_queue {
struct socket *sock;
struct nvmet_tcp_port *port;
struct work_struct io_work;
- int cpu;
struct nvmet_cq nvme_cq;
struct nvmet_sq nvme_sq;
@@ -110,6 +128,8 @@ struct nvmet_tcp_queue {
struct ahash_request *snd_hash;
struct ahash_request *rcv_hash;
+ unsigned long poll_end;
+
spinlock_t state_lock;
enum nvmet_tcp_queue_state state;
@@ -134,7 +154,6 @@ struct nvmet_tcp_port {
struct work_struct accept_work;
struct nvmet_port *nport;
struct sockaddr_storage addr;
- int last_cpu;
void (*data_ready)(struct sock *);
};
@@ -143,13 +162,18 @@ static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
static struct workqueue_struct *nvmet_tcp_wq;
-static struct nvmet_fabrics_ops nvmet_tcp_ops;
+static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
-static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
+static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
struct nvmet_tcp_cmd *cmd)
{
+ if (unlikely(!queue->nr_cmds)) {
+ /* We didn't allocate cmds yet, send 0xffff */
+ return USHRT_MAX;
+ }
+
return cmd - queue->cmds;
}
@@ -204,6 +228,11 @@ static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
list_add_tail(&cmd->entry, &cmd->queue->free_list);
}
+static inline int queue_cpu(struct nvmet_tcp_queue *queue)
+{
+ return queue->sock->sk->sk_incoming_cpu;
+}
+
static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
{
return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
@@ -267,43 +296,43 @@ static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
return 0;
}
-static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
+static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
{
- struct scatterlist *sg;
- int i;
-
- sg = &cmd->req.sg[cmd->sg_idx];
-
- for (i = 0; i < cmd->nr_mapped; i++)
- kunmap(sg_page(&sg[i]));
+ kfree(cmd->iov);
+ sgl_free(cmd->req.sg);
+ cmd->iov = NULL;
+ cmd->req.sg = NULL;
}
-static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
+static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
- struct kvec *iov = cmd->iov;
+ struct bio_vec *iov = cmd->iov;
struct scatterlist *sg;
u32 length, offset, sg_offset;
+ int nr_pages;
length = cmd->pdu_len;
- cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
+ nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
offset = cmd->rbytes_done;
- cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE);
+ cmd->sg_idx = offset / PAGE_SIZE;
sg_offset = offset % PAGE_SIZE;
sg = &cmd->req.sg[cmd->sg_idx];
while (length) {
u32 iov_len = min_t(u32, length, sg->length - sg_offset);
- iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
- iov->iov_len = iov_len;
+ iov->bv_page = sg_page(sg);
+ iov->bv_len = sg->length;
+ iov->bv_offset = sg->offset + sg_offset;
length -= iov_len;
sg = sg_next(sg);
iov++;
+ sg_offset = 0;
}
- iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
- cmd->nr_mapped, cmd->pdu_len);
+ iov_iter_bvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
+ nr_pages, cmd->pdu_len);
}
static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
@@ -315,6 +344,14 @@ static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
}
+static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
+{
+ if (status == -EPIPE || status == -ECONNRESET)
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ else
+ nvmet_tcp_fatal_error(queue);
+}
+
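+/*
+ * Editor's note (illustrative, not part of this patch): -EPIPE and
+ * -ECONNRESET mean the peer has already torn the connection down, so a plain
+ * socket shutdown is enough; any other send/receive error is treated as
+ * fatal via nvmet_tcp_fatal_error().
+ */
+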
static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
@@ -348,11 +385,11 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
return 0;
err:
- sgl_free(cmd->req.sg);
+ nvmet_tcp_free_cmd_buffers(cmd);
return NVME_SC_INTERNAL;
}
-static void nvmet_tcp_ddgst(struct ahash_request *hash,
+static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
struct nvmet_tcp_cmd *cmd)
{
ahash_request_set_crypt(hash, cmd->req.sg,
@@ -384,7 +421,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
if (queue->data_digest) {
pdu->hdr.flags |= NVME_TCP_F_DDGST;
- nvmet_tcp_ddgst(queue->snd_hash, cmd);
+ nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
}
if (cmd->queue->hdr_digest) {
@@ -441,17 +478,11 @@ static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
{
struct llist_node *node;
+ struct nvmet_tcp_cmd *cmd;
- node = llist_del_all(&queue->resp_list);
- if (!node)
- return;
-
- while (node) {
- struct nvmet_tcp_cmd *cmd = llist_entry(node,
- struct nvmet_tcp_cmd, lentry);
-
+ for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
+ cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
list_add(&cmd->entry, &queue->resp_send_list);
- node = node->next;
queue->send_list_len++;
}
}
@@ -487,9 +518,34 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
struct nvmet_tcp_cmd *cmd =
container_of(req, struct nvmet_tcp_cmd, req);
struct nvmet_tcp_queue *queue = cmd->queue;
+ struct nvme_sgl_desc *sgl;
+ u32 len;
+
+ if (unlikely(cmd == queue->cmd)) {
+ sgl = &cmd->req.cmd->common.dptr.sgl;
+ len = le32_to_cpu(sgl->length);
+
+ /*
+ * Wait for inline data before processing the response.
+ * Avoid using helpers, as this might happen before
+ * nvmet_req_init is completed.
+ */
+ if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
+ len && len <= cmd->req.port->inline_data_size &&
+ nvme_is_write(cmd->req.cmd))
+ return;
+ }
llist_add(&cmd->lentry, &queue->resp_list);
- queue_work_on(cmd->queue->cpu, nvmet_tcp_wq, &cmd->queue->io_work);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
+}
+
+static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
+{
+ if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
+ nvmet_tcp_queue_response(&cmd->req);
+ else
+ cmd->req.execute(&cmd->req);
}
static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
@@ -500,7 +556,7 @@ static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
offset_in_page(cmd->data_pdu) + cmd->offset,
- left, MSG_DONTWAIT | MSG_MORE);
+ left, MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
if (ret <= 0)
return ret;
@@ -528,7 +584,7 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
if ((!last_in_batch && cmd->queue->send_list_len) ||
cmd->wbytes_done + left < cmd->req.transfer_len ||
queue->data_digest || !queue->nvme_sq.sqhd_disabled)
- flags |= MSG_MORE;
+ flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
left, flags);
@@ -557,10 +613,8 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
}
}
- if (queue->nvme_sq.sqhd_disabled) {
- kfree(cmd->iov);
- sgl_free(cmd->req.sg);
- }
+ if (queue->nvme_sq.sqhd_disabled)
+ nvmet_tcp_free_cmd_buffers(cmd);
return 1;
@@ -575,7 +629,7 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
int ret;
if (!last_in_batch && cmd->queue->send_list_len)
- flags |= MSG_MORE;
+ flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
else
flags |= MSG_EOR;
@@ -589,8 +643,7 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
if (left)
return -EAGAIN;
- kfree(cmd->iov);
- sgl_free(cmd->req.sg);
+ nvmet_tcp_free_cmd_buffers(cmd);
cmd->queue->snd_cmd = NULL;
nvmet_tcp_put_cmd(cmd);
return 1;
@@ -604,7 +657,7 @@ static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
int ret;
if (!last_in_batch && cmd->queue->send_list_len)
- flags |= MSG_MORE;
+ flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
else
flags |= MSG_EOR;
@@ -622,21 +675,31 @@ static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
return 1;
}
-static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
+static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
struct nvmet_tcp_queue *queue = cmd->queue;
+ int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
struct kvec iov = {
- .iov_base = &cmd->exp_ddgst + cmd->offset,
- .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
+ .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
+ .iov_len = left
};
int ret;
+ if (!last_in_batch && cmd->queue->send_list_len)
+ msg.msg_flags |= MSG_MORE;
+ else
+ msg.msg_flags |= MSG_EOR;
+
ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
if (unlikely(ret <= 0))
return ret;
cmd->offset += ret;
+ left -= ret;
+
+ if (left)
+ return -EAGAIN;
if (queue->nvme_sq.sqhd_disabled) {
cmd->queue->snd_cmd = NULL;
@@ -672,7 +735,7 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
}
if (cmd->state == NVMET_TCP_SEND_DDGST) {
- ret = nvmet_try_send_ddgst(cmd);
+ ret = nvmet_try_send_ddgst(cmd, last_in_batch);
if (ret <= 0)
goto done_send;
}
@@ -703,11 +766,15 @@ static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
for (i = 0; i < budget; i++) {
ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
- if (ret <= 0)
+ if (unlikely(ret < 0)) {
+ nvmet_tcp_socket_error(queue, ret);
+ goto done;
+ } else if (ret == 0) {
break;
+ }
(*sends)++;
}
-
+done:
return ret;
}
@@ -794,7 +861,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
icresp->hdr.pdo = 0;
icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
- icresp->maxdata = cpu_to_le32(0xffff); /* FIXME: support r2t */
+ icresp->maxdata = cpu_to_le32(0x400000); /* 4M arbitrary limit */
icresp->cpda = 0;
if (queue->hdr_digest)
icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
@@ -822,7 +889,14 @@ static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
int ret;
- if (!nvme_is_write(cmd->req.cmd) ||
+ /*
+ * This command has not been processed yet, so check whether there
+ * is still pending data left to receive. If there is none, simply
+ * prepare for the next pdu and bail out; otherwise, prepare a
+ * buffer and receive the stale data before continuing.
+ */
+ if (!nvme_is_write(cmd->req.cmd) || !data_len ||
data_len > cmd->req.port->inline_data_size) {
nvmet_prepare_receive_pdu(queue);
return;
@@ -836,7 +910,7 @@ static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
}
queue->rcv_state = NVMET_TCP_RECV_DATA;
- nvmet_tcp_map_pdu_iovec(cmd);
+ nvmet_tcp_build_pdu_iovec(cmd);
cmd->flags |= NVMET_TCP_F_INIT_FAILED;
}
@@ -845,7 +919,17 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
struct nvme_tcp_data_pdu *data = &queue->pdu.data;
struct nvmet_tcp_cmd *cmd;
- cmd = &queue->cmds[data->ttag];
+ if (likely(queue->nr_cmds)) {
+ if (unlikely(data->ttag >= queue->nr_cmds)) {
+ pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
+ queue->idx, data->ttag, queue->nr_cmds);
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
+ cmd = &queue->cmds[data->ttag];
+ } else {
+ cmd = &queue->connect;
+ }
if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
pr_err("ttag %u unexpected data offset %u (expected %u)\n",
@@ -859,7 +943,7 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
cmd->pdu_len = le32_to_cpu(data->data_length);
cmd->pdu_recv = 0;
- nvmet_tcp_map_pdu_iovec(cmd);
+ nvmet_tcp_build_pdu_iovec(cmd);
queue->cmd = cmd;
queue->rcv_state = NVMET_TCP_RECV_DATA;
@@ -883,6 +967,13 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
return nvmet_tcp_handle_icreq(queue);
}
+ if (unlikely(hdr->type == nvme_tcp_icreq)) {
+ pr_err("queue %d: received icreq pdu in state %d\n",
+ queue->idx, queue->state);
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
+
if (hdr->type == nvme_tcp_h2c_data) {
ret = nvmet_tcp_handle_h2c_data_pdu(queue);
if (unlikely(ret))
@@ -911,7 +1002,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
le32_to_cpu(req->cmd->common.dptr.sgl.length));
nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
- return -EAGAIN;
+ return 0;
}
ret = nvmet_tcp_map_data(queue->cmd);
@@ -928,7 +1019,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
if (nvmet_tcp_need_data_in(queue->cmd)) {
if (nvmet_tcp_has_inline_data(queue->cmd)) {
queue->rcv_state = NVMET_TCP_RECV_DATA;
- nvmet_tcp_map_pdu_iovec(queue->cmd);
+ nvmet_tcp_build_pdu_iovec(queue->cmd);
return 0;
}
/* send back R2T */
@@ -1009,7 +1100,7 @@ recv:
}
if (queue->hdr_digest &&
- nvmet_tcp_verify_hdgst(queue, &queue->pdu, queue->offset)) {
+ nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
nvmet_tcp_fatal_error(queue); /* fatal */
return -EPROTO;
}
@@ -1027,7 +1118,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
struct nvmet_tcp_queue *queue = cmd->queue;
- nvmet_tcp_ddgst(queue->rcv_hash, cmd);
+ nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
queue->offset = 0;
queue->left = NVME_TCP_DIGEST_LENGTH;
queue->rcv_state = NVMET_TCP_RECV_DDGST;
@@ -1048,17 +1139,14 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
cmd->rbytes_done += ret;
}
- nvmet_tcp_unmap_pdu_iovec(cmd);
-
- if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
- cmd->rbytes_done == cmd->req.transfer_len) {
- if (queue->data_digest) {
- nvmet_tcp_prep_recv_ddgst(cmd);
- return 0;
- }
- cmd->req.execute(&cmd->req);
+ if (queue->data_digest) {
+ nvmet_tcp_prep_recv_ddgst(cmd);
+ return 0;
}
+ if (cmd->rbytes_done == cmd->req.transfer_len)
+ nvmet_tcp_execute_request(cmd);
+
nvmet_prepare_receive_pdu(queue);
return 0;
}
@@ -1088,15 +1176,16 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
queue->idx, cmd->req.cmd->common.command_id,
queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
le32_to_cpu(cmd->exp_ddgst));
- nvmet_tcp_finish_cmd(cmd);
+ nvmet_req_uninit(&cmd->req);
+ nvmet_tcp_free_cmd_buffers(cmd);
nvmet_tcp_fatal_error(queue);
ret = -EPROTO;
goto out;
}
- if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
- cmd->rbytes_done == cmd->req.transfer_len)
- cmd->req.execute(&cmd->req);
+ if (cmd->rbytes_done == cmd->req.transfer_len)
+ nvmet_tcp_execute_request(cmd);
+
ret = 0;
out:
nvmet_prepare_receive_pdu(queue);
@@ -1144,11 +1233,15 @@ static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
for (i = 0; i < budget; i++) {
ret = nvmet_tcp_try_recv_one(queue);
- if (ret <= 0)
+ if (unlikely(ret < 0)) {
+ nvmet_tcp_socket_error(queue, ret);
+ goto done;
+ } else if (ret == 0) {
break;
+ }
(*recvs)++;
}
-
+done:
return ret;
}
@@ -1157,11 +1250,28 @@ static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
spin_lock(&queue->state_lock);
if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
queue->state = NVMET_TCP_Q_DISCONNECTING;
- schedule_work(&queue->release_work);
+ queue_work(nvmet_wq, &queue->release_work);
}
spin_unlock(&queue->state_lock);
}
+static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
+{
+ queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
+}
+
+static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
+ int ops)
+{
+ if (!idle_poll_period_usecs)
+ return false;
+
+ if (ops)
+ nvmet_tcp_arm_queue_deadline(queue);
+
+ return !time_after(jiffies, queue->poll_end);
+}
+
static void nvmet_tcp_io_work(struct work_struct *w)
{
struct nvmet_tcp_queue *queue =
@@ -1173,35 +1283,25 @@ static void nvmet_tcp_io_work(struct work_struct *w)
pending = false;
ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
- if (ret > 0) {
+ if (ret > 0)
pending = true;
- } else if (ret < 0) {
- if (ret == -EPIPE || ret == -ECONNRESET)
- kernel_sock_shutdown(queue->sock, SHUT_RDWR);
- else
- nvmet_tcp_fatal_error(queue);
+ else if (ret < 0)
return;
- }
ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
- if (ret > 0) {
- /* transmitted message/data */
+ if (ret > 0)
pending = true;
- } else if (ret < 0) {
- if (ret == -EPIPE || ret == -ECONNRESET)
- kernel_sock_shutdown(queue->sock, SHUT_RDWR);
- else
- nvmet_tcp_fatal_error(queue);
+ else if (ret < 0)
return;
- }
} while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
/*
- * We exahusted our budget, requeue our selves
+ * Requeue the worker if idle deadline period is in progress or any
+ * ops activity was recorded during the do-while loop above.
*/
- if (pending)
- queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+ if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
}
static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
@@ -1306,14 +1406,6 @@ static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
write_unlock_bh(&sock->sk->sk_callback_lock);
}
-static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
-{
- nvmet_req_uninit(&cmd->req);
- nvmet_tcp_unmap_pdu_iovec(cmd);
- kfree(cmd->iov);
- sgl_free(cmd->req.sg);
-}
-
static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
{
struct nvmet_tcp_cmd *cmd = queue->cmds;
@@ -1321,17 +1413,32 @@ static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
for (i = 0; i < queue->nr_cmds; i++, cmd++) {
if (nvmet_tcp_need_data_in(cmd))
- nvmet_tcp_finish_cmd(cmd);
+ nvmet_req_uninit(&cmd->req);
}
if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
/* failed in connect */
- nvmet_tcp_finish_cmd(&queue->connect);
+ nvmet_req_uninit(&queue->connect.req);
}
}
+static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
+{
+ struct nvmet_tcp_cmd *cmd = queue->cmds;
+ int i;
+
+ for (i = 0; i < queue->nr_cmds; i++, cmd++) {
+ if (nvmet_tcp_need_data_in(cmd))
+ nvmet_tcp_free_cmd_buffers(cmd);
+ }
+
+ if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
+ nvmet_tcp_free_cmd_buffers(&queue->connect);
+}
+
static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
+ struct page *page;
struct nvmet_tcp_queue *queue =
container_of(w, struct nvmet_tcp_queue, release_work);
@@ -1340,17 +1447,22 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
mutex_unlock(&nvmet_tcp_queue_mutex);
nvmet_tcp_restore_socket_callbacks(queue);
- flush_work(&queue->io_work);
+ cancel_work_sync(&queue->io_work);
+ /* stop accepting incoming data */
+ queue->rcv_state = NVMET_TCP_RECV_ERR;
nvmet_tcp_uninit_data_in_cmds(queue);
nvmet_sq_destroy(&queue->nvme_sq);
cancel_work_sync(&queue->io_work);
+ nvmet_tcp_free_cmd_data_in_buffers(queue);
sock_release(queue->sock);
nvmet_tcp_free_cmds(queue);
if (queue->hdr_digest || queue->data_digest)
nvmet_tcp_free_crypto(queue);
- ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
+ ida_free(&nvmet_tcp_queue_ida, queue->idx);
+ page = virt_to_head_page(queue->pf_cache.va);
+ __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
kfree(queue);
}
@@ -1361,7 +1473,7 @@ static void nvmet_tcp_data_ready(struct sock *sk)
read_lock_bh(&sk->sk_callback_lock);
queue = sk->sk_user_data;
if (likely(queue))
- queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
read_unlock_bh(&sk->sk_callback_lock);
}
@@ -1381,7 +1493,7 @@ static void nvmet_tcp_write_space(struct sock *sk)
if (sk_stream_is_writeable(sk)) {
clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
}
out:
read_unlock_bh(&sk->sk_callback_lock);
@@ -1391,17 +1503,19 @@ static void nvmet_tcp_state_change(struct sock *sk)
{
struct nvmet_tcp_queue *queue;
- write_lock_bh(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
queue = sk->sk_user_data;
if (!queue)
goto done;
switch (sk->sk_state) {
+ case TCP_FIN_WAIT2:
+ case TCP_LAST_ACK:
+ break;
case TCP_FIN_WAIT1:
case TCP_CLOSE_WAIT:
case TCP_CLOSE:
/* FALLTHRU */
- sk->sk_user_data = NULL;
nvmet_tcp_schedule_release_queue(queue);
break;
default:
@@ -1409,14 +1523,13 @@ static void nvmet_tcp_state_change(struct sock *sk)
queue->idx, sk->sk_state);
}
done:
- write_unlock_bh(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
{
struct socket *sock = queue->sock;
struct inet_sock *inet = inet_sk(sock->sk);
- struct linger sol = { .l_onoff = 1, .l_linger = 0 };
int ret;
ret = kernel_getsockname(sock,
@@ -1434,32 +1547,38 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
* close. This is done to prevent stale data from being sent should
* the network connection be restored before TCP times out.
*/
- ret = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
- (char *)&sol, sizeof(sol));
- if (ret)
- return ret;
+ sock_no_linger(sock->sk);
- /* Set socket type of service */
- if (inet->rcv_tos > 0) {
- int tos = inet->rcv_tos;
+ if (so_priority > 0)
+ sock_set_priority(sock->sk, so_priority);
- ret = kernel_setsockopt(sock, SOL_IP, IP_TOS,
- (char *)&tos, sizeof(tos));
- if (ret)
- return ret;
- }
+ /* Set socket type of service */
+ if (inet->rcv_tos > 0)
+ ip_sock_set_tos(sock->sk, inet->rcv_tos);
+ ret = 0;
write_lock_bh(&sock->sk->sk_callback_lock);
- sock->sk->sk_user_data = queue;
- queue->data_ready = sock->sk->sk_data_ready;
- sock->sk->sk_data_ready = nvmet_tcp_data_ready;
- queue->state_change = sock->sk->sk_state_change;
- sock->sk->sk_state_change = nvmet_tcp_state_change;
- queue->write_space = sock->sk->sk_write_space;
- sock->sk->sk_write_space = nvmet_tcp_write_space;
+ if (sock->sk->sk_state != TCP_ESTABLISHED) {
+ /*
+ * If the socket is already closing, don't even start
+ * consuming it
+ */
+ ret = -ENOTCONN;
+ } else {
+ sock->sk->sk_user_data = queue;
+ queue->data_ready = sock->sk->sk_data_ready;
+ sock->sk->sk_data_ready = nvmet_tcp_data_ready;
+ queue->state_change = sock->sk->sk_state_change;
+ sock->sk->sk_state_change = nvmet_tcp_state_change;
+ queue->write_space = sock->sk->sk_write_space;
+ sock->sk->sk_write_space = nvmet_tcp_write_space;
+ if (idle_poll_period_usecs)
+ nvmet_tcp_arm_queue_deadline(queue);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
+ }
write_unlock_bh(&sock->sk->sk_callback_lock);
- return 0;
+ return ret;
}
static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
@@ -1483,7 +1602,7 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
init_llist_head(&queue->resp_list);
INIT_LIST_HEAD(&queue->resp_send_list);
- queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
+ queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
if (queue->idx < 0) {
ret = queue->idx;
goto out_free_queue;
@@ -1497,9 +1616,6 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
if (ret)
goto out_free_connect;
- port->last_cpu = cpumask_next_wrap(port->last_cpu,
- cpu_online_mask, -1, false);
- queue->cpu = port->last_cpu;
nvmet_prepare_receive_pdu(queue);
mutex_lock(&nvmet_tcp_queue_mutex);
@@ -1510,8 +1626,6 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
if (ret)
goto out_destroy_sq;
- queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
-
return 0;
out_destroy_sq:
mutex_lock(&nvmet_tcp_queue_mutex);
@@ -1521,7 +1635,7 @@ out_destroy_sq:
out_free_connect:
nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove:
- ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
+ ida_free(&nvmet_tcp_queue_ida, queue->idx);
out_free_queue:
kfree(queue);
return ret;
@@ -1559,7 +1673,7 @@ static void nvmet_tcp_listen_data_ready(struct sock *sk)
goto out;
if (sk->sk_state == TCP_LISTEN)
- schedule_work(&port->accept_work);
+ queue_work(nvmet_wq, &port->accept_work);
out:
read_unlock_bh(&sk->sk_callback_lock);
}
@@ -1568,7 +1682,7 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport)
{
struct nvmet_tcp_port *port;
__kernel_sa_family_t af;
- int opt, ret;
+ int ret;
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
@@ -1597,7 +1711,6 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport)
}
port->nport = nport;
- port->last_cpu = -1;
INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
if (port->nport->inline_data_size < 0)
port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
@@ -1612,21 +1725,10 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport)
port->sock->sk->sk_user_data = port;
port->data_ready = port->sock->sk->sk_data_ready;
port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
-
- opt = 1;
- ret = kernel_setsockopt(port->sock, IPPROTO_TCP,
- TCP_NODELAY, (char *)&opt, sizeof(opt));
- if (ret) {
- pr_err("failed to set TCP_NODELAY sock opt %d\n", ret);
- goto err_sock;
- }
-
- ret = kernel_setsockopt(port->sock, SOL_SOCKET, SO_REUSEADDR,
- (char *)&opt, sizeof(opt));
- if (ret) {
- pr_err("failed to set SO_REUSEADDR sock opt %d\n", ret);
- goto err_sock;
- }
+ sock_set_reuseaddr(port->sock->sk);
+ tcp_sock_set_nodelay(port->sock->sk);
+ if (so_priority > 0)
+ sock_set_priority(port->sock->sk, so_priority);
ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
sizeof(port->addr));
@@ -1654,6 +1756,17 @@ err_port:
return ret;
}
+static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
+{
+ struct nvmet_tcp_queue *queue;
+
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
+ if (queue->port == port)
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+}
+
static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
struct nvmet_tcp_port *port = nport->priv;
@@ -1663,6 +1776,11 @@ static void nvmet_tcp_remove_port(struct nvmet_port *nport)
port->sock->sk->sk_user_data = NULL;
write_unlock_bh(&port->sock->sk->sk_callback_lock);
cancel_work_sync(&port->accept_work);
+ /*
+ * Destroy the remaining queues, which do not belong to any
+ * controller yet.
+ */
+ nvmet_tcp_destroy_port_queues(port);
sock_release(port->sock);
kfree(port);
@@ -1686,7 +1804,7 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
if (sq->qid == 0) {
/* Let inflight controller teardown complete */
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
}
queue->nr_cmds = sq->size * 2;
@@ -1711,11 +1829,10 @@ static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
}
}
-static struct nvmet_fabrics_ops nvmet_tcp_ops = {
+static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_TCP,
.msdbd = 1,
- .has_keyed_sgls = 0,
.add_port = nvmet_tcp_add_port,
.remove_port = nvmet_tcp_remove_port,
.queue_response = nvmet_tcp_queue_response,
@@ -1728,7 +1845,8 @@ static int __init nvmet_tcp_init(void)
{
int ret;
- nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
+ nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
if (!nvmet_tcp_wq)
return -ENOMEM;
@@ -1748,12 +1866,12 @@ static void __exit nvmet_tcp_exit(void)
nvmet_unregister_transport(&nvmet_tcp_ops);
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
mutex_lock(&nvmet_tcp_queue_mutex);
list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
mutex_unlock(&nvmet_tcp_queue_mutex);
- flush_scheduled_work();
+ flush_workqueue(nvmet_wq);
destroy_workqueue(nvmet_tcp_wq);
}
diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c
index 1373a3c67962..bff454d46255 100644
--- a/drivers/nvme/target/trace.c
+++ b/drivers/nvme/target/trace.c
@@ -27,7 +27,7 @@ static const char *nvmet_trace_admin_get_features(struct trace_seq *p,
u8 sel = cdw10[1] & 0x7;
u32 cdw11 = get_unaligned_le32(cdw10 + 4);
- trace_seq_printf(p, "fid=0x%x sel=0x%x cdw11=0x%x", fid, sel, cdw11);
+ trace_seq_printf(p, "fid=0x%x, sel=0x%x, cdw11=0x%x", fid, sel, cdw11);
trace_seq_putc(p, 0);
return ret;
@@ -49,6 +49,20 @@ static const char *nvmet_trace_get_lba_status(struct trace_seq *p,
return ret;
}
+static const char *nvmet_trace_admin_set_features(struct trace_seq *p,
+ u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 fid = cdw10[0];
+ u8 sv = cdw10[3] & 0x8;
+ u32 cdw11 = get_unaligned_le32(cdw10 + 4);
+
+ trace_seq_printf(p, "fid=0x%x, sv=0x%x, cdw11=0x%x", fid, sv, cdw11);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
static const char *nvmet_trace_read_write(struct trace_seq *p, u8 *cdw10)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -94,6 +108,8 @@ const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p,
switch (opcode) {
case nvme_admin_identify:
return nvmet_trace_admin_identify(p, cdw10);
+ case nvme_admin_set_features:
+ return nvmet_trace_admin_set_features(p, cdw10);
case nvme_admin_get_features:
return nvmet_trace_admin_get_features(p, cdw10);
case nvme_admin_get_lba_status:
diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h
index e645caa882dd..6109b3806b12 100644
--- a/drivers/nvme/target/trace.h
+++ b/drivers/nvme/target/trace.h
@@ -46,19 +46,15 @@ static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
return req->sq->ctrl;
}
-static inline void __assign_disk_name(char *name, struct nvmet_req *req,
- bool init)
+static inline void __assign_req_name(char *name, struct nvmet_req *req)
{
- struct nvmet_ctrl *ctrl = nvmet_req_to_ctrl(req);
- struct nvmet_ns *ns;
-
- if ((init && req->sq->qid) || (!init && req->cq->qid)) {
- ns = nvmet_find_namespace(ctrl, req->cmd->rw.nsid);
- strncpy(name, ns->device_path, DISK_NAME_LEN);
+ if (!req->ns) {
+ memset(name, 0, DISK_NAME_LEN);
return;
}
- memset(name, 0, DISK_NAME_LEN);
+ strncpy(name, req->ns->device_path,
+ min_t(size_t, DISK_NAME_LEN, strlen(req->ns->device_path)));
}
#endif
@@ -81,7 +77,7 @@ TRACE_EVENT(nvmet_req_init,
TP_fast_assign(
__entry->cmd = cmd;
__entry->ctrl = nvmet_req_to_ctrl(req);
- __assign_disk_name(__entry->disk, req, true);
+ __assign_req_name(__entry->disk, req);
__entry->qid = req->sq->qid;
__entry->cid = cmd->common.command_id;
__entry->opcode = cmd->common.opcode;
@@ -121,7 +117,7 @@ TRACE_EVENT(nvmet_req_complete,
__entry->cid = req->cqe->command_id;
__entry->result = le64_to_cpu(req->cqe->result.u64);
__entry->status = le16_to_cpu(req->cqe->status) >> 1;
- __assign_disk_name(__entry->disk, req, false);
+ __assign_req_name(__entry->disk, req);
),
TP_printk("nvmet%s: %sqid=%d, cmdid=%u, res=%#llx, status=%#x",
__print_ctrl_name(__entry->ctrl),
@@ -130,6 +126,34 @@ TRACE_EVENT(nvmet_req_complete,
);
+#define aer_name(aer) { aer, #aer }
+
+TRACE_EVENT(nvmet_async_event,
+ TP_PROTO(struct nvmet_ctrl *ctrl, __le32 result),
+ TP_ARGS(ctrl, result),
+ TP_STRUCT__entry(
+ __field(int, ctrl_id)
+ __field(u32, result)
+ ),
+ TP_fast_assign(
+ __entry->ctrl_id = ctrl->cntlid;
+ __entry->result = (le32_to_cpu(result) & 0xff00) >> 8;
+ ),
+ TP_printk("nvmet%d: NVME_AEN=%#08x [%s]",
+ __entry->ctrl_id, __entry->result,
+ __print_symbolic(__entry->result,
+ aer_name(NVME_AER_NOTICE_NS_CHANGED),
+ aer_name(NVME_AER_NOTICE_ANA),
+ aer_name(NVME_AER_NOTICE_FW_ACT_STARTING),
+ aer_name(NVME_AER_NOTICE_DISC_CHANGED),
+ aer_name(NVME_AER_ERROR),
+ aer_name(NVME_AER_SMART),
+ aer_name(NVME_AER_CSS),
+ aer_name(NVME_AER_VS))
+ )
+);
+#undef aer_name
+
#endif /* _TRACE_NVMET_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
new file mode 100644
index 000000000000..1254cf57e008
--- /dev/null
+++ b/drivers/nvme/target/zns.c
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe ZNS-ZBD command implementation.
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/nvme.h>
+#include <linux/blkdev.h>
+#include "nvmet.h"
+
+/*
+ * We set the Memory Page Size Minimum (MPSMIN) for the target controller to
+ * 0, to which nvme_enable_ctrl() adds 12, resulting in 2^12 = 4k as the
+ * page_shift value. Use a shift of 12 when calculating the ZASL.
+ */
+#define NVMET_MPSMIN_SHIFT 12
+
+static inline u8 nvmet_zasl(unsigned int zone_append_sects)
+{
+ /*
+ * Zone Append Size Limit (zasl) is expressed as a power of 2 value
+ * with the minimum memory page size (i.e. 2^12 = 4k) as its unit.
+ */
+ return ilog2(zone_append_sects >> (NVMET_MPSMIN_SHIFT - 9));
+}
+
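+/*
+ * Editor's illustrative example (not part of this patch): the shift by
+ * NVMET_MPSMIN_SHIFT - 9 = 3 converts 512-byte sectors into 4K MPSMIN units.
+ * With bdev_max_zone_append_sectors() == 1024 (512K), 1024 >> 3 = 128 and
+ * ilog2(128) = 7, so ZASL = 7 and the host derives a zone append limit of
+ * 2^7 * 4K = 512K.
+ */
+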
+static int validate_conv_zones_cb(struct blk_zone *z,
+ unsigned int i, void *data)
+{
+ if (z->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
+{
+ u8 zasl = nvmet_zasl(bdev_max_zone_append_sectors(ns->bdev));
+ struct gendisk *bd_disk = ns->bdev->bd_disk;
+ int ret;
+
+ if (ns->subsys->zasl) {
+ if (ns->subsys->zasl > zasl)
+ return false;
+ }
+ ns->subsys->zasl = zasl;
+
+ /*
+ * Generic zoned block devices may have a smaller last zone which is
+ * not supported by ZNS. Exclude zoned drives that have such a
+ * smaller last zone.
+ */
+ if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1))
+ return false;
+ /*
+ * ZNS does not define a conventional zone type. If the underlying
+ * device has a bitmap set indicating the existence of conventional
+ * zones, reject the device. Otherwise, use report zones to detect if
+ * the device has conventional zones.
+ */
+ if (ns->bdev->bd_disk->conv_zones_bitmap)
+ return false;
+
+ ret = blkdev_report_zones(ns->bdev, 0, bdev_nr_zones(ns->bdev),
+ validate_conv_zones_cb, NULL);
+ if (ret < 0)
+ return false;
+
+ ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
+
+ return true;
+}
+
+void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req)
+{
+ u8 zasl = req->sq->ctrl->subsys->zasl;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_id_ctrl_zns *id;
+ u16 status;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ if (ctrl->ops->get_mdts)
+ id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl);
+ else
+ id->zasl = zasl;
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
+ kfree(id);
+out:
+ nvmet_req_complete(req, status);
+}
+
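+/*
+ * Editor's note (illustrative, not part of this patch): the ZASL reported to
+ * the host above is capped by the transport's MDTS whenever ->get_mdts() is
+ * provided, so a zone append payload can never exceed what a single
+ * transport transfer can carry.
+ */
+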
+void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
+{
+ struct nvme_id_ns_zns *id_zns;
+ u64 zsze;
+ u16 status;
+ u32 mar, mor;
+
+ if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
+ req->error_loc = offsetof(struct nvme_identify, nsid);
+ status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ goto out;
+ }
+
+ id_zns = kzalloc(sizeof(*id_zns), GFP_KERNEL);
+ if (!id_zns) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ status = nvmet_req_find_ns(req);
+ if (status)
+ goto done;
+
+ if (!bdev_is_zoned(req->ns->bdev)) {
+ req->error_loc = offsetof(struct nvme_identify, nsid);
+ goto done;
+ }
+
+ if (nvmet_ns_revalidate(req->ns)) {
+ mutex_lock(&req->ns->subsys->lock);
+ nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
+ mutex_unlock(&req->ns->subsys->lock);
+ }
+ zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
+ req->ns->blksize_shift;
+ id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
+
+ mor = bdev_max_open_zones(req->ns->bdev);
+ if (!mor)
+ mor = U32_MAX;
+ else
+ mor--;
+ id_zns->mor = cpu_to_le32(mor);
+
+ mar = bdev_max_active_zones(req->ns->bdev);
+ if (!mar)
+ mar = U32_MAX;
+ else
+ mar--;
+ id_zns->mar = cpu_to_le32(mar);
+
+done:
+ status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
+ kfree(id_zns);
+out:
+ nvmet_req_complete(req, status);
+}
+
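+/*
+ * Editor's note (illustrative, not part of this patch): the MOR/MAR handling
+ * above bridges two conventions: bdev_max_open_zones() and
+ * bdev_max_active_zones() return 0 for "no limit", while the NVMe fields are
+ * 0's based with U32_MAX meaning "no limit", hence the 0 -> U32_MAX and
+ * n -> n - 1 conversion.
+ */
+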
+static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
+{
+ sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
+ u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
+
+ if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);
+ return NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ }
+
+ if (out_bufsize < sizeof(struct nvme_zone_report)) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ switch (req->cmd->zmr.pr) {
+ case 0:
+ case 1:
+ break;
+ default:
+ req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ switch (req->cmd->zmr.zrasf) {
+ case NVME_ZRASF_ZONE_REPORT_ALL:
+ case NVME_ZRASF_ZONE_STATE_EMPTY:
+ case NVME_ZRASF_ZONE_STATE_IMP_OPEN:
+ case NVME_ZRASF_ZONE_STATE_EXP_OPEN:
+ case NVME_ZRASF_ZONE_STATE_CLOSED:
+ case NVME_ZRASF_ZONE_STATE_FULL:
+ case NVME_ZRASF_ZONE_STATE_READONLY:
+ case NVME_ZRASF_ZONE_STATE_OFFLINE:
+ break;
+ default:
+ req->error_loc =
+ offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ return NVME_SC_SUCCESS;
+}
+
+struct nvmet_report_zone_data {
+ struct nvmet_req *req;
+ u64 out_buf_offset;
+ u64 out_nr_zones;
+ u64 nr_zones;
+ u8 zrasf;
+};
+
+static int nvmet_bdev_report_zone_cb(struct blk_zone *z, unsigned i, void *d)
+{
+ static const unsigned int nvme_zrasf_to_blk_zcond[] = {
+ [NVME_ZRASF_ZONE_STATE_EMPTY] = BLK_ZONE_COND_EMPTY,
+ [NVME_ZRASF_ZONE_STATE_IMP_OPEN] = BLK_ZONE_COND_IMP_OPEN,
+ [NVME_ZRASF_ZONE_STATE_EXP_OPEN] = BLK_ZONE_COND_EXP_OPEN,
+ [NVME_ZRASF_ZONE_STATE_CLOSED] = BLK_ZONE_COND_CLOSED,
+ [NVME_ZRASF_ZONE_STATE_READONLY] = BLK_ZONE_COND_READONLY,
+ [NVME_ZRASF_ZONE_STATE_FULL] = BLK_ZONE_COND_FULL,
+ [NVME_ZRASF_ZONE_STATE_OFFLINE] = BLK_ZONE_COND_OFFLINE,
+ };
+ struct nvmet_report_zone_data *rz = d;
+
+ if (rz->zrasf != NVME_ZRASF_ZONE_REPORT_ALL &&
+ z->cond != nvme_zrasf_to_blk_zcond[rz->zrasf])
+ return 0;
+
+ if (rz->nr_zones < rz->out_nr_zones) {
+ struct nvme_zone_descriptor zdesc = { };
+ u16 status;
+
+ zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity);
+ zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start);
+ zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp);
+ zdesc.za = z->reset ? 1 << 2 : 0;
+ zdesc.zs = z->cond << 4;
+ zdesc.zt = z->type;
+
+ status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc,
+ sizeof(zdesc));
+ if (status)
+ return -EINVAL;
+
+ rz->out_buf_offset += sizeof(zdesc);
+ }
+
+ rz->nr_zones++;
+
+ return 0;
+}
+
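+/*
+ * Editor's note (illustrative, not part of this patch): the callback above
+ * counts every zone whose condition matches the requested zrasf filter, but
+ * only copies descriptors while the host buffer (out_nr_zones) has room; the
+ * full count is what later feeds the partial-bit handling in the zmgmt_recv
+ * work function.
+ */
+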
+static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req)
+{
+ unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
+
+ return bdev_nr_zones(req->ns->bdev) -
+ (sect >> ilog2(bdev_zone_sectors(req->ns->bdev)));
+}
+
+static unsigned long get_nr_zones_from_buf(struct nvmet_req *req, u32 bufsize)
+{
+ if (bufsize <= sizeof(struct nvme_zone_report))
+ return 0;
+
+ return (bufsize - sizeof(struct nvme_zone_report)) /
+ sizeof(struct nvme_zone_descriptor);
+}
+
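+/*
+ * Editor's illustrative example (not part of this patch, assuming the
+ * 64-byte report header and 64-byte zone descriptors defined by the ZNS
+ * spec): zmr.numd is a 0's based dword count, so out_bufsize =
+ * (numd + 1) << 2 bytes. For numd = 1023 that is 4096 bytes; after the
+ * struct nvme_zone_report header there is room for (4096 - 64) / 64 = 63
+ * zone descriptors, which is what get_nr_zones_from_buf() returns.
+ */
+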
+static void nvmet_bdev_zone_zmgmt_recv_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
+ sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
+ unsigned long req_slba_nr_zones = nvmet_req_nr_zones_from_slba(req);
+ u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
+ __le64 nr_zones;
+ u16 status;
+ int ret;
+ struct nvmet_report_zone_data rz_data = {
+ .out_nr_zones = get_nr_zones_from_buf(req, out_bufsize),
+ /* leave room for the report zone header */
+ .out_buf_offset = sizeof(struct nvme_zone_report),
+ .zrasf = req->cmd->zmr.zrasf,
+ .nr_zones = 0,
+ .req = req,
+ };
+
+ status = nvmet_bdev_validate_zone_mgmt_recv(req);
+ if (status)
+ goto out;
+
+ if (!req_slba_nr_zones) {
+ status = NVME_SC_SUCCESS;
+ goto out;
+ }
+
+ ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones,
+ nvmet_bdev_report_zone_cb, &rz_data);
+ if (ret < 0) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ /*
+ * When the partial bit is set, nr_zones must indicate the number of zone
+ * descriptors actually transferred.
+ */
+ if (req->cmd->zmr.pr)
+ rz_data.nr_zones = min(rz_data.nr_zones, rz_data.out_nr_zones);
+
+ nr_zones = cpu_to_le64(rz_data.nr_zones);
+ status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones));
+
+out:
+ nvmet_req_complete(req, status);
+}
+
+void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req)
+{
+ INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work);
+ queue_work(zbd_wq, &req->z.zmgmt_work);
+}
+
+static inline enum req_op zsa_req_op(u8 zsa)
+{
+ switch (zsa) {
+ case NVME_ZONE_OPEN:
+ return REQ_OP_ZONE_OPEN;
+ case NVME_ZONE_CLOSE:
+ return REQ_OP_ZONE_CLOSE;
+ case NVME_ZONE_FINISH:
+ return REQ_OP_ZONE_FINISH;
+ case NVME_ZONE_RESET:
+ return REQ_OP_ZONE_RESET;
+ default:
+ return REQ_OP_LAST;
+ }
+}
+
+static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret)
+{
+ switch (ret) {
+ case 0:
+ return NVME_SC_SUCCESS;
+ case -EINVAL:
+ case -EIO:
+ return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
+ default:
+ return NVME_SC_INTERNAL;
+ }
+}
+
+struct nvmet_zone_mgmt_send_all_data {
+ unsigned long *zbitmap;
+ struct nvmet_req *req;
+};
+
+static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d)
+{
+ struct nvmet_zone_mgmt_send_all_data *data = d;
+
+ switch (zsa_req_op(data->req->cmd->zms.zsa)) {
+ case REQ_OP_ZONE_OPEN:
+ switch (z->cond) {
+ case BLK_ZONE_COND_CLOSED:
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case REQ_OP_ZONE_CLOSE:
+ switch (z->cond) {
+ case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ break;
+ default:
+ return 0;
+ }
+ break;
+ case REQ_OP_ZONE_FINISH:
+ switch (z->cond) {
+ case BLK_ZONE_COND_IMP_OPEN:
+ case BLK_ZONE_COND_EXP_OPEN:
+ case BLK_ZONE_COND_CLOSED:
+ break;
+ default:
+ return 0;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ set_bit(i, data->zbitmap);
+
+ return 0;
+}
+
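+/*
+ * Editor's note (illustrative, not part of this patch): the scan callback
+ * above only marks zones for which the requested action is a valid
+ * transition: OPEN acts on CLOSED zones, CLOSE on IMP_OPEN/EXP_OPEN zones,
+ * and FINISH on IMP_OPEN/EXP_OPEN/CLOSED zones. Everything else is skipped,
+ * so the select-all emulation never issues an invalid zone transition.
+ */
+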
+static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
+{
+ struct block_device *bdev = req->ns->bdev;
+ unsigned int nr_zones = bdev_nr_zones(bdev);
+ struct bio *bio = NULL;
+ sector_t sector = 0;
+ int ret;
+ struct nvmet_zone_mgmt_send_all_data d = {
+ .req = req,
+ };
+
+ d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)),
+ GFP_NOIO, bdev->bd_disk->node_id);
+ if (!d.zbitmap) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Scan and build bitmap of the eligible zones */
+ ret = blkdev_report_zones(bdev, 0, nr_zones, zmgmt_send_scan_cb, &d);
+ if (ret != nr_zones) {
+ if (ret > 0)
+ ret = -EIO;
+ goto out;
+ } else {
+ /* We scanned all the zones */
+ ret = 0;
+ }
+
+ while (sector < bdev_nr_sectors(bdev)) {
+ if (test_bit(disk_zone_no(bdev->bd_disk, sector), d.zbitmap)) {
+ bio = blk_next_bio(bio, bdev, 0,
+ zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
+ GFP_KERNEL);
+ bio->bi_iter.bi_sector = sector;
+ /* This may take a while, so be nice to others */
+ cond_resched();
+ }
+ sector += bdev_zone_sectors(bdev);
+ }
+
+ if (bio) {
+ ret = submit_bio_wait(bio);
+ bio_put(bio);
+ }
+
+out:
+ kfree(d.zbitmap);
+
+ return blkdev_zone_mgmt_errno_to_nvme_status(ret);
+}
+
+static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
+{
+ int ret;
+
+ switch (zsa_req_op(req->cmd->zms.zsa)) {
+ case REQ_OP_ZONE_RESET:
+ ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0,
+ get_capacity(req->ns->bdev->bd_disk),
+ GFP_KERNEL);
+ if (ret < 0)
+ return blkdev_zone_mgmt_errno_to_nvme_status(ret);
+ break;
+ case REQ_OP_ZONE_OPEN:
+ case REQ_OP_ZONE_CLOSE:
+ case REQ_OP_ZONE_FINISH:
+ return nvmet_bdev_zone_mgmt_emulate_all(req);
+ default:
+ /* this is needed to quiet compiler warning */
+ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ return NVME_SC_SUCCESS;
+}
+
+static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
+ sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
+ enum req_op op = zsa_req_op(req->cmd->zms.zsa);
+ struct block_device *bdev = req->ns->bdev;
+ sector_t zone_sectors = bdev_zone_sectors(bdev);
+ u16 status = NVME_SC_SUCCESS;
+ int ret;
+
+ if (op == REQ_OP_LAST) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
+ status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
+ goto out;
+ }
+
+ /* when the select all bit is set, the slba field is ignored */
+ if (req->cmd->zms.select_all) {
+ status = nvmet_bdev_execute_zmgmt_send_all(req);
+ goto out;
+ }
+
+ if (sect >= get_capacity(bdev->bd_disk)) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
+ status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ goto out;
+ }
+
+ if (sect & (zone_sectors - 1)) {
+ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out;
+ }
+
+ ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL);
+ if (ret < 0)
+ status = blkdev_zone_mgmt_errno_to_nvme_status(ret);
+
+out:
+ nvmet_req_complete(req, status);
+}
+
+void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req)
+{
+ INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work);
+ queue_work(zbd_wq, &req->z.zmgmt_work);
+}
+
+static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
+{
+ struct nvmet_req *req = bio->bi_private;
+
+ if (bio->bi_status == BLK_STS_OK) {
+ req->cqe->result.u64 =
+ nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector);
+ }
+
+ nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
+ nvmet_req_bio_put(req, bio);
+}
+
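+/*
+ * Editor's illustrative example (not part of this patch): on success the
+ * block layer returns the sector the data was actually written at in
+ * bio->bi_iter.bi_sector, and nvmet_sect_to_lba() (assumed to shift right by
+ * blksize_shift - 9) converts it back to an LBA for the host. With 4K LBAs
+ * (blksize_shift = 12), a completion at sector 80 is reported as LBA 10 in
+ * cqe->result.
+ */
+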
+void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
+{
+ sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
+ const blk_opf_t opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
+ u16 status = NVME_SC_SUCCESS;
+ unsigned int total_len = 0;
+ struct scatterlist *sg;
+ struct bio *bio;
+ int sg_cnt;
+
+ /* Request is completed on a length mismatch in nvmet_check_transfer_len() */
+ if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
+ return;
+
+ if (!req->sg_cnt) {
+ nvmet_req_complete(req, 0);
+ return;
+ }
+
+ if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
+ req->error_loc = offsetof(struct nvme_rw_command, slba);
+ status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ goto out;
+ }
+
+ if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
+ req->error_loc = offsetof(struct nvme_rw_command, slba);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out;
+ }
+
+ if (nvmet_use_inline_bvec(req)) {
+ bio = &req->z.inline_bio;
+ bio_init(bio, req->ns->bdev, req->inline_bvec,
+ ARRAY_SIZE(req->inline_bvec), opf);
+ } else {
+ bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL);
+ }
+
+ bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
+ bio->bi_iter.bi_sector = sect;
+ bio->bi_private = req;
+ if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
+ bio->bi_opf |= REQ_FUA;
+
+ for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
+ struct page *p = sg_page(sg);
+ unsigned int l = sg->length;
+ unsigned int o = sg->offset;
+ unsigned int ret;
+
+ ret = bio_add_zone_append_page(bio, p, l, o);
+ if (ret != sg->length) {
+ status = NVME_SC_INTERNAL;
+ goto out_put_bio;
+ }
+ total_len += sg->length;
+ }
+
+ if (total_len != nvmet_rw_data_len(req)) {
+ status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ goto out_put_bio;
+ }
+
+ submit_bio(bio);
+ return;
+
+out_put_bio:
+ nvmet_req_bio_put(req, bio);
+out:
+ nvmet_req_complete(req, status);
+}
+
+u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->common.opcode) {
+ case nvme_cmd_zone_append:
+ req->execute = nvmet_bdev_execute_zone_append;
+ return 0;
+ case nvme_cmd_zone_mgmt_recv:
+ req->execute = nvmet_bdev_execute_zone_mgmt_recv;
+ return 0;
+ case nvme_cmd_zone_mgmt_send:
+ req->execute = nvmet_bdev_execute_zone_mgmt_send;
+ return 0;
+ default:
+ return nvmet_bdev_parse_io_cmd(req);
+ }
+}