Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/host/core.c           | 257
-rw-r--r--  drivers/nvme/host/fabrics.c        |   7
-rw-r--r--  drivers/nvme/host/fabrics.h        |   2
-rw-r--r--  drivers/nvme/host/fc.c             |  15
-rw-r--r--  drivers/nvme/host/nvme.h           |  12
-rw-r--r--  drivers/nvme/host/pci.c            |  27
-rw-r--r--  drivers/nvme/host/rdma.c           |  50
-rw-r--r--  drivers/nvme/target/admin-cmd.c    |   4
-rw-r--r--  drivers/nvme/target/core.c         |  10
-rw-r--r--  drivers/nvme/target/discovery.c    |   4
-rw-r--r--  drivers/nvme/target/fabrics-cmd.c  |   6
-rw-r--r--  drivers/nvme/target/fc.c           |   8
-rw-r--r--  drivers/nvme/target/loop.c         |   3
-rw-r--r--  drivers/nvme/target/nvmet.h        |   1
-rw-r--r--  drivers/nvme/target/rdma.c         |   7
15 files changed, 327 insertions(+), 86 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 44a1a257e0b5..25ec4e585220 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -26,6 +26,7 @@
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
+#include <linux/pm_qos.h>
#include <scsi/sg.h>
#include <asm/unaligned.h>
@@ -56,6 +57,11 @@ EXPORT_SYMBOL_GPL(nvme_max_retries);
static int nvme_char_major;
module_param(nvme_char_major, int, 0);
+static unsigned long default_ps_max_latency_us = 25000;
+module_param(default_ps_max_latency_us, ulong, 0644);
+MODULE_PARM_DESC(default_ps_max_latency_us,
+ "max power saving latency for new devices; use PM QOS to change per device");
+
static LIST_HEAD(nvme_ctrl_list);
static DEFINE_SPINLOCK(dev_list_lock);
@@ -560,7 +566,7 @@ int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
c.identify.opcode = nvme_admin_identify;
- c.identify.cns = cpu_to_le32(NVME_ID_CNS_CTRL);
+ c.identify.cns = NVME_ID_CNS_CTRL;
*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
if (!*id)
@@ -578,7 +584,7 @@ static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *n
struct nvme_command c = { };
c.identify.opcode = nvme_admin_identify;
- c.identify.cns = cpu_to_le32(NVME_ID_CNS_NS_ACTIVE_LIST);
+ c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
c.identify.nsid = cpu_to_le32(nsid);
return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
}
@@ -590,8 +596,9 @@ int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
int error;
/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
- c.identify.opcode = nvme_admin_identify,
- c.identify.nsid = cpu_to_le32(nsid),
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.nsid = cpu_to_le32(nsid);
+ c.identify.cns = NVME_ID_CNS_NS;
*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
if (!*id)
@@ -1251,6 +1258,176 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
blk_queue_write_cache(q, vwc, vwc);
}
+static void nvme_configure_apst(struct nvme_ctrl *ctrl)
+{
+ /*
+ * APST (Autonomous Power State Transition) lets us program a
+ * table of power state transitions that the controller will
+ * perform automatically. We configure it with a simple
+ * heuristic: we are willing to spend at most 2% of the time
+ * transitioning between power states. Therefore, when running
+ * in any given state, we will enter the next lower-power
+ * non-operational state after waiting 50 * (enlat + exlat)
+ * microseconds, as long as that state's total latency is under
+ * the requested maximum latency.
+ *
+ * We will not autonomously enter any non-operational state for
+ * which the total latency exceeds ps_max_latency_us. Users
+ * can set ps_max_latency_us to zero to turn off APST.
+ */
+
+ unsigned apste;
+ struct nvme_feat_auto_pst *table;
+ int ret;
+
+ /*
+ * If APST isn't supported or if we haven't been initialized yet,
+ * then don't do anything.
+ */
+ if (!ctrl->apsta)
+ return;
+
+ if (ctrl->npss > 31) {
+ dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
+ return;
+ }
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return;
+
+ if (ctrl->ps_max_latency_us == 0) {
+ /* Turn off APST. */
+ apste = 0;
+ } else {
+ __le64 target = cpu_to_le64(0);
+ int state;
+
+ /*
+ * Walk through all states from lowest- to highest-power.
+ * According to the spec, lower-numbered states use more
+ * power. NPSS, despite the name, is the index of the
+ * lowest-power state, not the number of states.
+ */
+ for (state = (int)ctrl->npss; state >= 0; state--) {
+ u64 total_latency_us, transition_ms;
+
+ if (target)
+ table->entries[state] = target;
+
+ /*
+ * Is this state a useful non-operational state for
+ * higher-power states to autonomously transition to?
+ */
+ if (!(ctrl->psd[state].flags &
+ NVME_PS_FLAGS_NON_OP_STATE))
+ continue;
+
+ total_latency_us =
+ (u64)le32_to_cpu(ctrl->psd[state].entry_lat) +
+ le32_to_cpu(ctrl->psd[state].exit_lat);
+ if (total_latency_us > ctrl->ps_max_latency_us)
+ continue;
+
+ /*
+ * This state is good. Use it as the APST idle
+ * target for higher power states.
+ */
+ transition_ms = total_latency_us + 19;
+ do_div(transition_ms, 20);
+ if (transition_ms > (1 << 24) - 1)
+ transition_ms = (1 << 24) - 1;
+
+ target = cpu_to_le64((state << 3) |
+ (transition_ms << 8));
+ }
+
+ apste = 1;
+ }
+
+ ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
+ table, sizeof(*table), NULL);
+ if (ret)
+ dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
+
+ kfree(table);
+}
+
+static void nvme_set_latency_tolerance(struct device *dev, s32 val)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ u64 latency;
+
+ switch (val) {
+ case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
+ case PM_QOS_LATENCY_ANY:
+ latency = U64_MAX;
+ break;
+
+ default:
+ latency = val;
+ }
+
+ if (ctrl->ps_max_latency_us != latency) {
+ ctrl->ps_max_latency_us = latency;
+ nvme_configure_apst(ctrl);
+ }
+}
+
+struct nvme_core_quirk_entry {
+ /*
+ * NVMe model and firmware strings are padded with spaces. For
+ * simplicity, strings in the quirk table are padded with NULLs
+ * instead.
+ */
+ u16 vid;
+ const char *mn;
+ const char *fr;
+ unsigned long quirks;
+};
+
+static const struct nvme_core_quirk_entry core_quirks[] = {
+ /*
+ * Seen on a Samsung "SM951 NVMe SAMSUNG 256GB": using APST causes
+ * the controller to go out to lunch. It dies when the watchdog
+ * timer reads CSTS and gets 0xffffffff.
+ */
+ {
+ .vid = 0x144d,
+ .fr = "BXW75D0Q",
+ .quirks = NVME_QUIRK_NO_APST,
+ },
+};
+
+/* match is null-terminated but idstr is space-padded. */
+static bool string_matches(const char *idstr, const char *match, size_t len)
+{
+ size_t matchlen;
+
+ if (!match)
+ return true;
+
+ matchlen = strlen(match);
+ WARN_ON_ONCE(matchlen > len);
+
+ if (memcmp(idstr, match, matchlen))
+ return false;
+
+ for (; matchlen < len; matchlen++)
+ if (idstr[matchlen] != ' ')
+ return false;
+
+ return true;
+}
+
+static bool quirk_matches(const struct nvme_id_ctrl *id,
+ const struct nvme_core_quirk_entry *q)
+{
+ return q->vid == le16_to_cpu(id->vid) &&
+ string_matches(id->mn, q->mn, sizeof(id->mn)) &&
+ string_matches(id->fr, q->fr, sizeof(id->fr));
+}
+
/*
* Initialize the cached copies of the Identify data and various controller
* register in our nvme_ctrl structure. This should be called as soon as
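For reference, each table entry programmed above packs the Idle Transition Power State into bits 3..7 and the Idle Time Prior to Transition, in milliseconds, into bits 8..31. A standalone sketch of the same arithmetic (host-endian for illustration only; the driver stores the value as __le64):

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the APST entry encoding used by nvme_configure_apst():
 * idle for roughly 50 * (enlat + exlat) microseconds, i.e. a 2%
 * transition-time budget, then drop to target_state. The idle time
 * is expressed in milliseconds and clamped to the 24-bit field.
 */
static uint64_t apst_entry(unsigned int target_state,
			   uint64_t total_latency_us)
{
	uint64_t transition_ms = (total_latency_us + 19) / 20;

	if (transition_ms > (1 << 24) - 1)
		transition_ms = (1 << 24) - 1;

	return ((uint64_t)target_state << 3) | (transition_ms << 8);
}

int main(void)
{
	/* enlat + exlat = 25000us -> idle timer of 1250ms */
	printf("0x%llx\n", (unsigned long long)apst_entry(4, 25000));
	return 0;
}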
@@ -1262,6 +1439,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
u64 cap;
int ret, page_shift;
u32 max_hw_sectors;
+ u8 prev_apsta;
ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
if (ret) {
@@ -1285,6 +1463,24 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
return -EIO;
}
+ if (!ctrl->identified) {
+ /*
+ * Check for quirks. Quirk can depend on firmware version,
+ * so, in principle, the set of quirks present can change
+ * across a reset. As a possible future enhancement, we
+ * could re-scan for quirks every time we reinitialize
+ * the device, but we'd have to make sure that the driver
+ * behaves intelligently if the quirks change.
+ */
+
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
+ if (quirk_matches(id, &core_quirks[i]))
+ ctrl->quirks |= core_quirks[i].quirks;
+ }
+ }
+
ctrl->oacs = le16_to_cpu(id->oacs);
ctrl->vid = le16_to_cpu(id->vid);
ctrl->oncs = le16_to_cpup(&id->oncs);
@@ -1305,6 +1501,11 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->sgls = le32_to_cpu(id->sgls);
ctrl->kas = le16_to_cpu(id->kas);
+ ctrl->npss = id->npss;
+ prev_apsta = ctrl->apsta;
+ ctrl->apsta = (ctrl->quirks & NVME_QUIRK_NO_APST) ? 0 : id->apsta;
+ memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
+
if (ctrl->ops->is_fabrics) {
ctrl->icdoff = le16_to_cpu(id->icdoff);
ctrl->ioccsz = le32_to_cpu(id->ioccsz);
@@ -1328,6 +1529,16 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
}
kfree(id);
+
+ if (ctrl->apsta && !prev_apsta)
+ dev_pm_qos_expose_latency_tolerance(ctrl->device);
+ else if (!ctrl->apsta && prev_apsta)
+ dev_pm_qos_hide_latency_tolerance(ctrl->device);
+
+ nvme_configure_apst(ctrl);
+
+ ctrl->identified = true;
+
return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);
@@ -1577,6 +1788,29 @@ static ssize_t nvme_sysfs_show_transport(struct device *dev,
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
+static ssize_t nvme_sysfs_show_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ static const char *const state_name[] = {
+ [NVME_CTRL_NEW] = "new",
+ [NVME_CTRL_LIVE] = "live",
+ [NVME_CTRL_RESETTING] = "resetting",
+ [NVME_CTRL_RECONNECTING] = "reconnecting",
+ [NVME_CTRL_DELETING] = "deleting",
+ [NVME_CTRL_DEAD] = "dead",
+ };
+
+ if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
+ state_name[ctrl->state])
+ return sprintf(buf, "%s\n", state_name[ctrl->state]);
+
+ return sprintf(buf, "unknown state\n");
+}
+
+static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
+
static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1609,6 +1843,7 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_transport.attr,
&dev_attr_subsysnqn.attr,
&dev_attr_address.attr,
+ &dev_attr_state.attr,
NULL
};
@@ -2065,6 +2300,14 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
list_add_tail(&ctrl->node, &nvme_ctrl_list);
spin_unlock(&dev_list_lock);
+ /*
+ * Initialize latency tolerance controls. The sysfs files won't
+ * be visible to userspace unless the device actually supports APST.
+ */
+ ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
+ dev_pm_qos_update_user_latency_tolerance(ctrl->device,
+ min(default_ps_max_latency_us, (unsigned long)S32_MAX));
+
return 0;
out_release_instance:
nvme_release_instance(ctrl);
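Userspace can then tighten or relax ps_max_latency_us per controller through the PM QoS latency tolerance attribute. A minimal sketch, assuming the controller is nvme0 and the usual sysfs layout (the exact path is an assumption here; writing 0 disables APST per nvme_configure_apst() above):

#include <stdio.h>

int main(void)
{
	/* Assumed path for the attribute created by
	 * dev_pm_qos_expose_latency_tolerance() on the nvme0 device. */
	const char *path =
		"/sys/class/nvme/nvme0/power/pm_qos_latency_tolerance_us";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}

	/* Allow up to 10ms of wakeup latency; 0 would turn APST off. */
	fprintf(f, "%d\n", 10000);
	fclose(f);
	return 0;
}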
@@ -2090,9 +2333,9 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
* Revalidating a dead namespace sets capacity to 0. This will
* end buffered writers dirtying pages that can't be synced.
*/
- if (ns->disk && !test_and_set_bit(NVME_NS_DEAD, &ns->flags))
- revalidate_disk(ns->disk);
-
+ if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+ continue;
+ revalidate_disk(ns->disk);
blk_set_queue_dying(ns->queue);
blk_mq_abort_requeue_list(ns->queue);
blk_mq_start_stopped_hw_queues(ns->queue, true);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 916d13608059..5b7386f69f4d 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -480,11 +480,16 @@ EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
* being implemented to the common NVMe fabrics library. Part of
* the overall init sequence of starting up a fabrics driver.
*/
-void nvmf_register_transport(struct nvmf_transport_ops *ops)
+int nvmf_register_transport(struct nvmf_transport_ops *ops)
{
+ if (!ops->create_ctrl)
+ return -EINVAL;
+
mutex_lock(&nvmf_transports_mutex);
list_add_tail(&ops->entry, &nvmf_transports);
mutex_unlock(&nvmf_transports_mutex);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(nvmf_register_transport);
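Since registration can now fail, and rejects ops without a create_ctrl callback, transport modules should propagate the return value from their init routine, as the fc, rdma and loop hunks below do. A hedged skeleton with a hypothetical transport:

/* Hypothetical "mytr" transport: with the int return, module init
 * simply propagates registration failure, and .create_ctrl must be
 * set or nvmf_register_transport() now returns -EINVAL. */
static struct nvme_ctrl *my_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts);	/* body elided */

static struct nvmf_transport_ops my_transport = {
	.name		= "mytr",
	.required_opts	= NVMF_OPT_TRADDR,
	.create_ctrl	= my_create_ctrl,
};

static int __init my_init_module(void)
{
	return nvmf_register_transport(&my_transport);
}
module_init(my_init_module);

static void __exit my_exit_module(void)
{
	nvmf_unregister_transport(&my_transport);
}
module_exit(my_exit_module);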
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 924145c979f1..156018182ce4 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -128,7 +128,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl);
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid);
-void nvmf_register_transport(struct nvmf_transport_ops *ops);
+int nvmf_register_transport(struct nvmf_transport_ops *ops);
void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
void nvmf_free_options(struct nvmf_ctrl_options *opts);
const char *nvmf_get_subsysnqn(struct nvme_ctrl *ctrl);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index fb51a8de9b29..9690beb15e69 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2353,18 +2353,6 @@ __nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
/* sanity checks */
- /* FC-NVME supports 64-byte SQE only */
- if (ctrl->ctrl.ioccsz != 4) {
- dev_err(ctrl->ctrl.device, "ioccsz %d is not supported!\n",
- ctrl->ctrl.ioccsz);
- goto out_remove_admin_queue;
- }
- /* FC-NVME supports 16-byte CQE only */
- if (ctrl->ctrl.iorcsz != 1) {
- dev_err(ctrl->ctrl.device, "iorcsz %d is not supported!\n",
- ctrl->ctrl.iorcsz);
- goto out_remove_admin_queue;
- }
/* FC-NVME does not have other data in the capsule */
if (ctrl->ctrl.icdoff) {
dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
@@ -2562,8 +2550,7 @@ static int __init nvme_fc_init_module(void)
if (!nvme_fc_wq)
return -ENOMEM;
- nvmf_register_transport(&nvme_fc_transport);
- return 0;
+ return nvmf_register_transport(&nvme_fc_transport);
}
static void __exit nvme_fc_exit_module(void)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 14cfc6f7facb..a3da1e90b99d 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -78,6 +78,11 @@ enum nvme_quirks {
* readiness, which is done by reading the NVME_CSTS_RDY bit.
*/
NVME_QUIRK_DELAY_BEFORE_CHK_RDY = (1 << 3),
+
+ /*
+ * APST should not be used.
+ */
+ NVME_QUIRK_NO_APST = (1 << 4),
};
/*
@@ -112,6 +117,7 @@ enum nvme_ctrl_state {
struct nvme_ctrl {
enum nvme_ctrl_state state;
+ bool identified;
spinlock_t lock;
const struct nvme_ctrl_ops *ops;
struct request_queue *admin_q;
@@ -147,13 +153,19 @@ struct nvme_ctrl {
u32 vs;
u32 sgls;
u16 kas;
+ u8 npss;
+ u8 apsta;
unsigned int kato;
bool subsystem;
unsigned long quirks;
+ struct nvme_id_power_state psd[32];
struct work_struct scan_work;
struct work_struct async_event_work;
struct delayed_work ka_work;
+ /* Power saving configuration */
+ u64 ps_max_latency_us;
+
/* Fabrics only */
u16 sqsize;
u32 ioccsz;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index ddc51adb594d..57a1af52b06e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -613,10 +613,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
spin_lock_irq(&nvmeq->q_lock);
if (unlikely(nvmeq->cq_vector < 0)) {
- if (ns && !test_bit(NVME_NS_DEAD, &ns->flags))
- ret = BLK_MQ_RQ_QUEUE_BUSY;
- else
- ret = BLK_MQ_RQ_QUEUE_ERROR;
+ ret = BLK_MQ_RQ_QUEUE_ERROR;
spin_unlock_irq(&nvmeq->q_lock);
goto out_cleanup_iod;
}
@@ -1739,7 +1736,7 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
if (dev->ctrl.admin_q)
blk_put_queue(dev->ctrl.admin_q);
kfree(dev->queues);
- kfree(dev->ctrl.opal_dev);
+ free_opal_dev(dev->ctrl.opal_dev);
kfree(dev);
}
@@ -1789,14 +1786,17 @@ static void nvme_reset_work(struct work_struct *work)
if (result)
goto out;
- if ((dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) && !dev->ctrl.opal_dev) {
- dev->ctrl.opal_dev =
- init_opal_dev(&dev->ctrl, &nvme_sec_submit);
+ if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) {
+ if (!dev->ctrl.opal_dev)
+ dev->ctrl.opal_dev =
+ init_opal_dev(&dev->ctrl, &nvme_sec_submit);
+ else if (was_suspend)
+ opal_unlock_from_suspend(dev->ctrl.opal_dev);
+ } else {
+ free_opal_dev(dev->ctrl.opal_dev);
+ dev->ctrl.opal_dev = NULL;
}
- if (was_suspend)
- opal_unlock_from_suspend(dev->ctrl.opal_dev);
-
result = nvme_setup_io_queues(dev);
if (result)
goto out;
@@ -2001,8 +2001,10 @@ static void nvme_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
- if (!pci_device_is_present(pdev))
+ if (!pci_device_is_present(pdev)) {
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
+ nvme_dev_disable(dev, false);
+ }
flush_work(&dev->reset_work);
nvme_uninit_ctrl(&dev->ctrl);
@@ -2121,6 +2123,7 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index a75e95d42b3f..bc20a2442a04 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -42,28 +42,6 @@
#define NVME_RDMA_MAX_INLINE_SEGMENTS 1
-static const char *const nvme_rdma_cm_status_strs[] = {
- [NVME_RDMA_CM_INVALID_LEN] = "invalid length",
- [NVME_RDMA_CM_INVALID_RECFMT] = "invalid record format",
- [NVME_RDMA_CM_INVALID_QID] = "invalid queue ID",
- [NVME_RDMA_CM_INVALID_HSQSIZE] = "invalid host SQ size",
- [NVME_RDMA_CM_INVALID_HRQSIZE] = "invalid host RQ size",
- [NVME_RDMA_CM_NO_RSC] = "resource not found",
- [NVME_RDMA_CM_INVALID_IRD] = "invalid IRD",
- [NVME_RDMA_CM_INVALID_ORD] = "Invalid ORD",
-};
-
-static const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status)
-{
- size_t index = status;
-
- if (index < ARRAY_SIZE(nvme_rdma_cm_status_strs) &&
- nvme_rdma_cm_status_strs[index])
- return nvme_rdma_cm_status_strs[index];
- else
- return "unrecognized reason";
-};
-
/*
* We handle AEN commands ourselves and don't even let the
* block layer know about them.
@@ -155,6 +133,10 @@ struct nvme_rdma_ctrl {
struct sockaddr addr;
struct sockaddr_in addr_in;
};
+ union {
+ struct sockaddr src_addr;
+ struct sockaddr_in src_addr_in;
+ };
struct nvme_ctrl ctrl;
};
@@ -567,6 +549,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
int idx, size_t queue_size)
{
struct nvme_rdma_queue *queue;
+ struct sockaddr *src_addr = NULL;
int ret;
queue = &ctrl->queues[idx];
@@ -589,7 +572,10 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
}
queue->cm_error = -ETIMEDOUT;
- ret = rdma_resolve_addr(queue->cm_id, NULL, &ctrl->addr,
+ if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
+ src_addr = &ctrl->src_addr;
+
+ ret = rdma_resolve_addr(queue->cm_id, src_addr, &ctrl->addr,
NVME_RDMA_CONNECT_TIMEOUT_MS);
if (ret) {
dev_info(ctrl->ctrl.device,
@@ -1265,7 +1251,7 @@ static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
dev = nvme_rdma_find_get_device(queue->cm_id);
if (!dev) {
- dev_err(queue->cm_id->device->dma_device,
+ dev_err(queue->cm_id->device->dev.parent,
"no client data found!\n");
return -ECONNREFUSED;
}
@@ -1905,6 +1891,16 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
goto out_free_ctrl;
}
+ if (opts->mask & NVMF_OPT_HOST_TRADDR) {
+ ret = nvme_rdma_parse_ipaddr(&ctrl->src_addr_in,
+ opts->host_traddr);
+ if (ret) {
+ pr_err("malformed src IP address passed: %s\n",
+ opts->host_traddr);
+ goto out_free_ctrl;
+ }
+ }
+
if (opts->mask & NVMF_OPT_TRSVCID) {
u16 port;
@@ -2016,7 +2012,8 @@ out_free_ctrl:
static struct nvmf_transport_ops nvme_rdma_transport = {
.name = "rdma",
.required_opts = NVMF_OPT_TRADDR,
- .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY,
+ .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
+ NVMF_OPT_HOST_TRADDR,
.create_ctrl = nvme_rdma_create_ctrl,
};
@@ -2063,8 +2060,7 @@ static int __init nvme_rdma_init_module(void)
return ret;
}
- nvmf_register_transport(&nvme_rdma_transport);
- return 0;
+ return nvmf_register_transport(&nvme_rdma_transport);
}
static void __exit nvme_rdma_cleanup_module(void)
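With NVMF_OPT_HOST_TRADDR allowed, the source address of the RDMA connection can be pinned from userspace. A sketch of passing it through the fabrics control device (all addresses and the NQN are placeholders):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Placeholder addresses/NQN; host_traddr selects the local
	 * source address used by rdma_resolve_addr() above. */
	const char *opts =
		"transport=rdma,traddr=192.168.1.10,trsvcid=4420,"
		"host_traddr=192.168.1.20,nqn=nqn.2016-06.example:testsubsys";
	int fd = open("/dev/nvme-fabrics", O_RDWR);

	if (fd < 0) {
		perror("open /dev/nvme-fabrics");
		return 1;
	}
	if (write(fd, opts, strlen(opts)) < 0)
		perror("write");
	close(fd);
	return 0;
}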
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 95ae52390478..94e524fea568 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -41,7 +41,7 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
if (!ns) {
status = NVME_SC_INVALID_NS;
- pr_err("nvmet : Counld not find namespace id : %d\n",
+ pr_err("nvmet : Could not find namespace id : %d\n",
le32_to_cpu(req->cmd->get_log_page.nsid));
goto out;
}
@@ -509,7 +509,7 @@ int nvmet_parse_admin_cmd(struct nvmet_req *req)
break;
case nvme_admin_identify:
req->data_len = 4096;
- switch (le32_to_cpu(cmd->identify.cns)) {
+ switch (cmd->identify.cns) {
case NVME_ID_CNS_NS:
req->execute = nvmet_execute_identify_ns;
return 0;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index fc5ba2f9e15f..5267ce20c12d 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -17,6 +17,7 @@
#include "nvmet.h"
static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
+static DEFINE_IDA(cntlid_ida);
/*
* This read/write semaphore is used to synchronize access to configuration
@@ -749,7 +750,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (!ctrl->sqs)
goto out_free_cqs;
- ret = ida_simple_get(&subsys->cntlid_ida,
+ ret = ida_simple_get(&cntlid_ida,
NVME_CNTLID_MIN, NVME_CNTLID_MAX,
GFP_KERNEL);
if (ret < 0) {
@@ -819,7 +820,7 @@ static void nvmet_ctrl_free(struct kref *ref)
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fatal_err_work);
- ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
+ ida_simple_remove(&cntlid_ida, ctrl->cntlid);
nvmet_subsys_put(subsys);
kfree(ctrl->sqs);
@@ -918,9 +919,6 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
mutex_init(&subsys->lock);
INIT_LIST_HEAD(&subsys->namespaces);
INIT_LIST_HEAD(&subsys->ctrls);
-
- ida_init(&subsys->cntlid_ida);
-
INIT_LIST_HEAD(&subsys->hosts);
return subsys;
@@ -933,7 +931,6 @@ static void nvmet_subsys_free(struct kref *ref)
WARN_ON_ONCE(!list_empty(&subsys->namespaces));
- ida_destroy(&subsys->cntlid_ida);
kfree(subsys->subsysnqn);
kfree(subsys);
}
@@ -976,6 +973,7 @@ static void __exit nvmet_exit(void)
{
nvmet_exit_configfs();
nvmet_exit_discovery();
+ ida_destroy(&cntlid_ida);
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 12f39eea569f..af8aabf05335 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -186,14 +186,14 @@ int nvmet_parse_discovery_cmd(struct nvmet_req *req)
}
case nvme_admin_identify:
req->data_len = 4096;
- switch (le32_to_cpu(cmd->identify.cns)) {
+ switch (cmd->identify.cns) {
case NVME_ID_CNS_CTRL:
req->execute =
nvmet_execute_identify_disc_ctrl;
return 0;
default:
pr_err("nvmet: unsupported identify cns %d\n",
- le32_to_cpu(cmd->identify.cns));
+ cmd->identify.cns);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
default:
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index f4088198cd0d..8bd022af3df6 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -153,8 +153,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
goto out;
}
- pr_info("creating controller %d for NQN %s.\n",
- ctrl->cntlid, ctrl->hostnqn);
+ pr_info("creating controller %d for subsystem %s for NQN %s.\n",
+ ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn);
req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
out:
@@ -220,7 +220,7 @@ int nvmet_parse_connect_cmd(struct nvmet_req *req)
req->ns = NULL;
- if (req->cmd->common.opcode != nvme_fabrics_command) {
+ if (cmd->common.opcode != nvme_fabrics_command) {
pr_err("invalid command 0x%x on unconnected queue.\n",
cmd->fabrics.opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index ba57f9852bde..8f483ee7868c 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1817,16 +1817,14 @@ nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
/* data no longer needed */
nvmet_fc_free_tgt_pgs(fod);
- if (fcpreq->fcp_error || abort)
- nvmet_req_complete(&fod->req, fcpreq->fcp_error);
-
+ nvmet_req_complete(&fod->req, fcpreq->fcp_error);
return;
}
switch (fcpreq->op) {
case NVMET_FCOP_WRITEDATA:
- if (abort || fcpreq->fcp_error ||
+ if (fcpreq->fcp_error ||
fcpreq->transferred_length != fcpreq->transfer_length) {
nvmet_req_complete(&fod->req,
NVME_SC_FC_TRANSPORT_ERROR);
@@ -1849,7 +1847,7 @@ nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
case NVMET_FCOP_READDATA:
case NVMET_FCOP_READDATA_RSP:
- if (abort || fcpreq->fcp_error ||
+ if (fcpreq->fcp_error ||
fcpreq->transferred_length != fcpreq->transfer_length) {
/* data no longer needed */
nvmet_fc_free_tgt_pgs(fod);
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index f3862e38f574..d1f06e7768ff 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -724,8 +724,7 @@ static int __init nvme_loop_init_module(void)
ret = nvmet_register_transport(&nvme_loop_ops);
if (ret)
return ret;
- nvmf_register_transport(&nvme_loop_transport);
- return 0;
+ return nvmf_register_transport(&nvme_loop_transport);
}
static void __exit nvme_loop_cleanup_module(void)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index cc7ad06b43a7..1370eee0a3c0 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -142,7 +142,6 @@ struct nvmet_subsys {
unsigned int max_nsid;
struct list_head ctrls;
- struct ida cntlid_ida;
struct list_head hosts;
bool allow_any_host;
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 60990220bd83..9aa1da3778b3 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1041,6 +1041,9 @@ static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
{
struct nvme_rdma_cm_rej rej;
+ pr_debug("rejecting connect request: status %d (%s)\n",
+ status, nvme_rdma_cm_msg(status));
+
rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
rej.sts = cpu_to_le16(status);
@@ -1091,7 +1094,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
if (queue->idx < 0) {
ret = NVME_RDMA_CM_NO_RSC;
- goto out_free_queue;
+ goto out_destroy_sq;
}
ret = nvmet_rdma_alloc_rsps(queue);
@@ -1135,7 +1138,6 @@ out_destroy_sq:
out_free_queue:
kfree(queue);
out_reject:
- pr_debug("rejecting connect request with status code %d\n", ret);
nvmet_rdma_cm_reject(cm_id, ret);
return NULL;
}
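The relabelled error path above matters because ida_simple_get() can only fail after the SQ has been set up, so its unwind has to begin at out_destroy_sq rather than out_free_queue. The general pattern, with hypothetical helpers:

/* Hypothetical setup/teardown helpers: each failure jumps to the
 * label that unwinds exactly what has been initialized so far,
 * in reverse order of setup. */
static struct queue *alloc_queue(void)
{
	struct queue *q = kzalloc(sizeof(*q), GFP_KERNEL);

	if (!q)
		return NULL;

	if (init_sq(q))
		goto out_free_queue;

	q->idx = ida_simple_get(&queue_ida, 0, 0, GFP_KERNEL);
	if (q->idx < 0)
		goto out_destroy_sq;	/* the SQ is live by now */

	return q;

out_destroy_sq:
	destroy_sq(q);
out_free_queue:
	kfree(q);
	return NULL;
}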
@@ -1188,7 +1190,6 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
ndev = nvmet_rdma_find_get_device(cm_id);
if (!ndev) {
- pr_err("no client data!\n");
nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
return -ECONNREFUSED;
}