Diffstat (limited to 'drivers/nvme/target/admin-cmd.c')
 drivers/nvme/target/admin-cmd.c | 146 ++++++++++++++++++++++++-----------
 1 file changed, 100 insertions(+), 46 deletions(-)
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 1179f6314323..11baeb14c388 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -19,19 +19,6 @@
#include <asm/unaligned.h>
#include "nvmet.h"
-/*
- * This helper allows us to clear the AEN based on the RAE bit,
- * Please use this helper when processing the log pages which are
- * associated with the AEN.
- */
-static inline void nvmet_clear_aen(struct nvmet_req *req, u32 aen_bit)
-{
- int rae = le32_to_cpu(req->cmd->common.cdw10[0]) & 1 << 15;
-
- if (!rae)
- clear_bit(aen_bit, &req->sq->ctrl->aen_masked);
-}
-
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
u32 len = le16_to_cpu(cmd->get_log_page.numdu);
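The call sites later in this diff switch from nvmet_clear_aen() to nvmet_clear_aen_bit(). The renamed helper is not part of this file's diff; presumably it moved to nvmet.h and was updated for the named cdw10 field that replaces the old cdw10[] array, along the lines of this sketch:

static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	/* Honor the Retain Asynchronous Event (RAE) bit, cdw10 bit 15. */
	int rae = le32_to_cpu(req->cmd->common.cdw10) & 1 << 15;

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}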
@@ -50,6 +37,34 @@ static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
}
+static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u16 status = NVME_SC_SUCCESS;
+ unsigned long flags;
+ off_t offset = 0;
+ u64 slot;
+ u64 i;
+
+ spin_lock_irqsave(&ctrl->error_lock, flags);
+ slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;
+
+ for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
+ status = nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
+ sizeof(struct nvme_error_slot));
+ if (status)
+ break;
+
+ if (slot == 0)
+ slot = NVMET_ERROR_LOG_SLOTS - 1;
+ else
+ slot--;
+ offset += sizeof(struct nvme_error_slot);
+ }
+ spin_unlock_irqrestore(&ctrl->error_lock, flags);
+ nvmet_req_complete(req, status);
+}
+
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
struct nvme_smart_log *slog)
{
@@ -60,6 +75,7 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
if (!ns) {
pr_err("Could not find namespace id : %d\n",
le32_to_cpu(req->cmd->get_log_page.nsid));
+ req->error_loc = offsetof(struct nvme_rw_command, nsid);
return NVME_SC_INVALID_NS;
}
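req->error_loc, set throughout this patch, records the byte offset of the offending field within the 64-byte submission queue entry so it can later be reported as the Parameter Error Location of an error log entry. A userspace sketch of the offsetof() arithmetic, using a hypothetical stand-in for the command layout:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct sqe_prefix {		/* stand-in for struct nvme_common_command */
	uint8_t  opcode;
	uint8_t  flags;
	uint16_t command_id;
	uint32_t nsid;
};

int main(void)
{
	/* nsid sits at byte 4 of the SQE; that offset is what error_loc holds. */
	printf("nsid offset = %zu\n", offsetof(struct sqe_prefix, nsid));
	return 0;
}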
@@ -119,6 +135,7 @@ static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
struct nvme_smart_log *log;
u16 status = NVME_SC_INTERNAL;
+ unsigned long flags;
if (req->data_len != sizeof(*log))
goto out;
@@ -134,6 +151,11 @@ static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
if (status)
goto out_free_log;
+ spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
+ put_unaligned_le64(req->sq->ctrl->err_counter,
+ &log->num_err_log_entries);
+ spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);
+
status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
kfree(log);
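The SMART hunk above publishes err_counter through the log's Number of Error Information Log Entries field. That field is a 128-bit byte array in struct nvme_smart_log rather than an aligned __le64, which is presumably why the code stores the low 64 bits with put_unaligned_le64(). A userspace sketch of the equivalent little-endian store:

#include <stdint.h>
#include <stdio.h>

static void put_le64(uint64_t v, uint8_t *p)
{
	for (int i = 0; i < 8; i++)
		p[i] = v >> (8 * i);	/* least significant byte first */
}

int main(void)
{
	uint8_t num_err_log_entries[16] = { 0 };	/* 128-bit LE field */

	put_le64(0x1234, num_err_log_entries);		/* low 8 bytes only */
	printf("%02x %02x\n", num_err_log_entries[0],
	       num_err_log_entries[1]);			/* prints: 34 12 */
	return 0;
}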
@@ -189,7 +211,7 @@ static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
if (!status)
status = nvmet_zero_sgl(req, len, req->data_len - len);
ctrl->nr_changed_ns = 0;
- nvmet_clear_aen(req, NVME_AEN_CFG_NS_ATTR);
+ nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
mutex_unlock(&ctrl->lock);
out:
nvmet_req_complete(req, status);
@@ -252,7 +274,7 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
hdr.ngrps = cpu_to_le16(ngrps);
- nvmet_clear_aen(req, NVME_AEN_CFG_ANA_CHANGE);
+ nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
up_read(&nvmet_ana_sem);
kfree(desc);
@@ -304,7 +326,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
/* XXX: figure out what to do about RTD3R/RTD3 */
id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
- id->ctratt = cpu_to_le32(1 << 0);
+ id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
+ NVME_CTRL_ATTR_TBKAS);
id->oacs = 0;
@@ -392,6 +415,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
u16 status = 0;
if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
+ req->error_loc = offsetof(struct nvme_identify, nsid);
status = NVME_SC_INVALID_NS | NVME_SC_DNR;
goto out;
}
@@ -512,6 +536,7 @@ static void nvmet_execute_identify_desclist(struct nvmet_req *req)
ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
if (!ns) {
+ req->error_loc = offsetof(struct nvme_identify, nsid);
status = NVME_SC_INVALID_NS | NVME_SC_DNR;
goto out;
}
@@ -569,13 +594,15 @@ static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
- u32 write_protect = le32_to_cpu(req->cmd->common.cdw10[1]);
+ u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;
req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
- if (unlikely(!req->ns))
+ if (unlikely(!req->ns)) {
+ req->error_loc = offsetof(struct nvme_common_command, nsid);
return status;
+ }
mutex_lock(&subsys->lock);
switch (write_protect) {
@@ -599,11 +626,36 @@ static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
return status;
}
+u16 nvmet_set_feat_kato(struct nvmet_req *req)
+{
+ u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
+
+ req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
+
+ nvmet_set_result(req, req->sq->ctrl->kato);
+
+ return 0;
+}
+
+u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
+{
+ u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
+
+ if (val32 & ~mask) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw11);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
+ nvmet_set_result(req, val32);
+
+ return 0;
+}
+
static void nvmet_execute_set_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
- u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
- u32 val32;
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0;
switch (cdw10 & 0xff) {
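nvmet_set_feat_kato() converts the host's millisecond KATO value into the whole seconds the target tracks, rounding up so the effective timeout is never shorter than requested; the getter added later in this diff multiplies back by 1000. nvmet_set_feat_async_event() likewise rejects any AEN bit outside the supported mask before updating aen_enabled. A sketch of the KATO rounding:

#include <stdio.h>

/* Matches DIV_ROUND_UP(val32, 1000) in nvmet_set_feat_kato(). */
static unsigned int kato_ms_to_s(unsigned int ms)
{
	return (ms + 999) / 1000;
}

int main(void)
{
	printf("%u\n", kato_ms_to_s(15000));	/* 15 */
	printf("%u\n", kato_ms_to_s(15001));	/* 16; reads back as 16000 ms */
	return 0;
}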
@@ -612,19 +664,10 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
break;
case NVME_FEAT_KATO:
- val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
- req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
- nvmet_set_result(req, req->sq->ctrl->kato);
+ status = nvmet_set_feat_kato(req);
break;
case NVME_FEAT_ASYNC_EVENT:
- val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
- if (val32 & ~NVMET_AEN_CFG_ALL) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- break;
- }
-
- WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
- nvmet_set_result(req, val32);
+ status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
break;
case NVME_FEAT_HOST_ID:
status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
@@ -633,6 +676,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
status = nvmet_set_feat_write_protect(req);
break;
default:
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
break;
}
@@ -646,9 +690,10 @@ static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
u32 result;
req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
- if (!req->ns)
+ if (!req->ns) {
+ req->error_loc = offsetof(struct nvme_common_command, nsid);
return NVME_SC_INVALID_NS | NVME_SC_DNR;
-
+ }
mutex_lock(&subsys->lock);
if (req->ns->readonly == true)
result = NVME_NS_WRITE_PROTECT;
@@ -660,10 +705,20 @@ static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
return 0;
}
+void nvmet_get_feat_kato(struct nvmet_req *req)
+{
+ nvmet_set_result(req, req->sq->ctrl->kato * 1000);
+}
+
+void nvmet_get_feat_async_event(struct nvmet_req *req)
+{
+ nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
+}
+
static void nvmet_execute_get_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
- u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0;
switch (cdw10 & 0xff) {
@@ -689,7 +744,7 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
break;
#endif
case NVME_FEAT_ASYNC_EVENT:
- nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
+ nvmet_get_feat_async_event(req);
break;
case NVME_FEAT_VOLATILE_WC:
nvmet_set_result(req, 1);
@@ -699,11 +754,13 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
break;
case NVME_FEAT_KATO:
- nvmet_set_result(req, req->sq->ctrl->kato * 1000);
+ nvmet_get_feat_kato(req);
break;
case NVME_FEAT_HOST_ID:
/* need 128-bit host identifier flag */
- if (!(req->cmd->common.cdw10[1] & cpu_to_le32(1 << 0))) {
+ if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
+ req->error_loc =
+ offsetof(struct nvme_common_command, cdw11);
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
break;
}
@@ -715,6 +772,8 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
status = nvmet_get_feat_write_protect(req);
break;
default:
+ req->error_loc =
+ offsetof(struct nvme_common_command, cdw10);
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
break;
}
@@ -722,7 +781,7 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
nvmet_req_complete(req, status);
}
-static void nvmet_execute_async_event(struct nvmet_req *req)
+void nvmet_execute_async_event(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -738,7 +797,7 @@ static void nvmet_execute_async_event(struct nvmet_req *req)
schedule_work(&ctrl->async_event_work);
}
-static void nvmet_execute_keep_alive(struct nvmet_req *req)
+void nvmet_execute_keep_alive(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -764,13 +823,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
switch (cmd->get_log_page.lid) {
case NVME_LOG_ERROR:
- /*
- * We currently never set the More bit in the status
- * field, so all error log entries are invalid and can
- * be zeroed out. This is called a minum viable
- * implementation (TM) of this mandatory log page.
- */
- req->execute = nvmet_execute_get_log_page_noop;
+ req->execute = nvmet_execute_get_log_page_error;
return 0;
case NVME_LOG_SMART:
req->execute = nvmet_execute_get_log_page_smart;
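With a real implementation in place, NVME_LOG_ERROR no longer needs the zero-filling noop. Each entry copied out by nvmet_execute_get_log_page_error() is one 64-byte slot in the Error Information Log Entry format; the layout below is an assumption about the struct nvme_error_slot definition this series introduces, with approximate field names:

#include <linux/types.h>

struct nvme_error_slot {
	__le64	error_count;		/* monotonically increasing */
	__le16	sqid;
	__le16	cmdid;
	__le16	status_field;
	__le16	param_error_location;	/* fed from req->error_loc */
	__le64	lba;
	__le32	nsid;
	__u8	vs;
	__u8	trtype;
	__u8	resv[2];
	__le64	cs;			/* command specific information */
	__le16	trtype_spec_info;
	__u8	resv2[22];		/* pads the entry to 64 bytes */
};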
@@ -836,5 +889,6 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
req->sq->qid);
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}