path: root/drivers/target/target_core_transport.c
Diffstat (limited to 'drivers/target/target_core_transport.c')
-rw-r--r--  drivers/target/target_core_transport.c | 170
1 file changed, 67 insertions(+), 103 deletions(-)
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ff26ab0a5f60..fca4bd079d02 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -215,7 +215,7 @@ static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
{
struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);
- wake_up(&sess->cmd_list_wq);
+ wake_up(&sess->cmd_count_wq);
}
/**
@@ -228,9 +228,10 @@ int transport_init_session(struct se_session *se_sess)
{
INIT_LIST_HEAD(&se_sess->sess_list);
INIT_LIST_HEAD(&se_sess->sess_acl_list);
- INIT_LIST_HEAD(&se_sess->sess_cmd_list);
spin_lock_init(&se_sess->sess_cmd_lock);
- init_waitqueue_head(&se_sess->cmd_list_wq);
+ init_waitqueue_head(&se_sess->cmd_count_wq);
+ init_completion(&se_sess->stop_done);
+ atomic_set(&se_sess->stopped, 0);
return percpu_ref_init(&se_sess->cmd_count,
target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
}
@@ -238,6 +239,14 @@ EXPORT_SYMBOL(transport_init_session);
void transport_uninit_session(struct se_session *se_sess)
{
+ /*
+ * Drivers like iscsi and loop do not call target_stop_session
+ * during session shutdown so we have to drop the ref taken at init
+ * time here.
+ */
+ if (!atomic_read(&se_sess->stopped))
+ percpu_ref_put(&se_sess->cmd_count);
+
percpu_ref_exit(&se_sess->cmd_count);
}
@@ -650,12 +659,12 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
if (!dev)
return;
- spin_lock_irqsave(&dev->execute_task_lock, flags);
+ spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
if (cmd->state_active) {
list_del(&cmd->state_list);
cmd->state_active = false;
}
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+ spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
}
/*
@@ -866,10 +875,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
INIT_WORK(&cmd->work, success ? target_complete_ok_work :
target_complete_failure_work);
- if (cmd->se_cmd_flags & SCF_USE_CPUID)
- queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
- else
- queue_work(target_completion_wq, &cmd->work);
+ queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);
@@ -897,12 +903,13 @@ static void target_add_to_state_list(struct se_cmd *cmd)
struct se_device *dev = cmd->se_dev;
unsigned long flags;
- spin_lock_irqsave(&dev->execute_task_lock, flags);
+ spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
if (!cmd->state_active) {
- list_add_tail(&cmd->state_list, &dev->state_list);
+ list_add_tail(&cmd->state_list,
+ &dev->queues[cmd->cpuid].state_list);
cmd->state_active = true;
}
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+ spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
}
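Together with the matching hunk in target_remove_from_state_list() above, this replaces the single device-wide state list and execute_task_lock with one list and one lock per queue, indexed by cmd->cpuid, so commands submitted on different CPUs no longer contend on a single lock. A self-contained sketch of that layout, with illustrative names (device/queue/cmd) and a fixed NR_QUEUES standing in for the per-CPU queue array, assuming POSIX mutexes:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_QUEUES 4	/* stands in for the per-CPU queue array */

struct list_node { struct list_node *prev, *next; };

static void list_init(struct list_node *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_node *n, struct list_node *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del(struct list_node *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
}

struct queue {
	pthread_mutex_t lock;		/* models dev->queues[i].lock       */
	struct list_node state_list;	/* models dev->queues[i].state_list */
};

struct device { struct queue queues[NR_QUEUES]; };

struct cmd {
	int cpuid;			/* queue this command is bound to */
	bool state_active;
	struct list_node state_list;
};

static void add_to_state_list(struct device *dev, struct cmd *cmd)
{
	struct queue *q = &dev->queues[cmd->cpuid];

	pthread_mutex_lock(&q->lock);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list, &q->state_list);
		cmd->state_active = true;
	}
	pthread_mutex_unlock(&q->lock);
}

static void remove_from_state_list(struct device *dev, struct cmd *cmd)
{
	struct queue *q = &dev->queues[cmd->cpuid];

	pthread_mutex_lock(&q->lock);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct device dev;
	struct cmd cmd = { .cpuid = 2 };

	for (int i = 0; i < NR_QUEUES; i++) {
		pthread_mutex_init(&dev.queues[i].lock, NULL);
		list_init(&dev.queues[i].state_list);
	}
	add_to_state_list(&dev, &cmd);
	remove_from_state_list(&dev, &cmd);
	printf("cmd active: %d\n", cmd.state_active);
	return 0;
}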
/*
@@ -1390,6 +1397,9 @@ void transport_init_se_cmd(
cmd->sense_buffer = sense_buffer;
cmd->orig_fe_lun = unpacked_lun;
+ if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
+ cmd->cpuid = smp_processor_id();
+
cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);
@@ -1607,6 +1617,9 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
BUG_ON(!se_tpg);
BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
BUG_ON(in_interrupt());
+
+ if (flags & TARGET_SCF_USE_CPUID)
+ se_cmd->se_cmd_flags |= SCF_USE_CPUID;
/*
* Initialize se_cmd for target operation. From this point
* exceptions are handled by sending exception status via
@@ -1616,17 +1629,11 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
data_length, data_dir, task_attr, sense,
unpacked_lun);
- if (flags & TARGET_SCF_USE_CPUID)
- se_cmd->se_cmd_flags |= SCF_USE_CPUID;
- else
- se_cmd->cpuid = WORK_CPU_UNBOUND;
-
if (flags & TARGET_SCF_UNKNOWN_SIZE)
se_cmd->unknown_data_length = 1;
/*
- * Obtain struct se_cmd->cmd_kref reference and add new cmd to
- * se_sess->sess_cmd_list. A second kref_get here is necessary
- * for fabrics using TARGET_SCF_ACK_KREF that expect a second
+ * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is
+ * necessary for fabrics using TARGET_SCF_ACK_KREF that expect a second
* kref_put() to happen during fabric packet acknowledgement.
*/
ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
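Setting SCF_USE_CPUID is moved above the transport_init_se_cmd() call because the new hunk there only fills in a default cpuid (the submitting CPU) when the flag is not already set; doing it afterwards would let the default clobber a fabric-chosen CPU, and the completion work later runs on whatever ends up in cmd->cpuid. A short sketch of that ordering dependency, with hypothetical names and sched_getcpu() as a userspace stand-in for smp_processor_id(), assuming glibc:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#define USE_CPUID	0x1	/* models SCF_USE_CPUID */

struct cmd {
	unsigned int flags;
	int cpuid;
};

/* Models the new transport_init_se_cmd() hunk: pick the submitting CPU
 * unless the caller already pinned one. */
static void init_cmd(struct cmd *cmd)
{
	if (!(cmd->flags & USE_CPUID))
		cmd->cpuid = sched_getcpu();
}

int main(void)
{
	/* The patch's order: pin the CPU and set the flag, then init. */
	struct cmd good = { .flags = USE_CPUID, .cpuid = 3 };
	init_cmd(&good);
	printf("pinned cpu kept: %d\n", good.cpuid);	/* still 3 */

	/* Wrong order: init first, flag later; the pinned CPU is lost. */
	struct cmd bad = { .cpuid = 3 };
	init_cmd(&bad);		/* overwrites cpuid with the current CPU */
	bad.flags |= USE_CPUID;
	printf("pinned cpu lost: %d\n", bad.cpuid);
	return 0;
}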
@@ -1764,29 +1771,6 @@ static void target_complete_tmr_failure(struct work_struct *work)
transport_cmd_check_stop_to_fabric(se_cmd);
}
-static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag,
- u64 *unpacked_lun)
-{
- struct se_cmd *se_cmd;
- unsigned long flags;
- bool ret = false;
-
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
- if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
- continue;
-
- if (se_cmd->tag == tag) {
- *unpacked_lun = se_cmd->orig_fe_lun;
- ret = true;
- break;
- }
- }
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
-
- return ret;
-}
-
/**
* target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
* for TMR CDBs
@@ -1834,16 +1818,6 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
core_tmr_release_req(se_cmd->se_tmr_req);
return ret;
}
- /*
- * If this is ABORT_TASK with no explicit fabric provided LUN,
- * go ahead and search active session tags for a match to figure
- * out unpacked_lun for the original se_cmd.
- */
- if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) {
- if (!target_lookup_lun_from_tag(se_sess, tag,
- &se_cmd->orig_fe_lun))
- goto failure;
- }
ret = transport_lookup_tmr_lun(se_cmd);
if (ret)
@@ -2788,14 +2762,13 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
EXPORT_SYMBOL(transport_generic_free_cmd);
/**
- * target_get_sess_cmd - Add command to active ->sess_cmd_list
+ * target_get_sess_cmd - Verify the session is accepting cmds and take ref
* @se_cmd: command descriptor to add
* @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
*/
int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
{
struct se_session *se_sess = se_cmd->se_sess;
- unsigned long flags;
int ret = 0;
/*
@@ -2810,15 +2783,8 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
se_cmd->se_cmd_flags |= SCF_ACK_KREF;
}
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- if (se_sess->sess_tearing_down) {
+ if (!percpu_ref_tryget_live(&se_sess->cmd_count))
ret = -ESHUTDOWN;
- goto out;
- }
- list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
- percpu_ref_get(&se_sess->cmd_count);
-out:
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
if (ret && ack_kref)
target_put_sess_cmd(se_cmd);
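percpu_ref_tryget_live() replaces both the sess_tearing_down check and the list insertion: a reference is taken only while the counter has not been killed, so a stopping session rejects new commands with -ESHUTDOWN without ever taking sess_cmd_lock. A rough userspace model of the tryget-live idea, using an atomic counter plus a dead flag; the real percpu_ref uses per-CPU counters and RCU to make this race-free, so treat the names and the re-check below as illustrative only:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

struct ref {
	atomic_long count;	/* held references, starts at 1 */
	atomic_bool dead;	/* set once the ref is "killed" */
};

/* Models percpu_ref_tryget_live(): succeed only while not killed. */
static bool ref_tryget_live(struct ref *r)
{
	if (atomic_load(&r->dead))
		return false;
	atomic_fetch_add(&r->count, 1);
	/* Re-check: a concurrent kill may have raced with the increment. */
	if (atomic_load(&r->dead)) {
		atomic_fetch_sub(&r->count, 1);
		return false;
	}
	return true;
}

/* Models the new acceptance check in target_get_sess_cmd(). */
static int get_sess_cmd(struct ref *cmd_count)
{
	if (!ref_tryget_live(cmd_count))
		return -ESHUTDOWN;
	return 0;
}

int main(void)
{
	struct ref cmd_count = { .count = 1, .dead = false };

	printf("before stop: %d\n", get_sess_cmd(&cmd_count));	/* 0 */
	atomic_store(&cmd_count.dead, true);			/* "kill" */
	printf("after stop:  %d\n", get_sess_cmd(&cmd_count));	/* -ESHUTDOWN */
	return 0;
}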
@@ -2843,13 +2809,6 @@ static void target_release_cmd_kref(struct kref *kref)
struct se_session *se_sess = se_cmd->se_sess;
struct completion *free_compl = se_cmd->free_compl;
struct completion *abrt_compl = se_cmd->abrt_compl;
- unsigned long flags;
-
- if (se_sess) {
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- list_del_init(&se_cmd->se_cmd_list);
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- }
target_free_cmd_mem(se_cmd);
se_cmd->se_tfo->release_cmd(se_cmd);
@@ -2977,21 +2936,25 @@ void target_show_cmd(const char *pfx, struct se_cmd *cmd)
}
EXPORT_SYMBOL(target_show_cmd);
+static void target_stop_session_confirm(struct percpu_ref *ref)
+{
+ struct se_session *se_sess = container_of(ref, struct se_session,
+ cmd_count);
+ complete_all(&se_sess->stop_done);
+}
+
/**
- * target_sess_cmd_list_set_waiting - Set sess_tearing_down so no new commands are queued.
- * @se_sess: session to flag
+ * target_stop_session - Stop new IO from being queued on the session.
+ * @se_sess: session to stop
*/
-void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
+void target_stop_session(struct se_session *se_sess)
{
- unsigned long flags;
-
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- se_sess->sess_tearing_down = 1;
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
-
- percpu_ref_kill(&se_sess->cmd_count);
+ pr_debug("Stopping session queue.\n");
+ if (atomic_cmpxchg(&se_sess->stopped, 0, 1) == 0)
+ percpu_ref_kill_and_confirm(&se_sess->cmd_count,
+ target_stop_session_confirm);
}
-EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
+EXPORT_SYMBOL(target_stop_session);
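The atomic_cmpxchg() guard makes sure the counter is killed, and stop_done completed, exactly once even if target_stop_session() is called repeatedly. A small model of that guard, with hypothetical names and a plain callback standing in for percpu_ref_kill_and_confirm():

#include <stdatomic.h>
#include <stdio.h>

struct session {
	atomic_int stopped;	/* models se_sess->stopped            */
	int kill_count;		/* how many times the ref was "killed" */
};

/* Stand-in for percpu_ref_kill_and_confirm(); the real helper kills the
 * counter and later invokes the confirm callback. */
static void kill_and_confirm(struct session *s,
			     void (*confirm)(struct session *))
{
	s->kill_count++;
	confirm(s);
}

static void stop_confirm(struct session *s)
{
	printf("session stopped, signalling stop_done\n");
}

/* Models target_stop_session(): only the first caller kills the ref. */
static void stop_session(struct session *s)
{
	int expected = 0;

	if (atomic_compare_exchange_strong(&s->stopped, &expected, 1))
		kill_and_confirm(s, stop_confirm);
}

int main(void)
{
	struct session s = { .stopped = 0 };

	stop_session(&s);
	stop_session(&s);	/* second call is a no-op */
	printf("ref killed %d time(s)\n", s.kill_count);	/* 1 */
	return 0;
}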
/**
* target_wait_for_sess_cmds - Wait for outstanding commands
@@ -2999,19 +2962,19 @@ EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
*/
void target_wait_for_sess_cmds(struct se_session *se_sess)
{
- struct se_cmd *cmd;
int ret;
- WARN_ON_ONCE(!se_sess->sess_tearing_down);
+ WARN_ON_ONCE(!atomic_read(&se_sess->stopped));
do {
- ret = wait_event_timeout(se_sess->cmd_list_wq,
+ pr_debug("Waiting for running cmds to complete.\n");
+ ret = wait_event_timeout(se_sess->cmd_count_wq,
percpu_ref_is_zero(&se_sess->cmd_count),
180 * HZ);
- list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
- target_show_cmd("session shutdown: still waiting for ",
- cmd);
} while (ret <= 0);
+
+ wait_for_completion(&se_sess->stop_done);
+ pr_debug("Waiting for cmds done.\n");
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
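The wait now has two stages: a looping 180-second timed wait on cmd_count_wq until the reference count reaches zero, then wait_for_completion(&se_sess->stop_done) to ensure the confirm callback queued by target_stop_session() has finished before the session can be freed. A compact pthread model of that two-stage wait, with illustrative names and a much shorter timeout:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cmd_count_wq = PTHREAD_COND_INITIALIZER;
static int cmd_count = 1;		/* one command still in flight */

static pthread_cond_t stop_done_wq = PTHREAD_COND_INITIALIZER;
static bool stop_done;			/* models the stop_done completion */

static void *complete_last_cmd(void *arg)
{
	(void)arg;
	sleep(1);			/* the last command finishes later */
	pthread_mutex_lock(&lock);
	cmd_count = 0;			/* final reference dropped ...       */
	pthread_cond_broadcast(&cmd_count_wq);
	stop_done = true;		/* ... and the confirm callback ran  */
	pthread_cond_broadcast(&stop_done_wq);
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Models target_wait_for_sess_cmds(): timed wait until the count is zero,
 * then wait for the stop confirmation. */
static void wait_for_sess_cmds(void)
{
	struct timespec ts;

	pthread_mutex_lock(&lock);
	while (cmd_count != 0) {
		printf("Waiting for running cmds to complete.\n");
		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += 2;		/* 180s in the real code */
		pthread_cond_timedwait(&cmd_count_wq, &lock, &ts);
	}
	while (!stop_done)
		pthread_cond_wait(&stop_done_wq, &lock);
	pthread_mutex_unlock(&lock);
	printf("Waiting for cmds done.\n");
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, complete_last_cmd, NULL);
	wait_for_sess_cmds();
	pthread_join(t, NULL);
	return 0;
}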
@@ -3094,14 +3057,14 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
}
EXPORT_SYMBOL(transport_wait_for_tasks);
-struct sense_info {
+struct sense_detail {
u8 key;
u8 asc;
u8 ascq;
- bool add_sector_info;
+ bool add_sense_info;
};
-static const struct sense_info sense_info_table[] = {
+static const struct sense_detail sense_detail_table[] = {
[TCM_NO_SENSE] = {
.key = NOT_READY
},
@@ -3196,24 +3159,25 @@ static const struct sense_info sense_info_table[] = {
.key = MISCOMPARE,
.asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
.ascq = 0x00,
+ .add_sense_info = true,
},
[TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
.key = ABORTED_COMMAND,
.asc = 0x10,
.ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
- .add_sector_info = true,
+ .add_sense_info = true,
},
[TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
.key = ABORTED_COMMAND,
.asc = 0x10,
.ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
- .add_sector_info = true,
+ .add_sense_info = true,
},
[TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
.key = ABORTED_COMMAND,
.asc = 0x10,
.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
- .add_sector_info = true,
+ .add_sense_info = true,
},
[TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
.key = COPY_ABORTED,
@@ -3261,42 +3225,42 @@ static const struct sense_info sense_info_table[] = {
*/
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
- const struct sense_info *si;
+ const struct sense_detail *sd;
u8 *buffer = cmd->sense_buffer;
int r = (__force int)reason;
u8 key, asc, ascq;
bool desc_format = target_sense_desc_format(cmd->se_dev);
- if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
- si = &sense_info_table[r];
+ if (r < ARRAY_SIZE(sense_detail_table) && sense_detail_table[r].key)
+ sd = &sense_detail_table[r];
else
- si = &sense_info_table[(__force int)
+ sd = &sense_detail_table[(__force int)
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
- key = si->key;
+ key = sd->key;
if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
&ascq)) {
cmd->scsi_status = SAM_STAT_BUSY;
return;
}
- } else if (si->asc == 0) {
+ } else if (sd->asc == 0) {
WARN_ON_ONCE(cmd->scsi_asc == 0);
asc = cmd->scsi_asc;
ascq = cmd->scsi_ascq;
} else {
- asc = si->asc;
- ascq = si->ascq;
+ asc = sd->asc;
+ ascq = sd->ascq;
}
cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
- if (si->add_sector_info)
+ if (sd->add_sense_info)
WARN_ON_ONCE(scsi_set_sense_information(buffer,
cmd->scsi_sense_length,
- cmd->bad_sector) < 0);
+ cmd->sense_info) < 0);
}
int
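The rename from sense_info/add_sector_info to sense_detail/add_sense_info reflects that the sense INFORMATION field is no longer only a bad sector: TCM_MISCOMPARE_VERIFY now sets the flag as well, and the value is taken from the renamed cmd->sense_info (formerly cmd->bad_sector). A stripped-down, table-driven model of translate_sense_reason(), with hypothetical reason codes and sense-key values, and printf() standing in for scsi_build_sense_buffer()/scsi_set_sense_information():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum reason {			/* hypothetical subset of sense_reason_t */
	R_NO_SENSE,
	R_MISCOMPARE_VERIFY,
	R_GUARD_CHECK_FAILED,
	R_LUN_COMM_FAILURE,
	R_MAX,
};

struct sense_detail {
	uint8_t key, asc, ascq;
	bool add_sense_info;	/* also emit the INFORMATION field */
};

static const struct sense_detail sense_detail_table[R_MAX] = {
	[R_MISCOMPARE_VERIFY]  = { .key = 0x0e, .asc = 0x1d,
				   .add_sense_info = true },
	[R_GUARD_CHECK_FAILED] = { .key = 0x0b, .asc = 0x10, .ascq = 0x01,
				   .add_sense_info = true },
	[R_LUN_COMM_FAILURE]   = { .key = 0x02, .asc = 0x08 },
};

static void translate_sense_reason(enum reason r, uint64_t sense_info)
{
	const struct sense_detail *sd;

	/* Unknown or keyless reasons fall back to a generic failure. */
	if (r < R_MAX && sense_detail_table[r].key)
		sd = &sense_detail_table[r];
	else
		sd = &sense_detail_table[R_LUN_COMM_FAILURE];

	printf("key %#x asc %#x ascq %#x", sd->key, sd->asc, sd->ascq);
	if (sd->add_sense_info)
		printf(" information %#llx", (unsigned long long)sense_info);
	printf("\n");
}

int main(void)
{
	translate_sense_reason(R_MISCOMPARE_VERIFY, 0x200);	/* has info   */
	translate_sense_reason(R_NO_SENSE, 0);			/* falls back */
	return 0;
}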