Diffstat (limited to 'drivers/target/target_core_transport.c')
-rw-r--r--	drivers/target/target_core_transport.c | 589
1 file changed, 282 insertions(+), 307 deletions(-)
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 3400ae6e93f8..d3ddd1361949 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -45,23 +45,18 @@
 #include <scsi/scsi_tcq.h>
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
+#include "target_core_internal.h"
 #include "target_core_alua.h"
-#include "target_core_cdb.h"
-#include "target_core_hba.h"
 #include "target_core_pr.h"
 #include "target_core_ua.h"
 
 static int sub_api_initialized;
 
 static struct workqueue_struct *target_completion_wq;
-static struct kmem_cache *se_cmd_cache;
 static struct kmem_cache *se_sess_cache;
 struct kmem_cache *se_tmr_req_cache;
 struct kmem_cache *se_ua_cache;
@@ -73,7 +68,7 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
 
 static int transport_generic_write_pending(struct se_cmd *);
 static int transport_processing_thread(void *param);
-static int __transport_execute_tasks(struct se_device *dev);
+static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
 static void transport_complete_task_attr(struct se_cmd *cmd);
 static void transport_handle_queue_full(struct se_cmd *cmd,
 		struct se_device *dev);
@@ -82,24 +77,18 @@ static int transport_generic_get_mem(struct se_cmd *cmd);
 static void transport_put_cmd(struct se_cmd *cmd);
 static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
-static void transport_generic_request_failure(struct se_cmd *, int, int);
+static void transport_generic_request_failure(struct se_cmd *);
 static void target_complete_ok_work(struct work_struct *work);
 
 int init_se_kmem_caches(void)
 {
-	se_cmd_cache = kmem_cache_create("se_cmd_cache",
-			sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
-	if (!se_cmd_cache) {
-		pr_err("kmem_cache_create for struct se_cmd failed\n");
-		goto out;
-	}
 	se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
 			sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
 			0, NULL);
 	if (!se_tmr_req_cache) {
 		pr_err("kmem_cache_create() for struct se_tmr_req"
 				" failed\n");
-		goto out_free_cmd_cache;
+		goto out;
 	}
 	se_sess_cache = kmem_cache_create("se_sess_cache",
 			sizeof(struct se_session), __alignof__(struct se_session),
@@ -182,8 +171,6 @@ out_free_sess_cache:
 	kmem_cache_destroy(se_sess_cache);
 out_free_tmr_req_cache:
 	kmem_cache_destroy(se_tmr_req_cache);
-out_free_cmd_cache:
-	kmem_cache_destroy(se_cmd_cache);
 out:
 	return -ENOMEM;
 }
@@ -191,7 +178,6 @@ out:
 void release_se_kmem_caches(void)
 {
 	destroy_workqueue(target_completion_wq);
-	kmem_cache_destroy(se_cmd_cache);
 	kmem_cache_destroy(se_tmr_req_cache);
 	kmem_cache_destroy(se_sess_cache);
 	kmem_cache_destroy(se_ua_cache);
@@ -222,14 +208,13 @@ u32 scsi_get_new_index(scsi_index_t type)
 	return new_index;
 }
 
-void transport_init_queue_obj(struct se_queue_obj *qobj)
+static void transport_init_queue_obj(struct se_queue_obj *qobj)
 {
 	atomic_set(&qobj->queue_cnt, 0);
 	INIT_LIST_HEAD(&qobj->qobj_list);
 	init_waitqueue_head(&qobj->thread_wq);
 	spin_lock_init(&qobj->cmd_queue_lock);
 }
-EXPORT_SYMBOL(transport_init_queue_obj);
 
 void transport_subsystem_check_init(void)
 {
@@ -436,18 +421,18 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
 		if (task->task_flags & TF_ACTIVE)
 			continue;
 
-		if (!atomic_read(&task->task_state_active))
-			continue;
-
 		spin_lock_irqsave(&dev->execute_task_lock, flags);
-		list_del(&task->t_state_list);
-		pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
-			cmd->se_tfo->get_task_tag(cmd), dev, task);
-		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+		if (task->t_state_active) {
+			pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
+				cmd->se_tfo->get_task_tag(cmd), dev, task);
 
-		atomic_set(&task->task_state_active, 0);
-		atomic_dec(&cmd->t_task_cdbs_ex_left);
+			list_del(&task->t_state_list);
+			atomic_dec(&cmd->t_task_cdbs_ex_left);
+			task->t_state_active = false;
+		}
+		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 	}
+
 }
 
 /* transport_cmd_check_stop():
@@ -680,9 +665,9 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
 		task->task_scsi_status = GOOD;
 	} else {
 		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
-		task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
-		task->task_se_cmd->transport_error_status =
-			PYX_TRANSPORT_ILLEGAL_REQUEST;
+		task->task_se_cmd->scsi_sense_reason =
+			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
 	}
 
 	transport_complete_task(task, good);
@@ -693,7 +678,7 @@ static void target_complete_failure_work(struct work_struct *work)
 {
 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
 
-	transport_generic_request_failure(cmd, 1, 1);
+	transport_generic_request_failure(cmd);
 }
 
 /* transport_complete_task():
@@ -706,12 +691,6 @@ void transport_complete_task(struct se_task *task, int success)
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	unsigned long flags;
-#if 0
-	pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
-			cmd->t_task_cdb[0], dev);
-#endif
-	if (dev)
-		atomic_inc(&dev->depth_left);
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	task->task_flags &= ~TF_ACTIVE;
@@ -724,7 +703,7 @@ void transport_complete_task(struct se_task *task, int success)
 	if (dev && dev->transport->transport_complete) {
 		if (dev->transport->transport_complete(task) != 0) {
 			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
-			task->task_sense = 1;
+			task->task_flags |= TF_HAS_SENSE;
 			success = 1;
 		}
 	}
@@ -753,12 +732,7 @@ void transport_complete_task(struct se_task *task, int success)
 	}
 
 	if (cmd->t_tasks_failed) {
-		if (!task->task_error_status) {
-			task->task_error_status =
-				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
-			cmd->transport_error_status =
-				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
-		}
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		INIT_WORK(&cmd->work, target_complete_failure_work);
 	} else {
 		atomic_set(&cmd->t_transport_complete, 1);
@@ -833,7 +807,7 @@ static void __transport_add_task_to_execute_queue(
 	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
 	atomic_inc(&dev->execute_tasks);
 
-	if (atomic_read(&task->task_state_active))
+	if (task->t_state_active)
 		return;
 	/*
 	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
@@ -847,7 +821,7 @@ static void __transport_add_task_to_execute_queue(
 	else
 		list_add_tail(&task->t_state_list, &dev->state_task_list);
 
-	atomic_set(&task->task_state_active, 1);
+	task->t_state_active = true;
 
 	pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
 		task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
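A note on the task_state_active conversion in the hunks above: the atomic_t becomes a plain bool because every read and write of the flag now happens with dev->execute_task_lock held, so the lock alone provides the required ordering. A minimal standalone sketch of that pattern, using illustrative demo_* names rather than the driver's own:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_task {
	struct list_head state_list;
	bool state_active;		/* guarded by demo_dev.state_lock */
};

struct demo_dev {
	spinlock_t state_lock;
	struct list_head state_list;
};

/* Check and clear the flag in one critical section; no atomics needed. */
static void demo_remove_state(struct demo_dev *dev, struct demo_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->state_lock, flags);
	if (task->state_active) {
		list_del(&task->state_list);
		task->state_active = false;
	}
	spin_unlock_irqrestore(&dev->state_lock, flags);
}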
@@ -862,29 +836,26 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	list_for_each_entry(task, &cmd->t_task_list, t_list) {
-		if (atomic_read(&task->task_state_active))
-			continue;
-
 		spin_lock(&dev->execute_task_lock);
-		list_add_tail(&task->t_state_list, &dev->state_task_list);
-		atomic_set(&task->task_state_active, 1);
-
-		pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
-			task->task_se_cmd->se_tfo->get_task_tag(
-			task->task_se_cmd), task, dev);
-
+		if (!task->t_state_active) {
+			list_add_tail(&task->t_state_list,
+				      &dev->state_task_list);
+			task->t_state_active = true;
+
+			pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
+				task->task_se_cmd->se_tfo->get_task_tag(
+				task->task_se_cmd), task, dev);
+		}
 		spin_unlock(&dev->execute_task_lock);
 	}
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 }
 
-static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct se_task *task, *task_prev = NULL;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev->execute_task_lock, flags);
 	list_for_each_entry(task, &cmd->t_task_list, t_list) {
 		if (!list_empty(&task->t_execute_list))
 			continue;
@@ -895,6 +866,15 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
 		__transport_add_task_to_execute_queue(task, task_prev, dev);
 		task_prev = task;
 	}
+}
+
+static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+{
+	unsigned long flags;
+	struct se_device *dev = cmd->se_dev;
+
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	__transport_add_tasks_from_cmd(cmd);
 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
 
@@ -905,7 +885,7 @@ void __transport_remove_task_from_execute_queue(struct se_task *task,
 	atomic_dec(&dev->execute_tasks);
 }
 
-void transport_remove_task_from_execute_queue(
+static void transport_remove_task_from_execute_queue(
 	struct se_task *task,
 	struct se_device *dev)
 {
@@ -992,9 +972,8 @@ void transport_dump_dev_state(
 		break;
 	}
 
-	*bl += sprintf(b + *bl, "  Execute/Left/Max Queue Depth: %d/%d/%d",
-		atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
-		dev->queue_depth);
+	*bl += sprintf(b + *bl, "  Execute/Max Queue Depth: %d/%d",
+		atomic_read(&dev->execute_tasks), dev->queue_depth);
 	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
 		dev->se_sub_dev->se_dev_attrib.block_size,
 		dev->se_sub_dev->se_dev_attrib.max_sectors);
 	*bl += sprintf(b + *bl, "        ");
@@ -1335,29 +1314,20 @@ struct se_device *transport_add_device_to_core_hba(
 	dev->se_hba		= hba;
 	dev->se_sub_dev		= se_dev;
 	dev->transport		= transport;
-	atomic_set(&dev->active_cmds, 0);
 	INIT_LIST_HEAD(&dev->dev_list);
 	INIT_LIST_HEAD(&dev->dev_sep_list);
 	INIT_LIST_HEAD(&dev->dev_tmr_list);
 	INIT_LIST_HEAD(&dev->execute_task_list);
 	INIT_LIST_HEAD(&dev->delayed_cmd_list);
-	INIT_LIST_HEAD(&dev->ordered_cmd_list);
 	INIT_LIST_HEAD(&dev->state_task_list);
 	INIT_LIST_HEAD(&dev->qf_cmd_list);
 	spin_lock_init(&dev->execute_task_lock);
 	spin_lock_init(&dev->delayed_cmd_lock);
-	spin_lock_init(&dev->ordered_cmd_lock);
-	spin_lock_init(&dev->state_task_lock);
-	spin_lock_init(&dev->dev_alua_lock);
 	spin_lock_init(&dev->dev_reservation_lock);
 	spin_lock_init(&dev->dev_status_lock);
-	spin_lock_init(&dev->dev_status_thr_lock);
 	spin_lock_init(&dev->se_port_lock);
 	spin_lock_init(&dev->se_tmr_lock);
 	spin_lock_init(&dev->qf_cmd_lock);
-
-	dev->queue_depth	= dev_limits->queue_depth;
-	atomic_set(&dev->depth_left, dev->queue_depth);
 	atomic_set(&dev->dev_ordered_id, 0);
 
 	se_dev_set_default_attribs(dev, dev_limits);
@@ -1507,7 +1477,6 @@ void transport_init_se_cmd(
 {
 	INIT_LIST_HEAD(&cmd->se_lun_node);
 	INIT_LIST_HEAD(&cmd->se_delayed_node);
-	INIT_LIST_HEAD(&cmd->se_ordered_node);
 	INIT_LIST_HEAD(&cmd->se_qf_node);
 	INIT_LIST_HEAD(&cmd->se_queue_node);
 	INIT_LIST_HEAD(&cmd->se_cmd_list);
@@ -1573,6 +1542,8 @@ int transport_generic_allocate_tasks(
 		pr_err("Received SCSI CDB with command_size: %d that"
 			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
 			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
+		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
 		return -EINVAL;
 	}
 	/*
@@ -1588,6 +1559,9 @@ int transport_generic_allocate_tasks(
 				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
 				scsi_command_size(cdb),
 				(unsigned long)sizeof(cmd->__t_task_cdb));
+			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			cmd->scsi_sense_reason =
+				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 			return -ENOMEM;
 		}
 	} else
@@ -1658,15 +1632,87 @@ int transport_handle_cdb_direct(
 	 * and call transport_generic_request_failure() if necessary..
 	 */
 	ret = transport_generic_new_cmd(cmd);
-	if (ret < 0) {
-		cmd->transport_error_status = ret;
-		transport_generic_request_failure(cmd, 0,
-				(cmd->data_direction != DMA_TO_DEVICE));
-	}
+	if (ret < 0)
+		transport_generic_request_failure(cmd);
+
 	return 0;
 }
 EXPORT_SYMBOL(transport_handle_cdb_direct);
 
+/**
+ * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
+ *
+ * @se_cmd: command descriptor to submit
+ * @se_sess: associated se_sess for endpoint
+ * @cdb: pointer to SCSI CDB
+ * @sense: pointer to SCSI sense buffer
+ * @unpacked_lun: unpacked LUN to reference for struct se_lun
+ * @data_length: fabric expected data transfer length
+ * @task_attr: SAM task attribute
+ * @data_dir: DMA data direction
+ * @flags: flags for command submission from target_sc_flags_table
+ *
+ * This may only be called from process context, and also currently
+ * assumes internal allocation of fabric payload buffer by target-core.
+ **/
+int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
+		u32 data_length, int task_attr, int data_dir, int flags)
+{
+	struct se_portal_group *se_tpg;
+	int rc;
+
+	se_tpg = se_sess->se_tpg;
+	BUG_ON(!se_tpg);
+	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
+	BUG_ON(in_interrupt());
+	/*
+	 * Initialize se_cmd for target operation.  From this point
+	 * exceptions are handled by sending exception status via
+	 * target_core_fabric_ops->queue_status() callback
+	 */
+	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+				data_length, data_dir, task_attr, sense);
+	/*
+	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
+	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
+	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
+	 * kref_put() to happen during fabric packet acknowledgement.
+	 */
+	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+	/*
+	 * Signal bidirectional data payloads to target-core
+	 */
+	if (flags & TARGET_SCF_BIDI_OP)
+		se_cmd->se_cmd_flags |= SCF_BIDI;
+	/*
+	 * Locate se_lun pointer and attach it to struct se_cmd
+	 */
+	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0)
+		goto out_check_cond;
+	/*
+	 * Sanitize CDBs via transport_generic_cmd_sequencer() and
+	 * allocate the necessary tasks to complete the received CDB+data
+	 */
+	rc = transport_generic_allocate_tasks(se_cmd, cdb);
+	if (rc != 0)
+		goto out_check_cond;
+	/*
+	 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
+	 * for immediate execution of READs, otherwise wait for
+	 * transport_generic_handle_data() to be called for WRITEs
+	 * when fabric has filled the incoming buffer.
+	 */
+	transport_handle_cdb_direct(se_cmd);
+	return 0;
+
+out_check_cond:
+	transport_send_check_condition_and_sense(se_cmd,
+			se_cmd->scsi_sense_reason, 0);
+	return 0;
+}
+EXPORT_SYMBOL(target_submit_cmd);
+
 /*
  * Used by fabric module frontends defining a TFO->new_cmd_map() caller
  * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
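The new target_submit_cmd() above collapses the init / LUN lookup / task allocation / dispatch sequence into one call from a fabric driver's receive path. A hedged sketch of a caller follows; the my_fab_* names and PDU layout are hypothetical and not part of this diff:

#include <scsi/scsi_tcq.h>		/* MSG_SIMPLE_TAG */
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/* Hypothetical fabric-private per-command context. */
struct my_fab_pdu {
	struct se_cmd se_cmd;
	unsigned char cdb[16];
	unsigned char sense[TRANSPORT_SENSE_BUFFER];
	u32 unpacked_lun;
	u32 data_len;
	int dma_dir;			/* DMA_TO_DEVICE / DMA_FROM_DEVICE */
};

static void my_fab_handle_scsi_cmd(struct se_session *se_sess,
				   struct my_fab_pdu *pdu)
{
	/*
	 * Per the kerneldoc above, exception status comes back through
	 * the fabric's queue_status() callback, so there is no error
	 * return for the caller to check here.
	 */
	target_submit_cmd(&pdu->se_cmd, se_sess, pdu->cdb, pdu->sense,
			  pdu->unpacked_lun, pdu->data_len, MSG_SIMPLE_TAG,
			  pdu->dma_dir, 0 /* no TARGET_SCF_* flags */);
}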
@@ -1798,20 +1844,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 /*
  * Handle SAM-esque emulation for generic transport request failures.
  */
-static void transport_generic_request_failure(
-	struct se_cmd *cmd,
-	int complete,
-	int sc)
+static void transport_generic_request_failure(struct se_cmd *cmd)
 {
 	int ret = 0;
 
 	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
 		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
 		cmd->t_task_cdb[0]);
-	pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",
+	pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
 		cmd->se_tfo->get_cmd_state(cmd),
-		cmd->t_state,
-		cmd->transport_error_status);
+		cmd->t_state, cmd->scsi_sense_reason);
 	pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
 		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
 		" t_transport_active: %d t_transport_stop: %d"
@@ -1829,46 +1871,19 @@ static void transport_generic_request_failure(
 	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
 		transport_complete_task_attr(cmd);
 
-	if (complete) {
-		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
-	}
-
-	switch (cmd->transport_error_status) {
-	case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
-		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-		break;
-	case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
-		cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
-		break;
-	case PYX_TRANSPORT_INVALID_CDB_FIELD:
-		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-		break;
-	case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
-		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-		break;
-	case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
-		if (!sc)
-			transport_new_cmd_failure(cmd);
-		/*
-		 * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
-		 * we force this session to fall back to session
-		 * recovery.
-		 */
-		cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
-		cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
-
-		goto check_stop;
-	case PYX_TRANSPORT_LU_COMM_FAILURE:
-	case PYX_TRANSPORT_ILLEGAL_REQUEST:
-		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-		break;
-	case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
-		cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
-		break;
-	case PYX_TRANSPORT_WRITE_PROTECTED:
-		cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+	switch (cmd->scsi_sense_reason) {
+	case TCM_NON_EXISTENT_LUN:
+	case TCM_UNSUPPORTED_SCSI_OPCODE:
+	case TCM_INVALID_CDB_FIELD:
+	case TCM_INVALID_PARAMETER_LIST:
+	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+	case TCM_UNKNOWN_MODE_PAGE:
+	case TCM_WRITE_PROTECTED:
+	case TCM_CHECK_CONDITION_ABORT_CMD:
+	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+	case TCM_CHECK_CONDITION_NOT_READY:
 		break;
-	case PYX_TRANSPORT_RESERVATION_CONFLICT:
+	case TCM_RESERVATION_CONFLICT:
 		/*
 		 * No SENSE Data payload for this case, set SCSI Status
 		 * and queue the response to $FABRIC_MOD.
@@ -1893,15 +1908,9 @@ static void transport_generic_request_failure(
 		if (ret == -EAGAIN || ret == -ENOMEM)
 			goto queue_full;
 		goto check_stop;
-	case PYX_TRANSPORT_USE_SENSE_REASON:
-		/*
-		 * struct se_cmd->scsi_sense_reason already set
-		 */
-		break;
 	default:
 		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
-			cmd->t_task_cdb[0],
-			cmd->transport_error_status);
+			cmd->t_task_cdb[0], cmd->scsi_sense_reason);
 		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
 		break;
 	}
@@ -1912,14 +1921,10 @@ static void transport_generic_request_failure(
 	 * transport_send_check_condition_and_sense() after handling
 	 * possible unsoliticied write data payloads.
 	 */
-	if (!sc && !cmd->se_tfo->new_cmd_map)
-		transport_new_cmd_failure(cmd);
-	else {
-		ret = transport_send_check_condition_and_sense(cmd,
-				cmd->scsi_sense_reason, 0);
-		if (ret == -EAGAIN || ret == -ENOMEM)
-			goto queue_full;
-	}
+	ret = transport_send_check_condition_and_sense(cmd,
+			cmd->scsi_sense_reason, 0);
+	if (ret == -EAGAIN || ret == -ENOMEM)
+		goto queue_full;
 
 check_stop:
 	transport_lun_remove_cmd(cmd);
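The net effect of the hunks above: the parallel transport_error_status / PYX_TRANSPORT_* error namespace is gone and cmd->scsi_sense_reason becomes the single source of truth. A failure site inside this file now looks roughly like the sketch below; my_do_io is a hypothetical stand-in for a real dispatch call, and transport_generic_request_failure() is the file-local helper converted above:

#include <target/target_core_base.h>

static void transport_generic_request_failure(struct se_cmd *);	/* file-local, as above */
static int my_do_io(struct se_cmd *cmd);	/* hypothetical dispatch call */

static void my_dispatch(struct se_cmd *cmd)
{
	if (my_do_io(cmd) < 0) {
		/*
		 * Record the TCM_* reason; the single-argument failure
		 * path builds the CHECK CONDITION payload from it.
		 */
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		transport_generic_request_failure(cmd);
	}
}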
@@ -1974,18 +1979,6 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
 	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 }
 
-static inline int transport_tcq_window_closed(struct se_device *dev)
-{
-	if (dev->dev_tcq_window_closed++ <
-			PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
-		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
-	} else
-		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
-
-	wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
-	return 0;
-}
-
 /*
  * Called from Fabric Module context from transport_execute_tasks()
  *
@@ -2002,19 +1995,12 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 	 * to allow the passed struct se_cmd list of tasks to the front of the list.
 	 */
 	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
-		atomic_inc(&cmd->se_dev->dev_hoq_count);
-		smp_mb__after_atomic_inc();
 		pr_debug("Added HEAD_OF_QUEUE for CDB:"
 			" 0x%02x, se_ordered_id: %u\n",
 			cmd->t_task_cdb[0],
 			cmd->se_ordered_id);
 		return 1;
 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
-		spin_lock(&cmd->se_dev->ordered_cmd_lock);
-		list_add_tail(&cmd->se_ordered_node,
-				&cmd->se_dev->ordered_cmd_list);
-		spin_unlock(&cmd->se_dev->ordered_cmd_lock);
-
 		atomic_inc(&cmd->se_dev->dev_ordered_sync);
 		smp_mb__after_atomic_inc();
 
@@ -2075,13 +2061,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 static int transport_execute_tasks(struct se_cmd *cmd)
 {
 	int add_tasks;
-
-	if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
-		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
-		transport_generic_request_failure(cmd, 0, 1);
-		return 0;
-	}
-
+	struct se_device *se_dev = cmd->se_dev;
 	/*
 	 * Call transport_cmd_check_stop() to see if a fabric exception
 	 * has occurred that prevents execution.
@@ -2095,19 +2075,16 @@ static int transport_execute_tasks(struct se_cmd *cmd)
 		if (!add_tasks)
 			goto execute_tasks;
 		/*
-		 * This calls transport_add_tasks_from_cmd() to handle
-		 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
-		 * (if enabled) in __transport_add_task_to_execute_queue() and
-		 * transport_add_task_check_sam_attr().
+		 * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
+		 * adds associated se_tasks while holding dev->execute_task_lock
+		 * before I/O dispatch to avoid a double spinlock access.
 		 */
-		transport_add_tasks_from_cmd(cmd);
+		__transport_execute_tasks(se_dev, cmd);
+		return 0;
 	}
-	/*
-	 * Kick the execution queue for the cmd associated struct se_device
-	 * storage object.
-	 */
+
 execute_tasks:
-	__transport_execute_tasks(cmd->se_dev);
+	__transport_execute_tasks(se_dev, NULL);
 	return 0;
 }
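The replacement comment above states the motivation for the new new_cmd parameter: the command's tasks are queued inside the drain loop's own critical section instead of taking dev->execute_task_lock twice. The generic shape of that hand-off, with illustrative names only:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_queue {
	spinlock_t lock;
	struct list_head work;
};

static void demo_dispatch(struct list_head *item);	/* stand-in for do_task() */

static void demo_drain(struct demo_queue *q, struct list_head *new_work)
{
	spin_lock_irq(&q->lock);
	/* Enqueue the new command's work under the same lock hold... */
	if (new_work)
		list_splice_tail_init(new_work, &q->work);
	/* ...then drain, dropping the lock around each dispatch. */
	while (!list_empty(&q->work)) {
		struct list_head *item = q->work.next;

		list_del_init(item);
		spin_unlock_irq(&q->lock);
		demo_dispatch(item);
		spin_lock_irq(&q->lock);
	}
	spin_unlock_irq(&q->lock);
}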
@@ -2117,24 +2094,18 @@ execute_tasks:
  *
  * Called from transport_processing_thread()
  */
-static int __transport_execute_tasks(struct se_device *dev)
+static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
 {
 	int error;
 	struct se_cmd *cmd = NULL;
 	struct se_task *task = NULL;
 	unsigned long flags;
 
-	/*
-	 * Check if there is enough room in the device and HBA queue to send
-	 * struct se_tasks to the selected transport.
-	 */
 check_depth:
-	if (!atomic_read(&dev->depth_left))
-		return transport_tcq_window_closed(dev);
-
-	dev->dev_tcq_window_closed = 0;
-
 	spin_lock_irq(&dev->execute_task_lock);
+	if (new_cmd != NULL)
+		__transport_add_tasks_from_cmd(new_cmd);
+
 	if (list_empty(&dev->execute_task_list)) {
 		spin_unlock_irq(&dev->execute_task_lock);
 		return 0;
@@ -2144,10 +2115,7 @@ check_depth:
 	__transport_remove_task_from_execute_queue(task, dev);
 	spin_unlock_irq(&dev->execute_task_lock);
 
-	atomic_dec(&dev->depth_left);
-
 	cmd = task->task_se_cmd;
-
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	task->task_flags |= (TF_ACTIVE | TF_SENT);
 	atomic_inc(&cmd->t_task_cdbs_sent);
@@ -2163,34 +2131,20 @@ check_depth:
 	else
 		error = dev->transport->do_task(task);
 	if (error != 0) {
-		cmd->transport_error_status = error;
 		spin_lock_irqsave(&cmd->t_state_lock, flags);
 		task->task_flags &= ~TF_ACTIVE;
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		atomic_set(&cmd->t_transport_sent, 0);
 		transport_stop_tasks_for_cmd(cmd);
-		atomic_inc(&dev->depth_left);
-		transport_generic_request_failure(cmd, 0, 1);
+		transport_generic_request_failure(cmd);
 	}
 
+	new_cmd = NULL;
 	goto check_depth;
 
 	return 0;
 }
 
-void transport_new_cmd_failure(struct se_cmd *se_cmd)
-{
-	unsigned long flags;
-	/*
-	 * Any unsolicited data will get dumped for failed command inside of
-	 * the fabric plugin
-	 */
-	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
-	se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
-	se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-}
-
 static inline u32 transport_get_sectors_6(
 	unsigned char *cdb,
 	struct se_cmd *cmd,
@@ -2213,10 +2167,15 @@ static inline u32 transport_get_sectors_6(
 
 	/*
 	 * Everything else assume TYPE_DISK Sector CDB location.
-	 * Use 8-bit sector value.
+	 * Use 8-bit sector value.  SBC-3 says:
+	 *
+	 *   A TRANSFER LENGTH field set to zero specifies that 256
+	 *   logical blocks shall be written.  Any other value
+	 *   specifies the number of logical blocks that shall be
+	 *   written.
	 */
 type_disk:
-	return (u32)cdb[4];
+	return cdb[4] ? : 256;
 }
 
 static inline u32 transport_get_sectors_10(
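The `cdb[4] ? : 256` change above fixes the 6-byte CDB edge case quoted from SBC-3: a TRANSFER LENGTH of zero means 256 blocks, not zero. The GNU `?:` extension `x ? : y` evaluates to `x` when `x` is nonzero, i.e. it is shorthand for `x ? x : y`. A spelled-out equivalent, with an illustrative helper name:

#include <linux/types.h>

static inline u32 demo_sectors_6(const unsigned char *cdb)
{
	u32 sectors = cdb[4];

	return sectors ? sectors : 256;	/* 0 => 256 logical blocks (SBC-3) */
}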
@@ -2421,7 +2380,7 @@ static int transport_get_sense_data(struct se_cmd *cmd)
 
 	list_for_each_entry_safe(task, task_tmp,
 				&cmd->t_task_list, t_list) {
-		if (!task->task_sense)
+		if (!(task->task_flags & TF_HAS_SENSE))
 			continue;
 
 		if (!dev->transport->get_sense_buffer) {
@@ -2460,27 +2419,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
 	return -1;
 }
 
-static int
-transport_handle_reservation_conflict(struct se_cmd *cmd)
-{
-	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-	cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
-	cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
-	/*
-	 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
-	 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
-	 * CONFLICT STATUS.
-	 *
-	 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
-	 */
-	if (cmd->se_sess &&
-	    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
-		core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
-			cmd->orig_fe_lun, 0x2C,
-			ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
-	return -EINVAL;
-}
-
 static inline long long transport_dev_end_lba(struct se_device *dev)
 {
 	return dev->transport->get_blocks(dev) + 1;
@@ -2595,8 +2533,12 @@ static int transport_generic_cmd_sequencer(
 	 */
 	if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
 		if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
-					cmd, cdb, pr_reg_type) != 0)
-			return transport_handle_reservation_conflict(cmd);
+					cmd, cdb, pr_reg_type) != 0) {
+			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+			return -EBUSY;
+		}
 		/*
 		 * This means the CDB is allowed for the SCSI Initiator port
 		 * when said port is *NOT* holding the legacy SPC-2 or
@@ -2658,7 +2600,8 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->t_task_lba = transport_lba_32(cdb);
-		cmd->t_tasks_fua = (cdb[1] & 0x8);
+		if (cdb[1] & 0x8)
+			cmd->se_cmd_flags |= SCF_FUA;
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case WRITE_12:
@@ -2667,7 +2610,8 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->t_task_lba = transport_lba_32(cdb);
-		cmd->t_tasks_fua = (cdb[1] & 0x8);
+		if (cdb[1] & 0x8)
+			cmd->se_cmd_flags |= SCF_FUA;
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case WRITE_16:
@@ -2676,12 +2620,13 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;
 		size = transport_get_size(sectors, cdb, cmd);
 		cmd->t_task_lba = transport_lba_64(cdb);
-		cmd->t_tasks_fua = (cdb[1] & 0x8);
+		if (cdb[1] & 0x8)
+			cmd->se_cmd_flags |= SCF_FUA;
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case XDWRITEREAD_10:
 		if ((cmd->data_direction != DMA_TO_DEVICE) ||
-		    !(cmd->t_tasks_bidi))
+		    !(cmd->se_cmd_flags & SCF_BIDI))
 			goto out_invalid_cdb_field;
 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
 		if (sector_ret)
@@ -2700,7 +2645,8 @@ static int transport_generic_cmd_sequencer(
 		 * Setup BIDI XOR callback to be run after I/O completion.
 		 */
 		cmd->transport_complete_callback = &transport_xor_callback;
-		cmd->t_tasks_fua = (cdb[1] & 0x8);
+		if (cdb[1] & 0x8)
+			cmd->se_cmd_flags |= SCF_FUA;
 		break;
 	case VARIABLE_LENGTH_CMD:
 		service_action = get_unaligned_be16(&cdb[8]);
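The t_tasks_fua conversions above fold a dedicated per-command field into the se_cmd_flags bitmask as SCF_FUA, keeping the command's boolean state in one word. In the WRITE(10), WRITE(12), WRITE(16) and XDWRITEREAD(10) CDB formats, bit 3 of byte 1 is the FUA (force unit access) bit per SBC-3. A minimal decode with an illustrative helper name:

#include <linux/types.h>

#define DEMO_CDB_FUA	0x08	/* bit 3 of byte 1 in these CDB formats */

static inline bool demo_cdb_fua(const unsigned char *cdb)
{
	return (cdb[1] & DEMO_CDB_FUA) != 0;
}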
@@ -2728,7 +2674,8 @@
 			 * completion.
 			 */
 			cmd->transport_complete_callback = &transport_xor_callback;
-			cmd->t_tasks_fua = (cdb[10] & 0x8);
+			if (cdb[1] & 0x8)
+				cmd->se_cmd_flags |= SCF_FUA;
 			break;
 		case WRITE_SAME_32:
 			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
@@ -3171,18 +3118,13 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
 			cmd->se_ordered_id);
 	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
-		atomic_dec(&dev->dev_hoq_count);
-		smp_mb__after_atomic_dec();
 		dev->dev_cur_ordered_id++;
 		pr_debug("Incremented dev_cur_ordered_id: %u for"
 			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
 			cmd->se_ordered_id);
 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
-		spin_lock(&dev->ordered_cmd_lock);
-		list_del(&cmd->se_ordered_node);
 		atomic_dec(&dev->dev_ordered_sync);
 		smp_mb__after_atomic_dec();
-		spin_unlock(&dev->ordered_cmd_lock);
 
 		dev->dev_cur_ordered_id++;
 		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -3433,6 +3375,32 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 }
 
 /**
+ * transport_release_cmd - free a command
+ * @cmd: command to free
+ *
+ * This routine unconditionally frees a command, and reference counting
+ * or list removal must be done in the caller.
+ */
+static void transport_release_cmd(struct se_cmd *cmd)
+{
+	BUG_ON(!cmd->se_tfo);
+
+	if (cmd->se_tmr_req)
+		core_tmr_release_req(cmd->se_tmr_req);
+	if (cmd->t_task_cdb != cmd->__t_task_cdb)
+		kfree(cmd->t_task_cdb);
+	/*
+	 * If this cmd has been setup with target_get_sess_cmd(), drop
+	 * the kref and call ->release_cmd() in kref callback.
+	 */
+	if (cmd->check_release != 0) {
+		target_put_sess_cmd(cmd->se_sess, cmd);
+		return;
+	}
+	cmd->se_tfo->release_cmd(cmd);
+}
+
+/**
  * transport_put_cmd - release a reference to a command
  * @cmd: command to release
 *
@@ -3495,6 +3463,18 @@ int transport_generic_map_mem_to_cmd(
 
 	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
 	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+		/*
+		 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+		 * scatterlists already have been set to follow what the fabric
+		 * passes for the original expected data transfer length.
+		 */
+		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+			pr_warn("Rejecting SCSI DATA overflow for fabric using"
+				" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+			return -EINVAL;
+		}
 
 		cmd->t_data_sg = sgl;
 		cmd->t_data_nents = sgl_count;
@@ -3813,7 +3793,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
 	    cmd->data_length) {
 		ret = transport_generic_get_mem(cmd);
 		if (ret < 0)
-			return ret;
+			goto out_fail;
 	}
 
 	/*
@@ -3842,8 +3822,15 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
 		task_cdbs = transport_allocate_control_task(cmd);
 	}
 
-	if (task_cdbs <= 0)
+	if (task_cdbs < 0)
 		goto out_fail;
+	else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+		cmd->t_state = TRANSPORT_COMPLETE;
+		atomic_set(&cmd->t_transport_active, 1);
+		INIT_WORK(&cmd->work, target_complete_ok_work);
+		queue_work(target_completion_wq, &cmd->work);
+		return 0;
+	}
 
 	if (set_counts) {
 		atomic_inc(&cmd->t_fe_count);
@@ -3929,7 +3916,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
 	else if (ret < 0)
 		return ret;
 
-	return PYX_TRANSPORT_WRITE_PENDING;
+	return 1;
 
 queue_full:
 	pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
@@ -3938,33 +3925,6 @@ queue_full:
 	return 0;
 }
 
-/**
- * transport_release_cmd - free a command
- * @cmd: command to free
- *
- * This routine unconditionally frees a command, and reference counting
- * or list removal must be done in the caller.
- */
-void transport_release_cmd(struct se_cmd *cmd)
-{
-	BUG_ON(!cmd->se_tfo);
-
-	if (cmd->se_tmr_req)
-		core_tmr_release_req(cmd->se_tmr_req);
-	if (cmd->t_task_cdb != cmd->__t_task_cdb)
-		kfree(cmd->t_task_cdb);
-	/*
-	 * Check if target_wait_for_sess_cmds() is expecting to
-	 * release se_cmd directly here..
-	 */
-	if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd)
-		if (cmd->se_tfo->check_release_cmd(cmd) != 0)
-			return;
-
-	cmd->se_tfo->release_cmd(cmd);
-}
-EXPORT_SYMBOL(transport_release_cmd);
-
 void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
 {
 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
@@ -3991,11 +3951,22 @@ EXPORT_SYMBOL(transport_generic_free_cmd);
 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
  * @se_sess:	session to reference
  * @se_cmd:	command descriptor to add
+ * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
  */
-void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
+			bool ack_kref)
 {
 	unsigned long flags;
 
+	kref_init(&se_cmd->cmd_kref);
+	/*
+	 * Add a second kref if the fabric caller is expecting to handle
+	 * fabric acknowledgement that requires two target_put_sess_cmd()
+	 * invocations before se_cmd descriptor release.
+	 */
+	if (ack_kref == true)
+		kref_get(&se_cmd->cmd_kref);
+
 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
 	se_cmd->check_release = 1;
@@ -4003,30 +3974,36 @@ void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
 }
 EXPORT_SYMBOL(target_get_sess_cmd);
 
-/* target_put_sess_cmd - Check for active I/O shutdown or list delete
- * @se_sess:	session to reference
- * @se_cmd:	command descriptor to drop
- */
-int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+static void target_release_cmd_kref(struct kref *kref)
 {
+	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
+	struct se_session *se_sess = se_cmd->se_sess;
 	unsigned long flags;
 
 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 	if (list_empty(&se_cmd->se_cmd_list)) {
 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 		WARN_ON(1);
-		return 0;
+		return;
 	}
-
 	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 		complete(&se_cmd->cmd_wait_comp);
-		return 1;
+		return;
 	}
 	list_del(&se_cmd->se_cmd_list);
 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
-	return 0;
+	se_cmd->se_tfo->release_cmd(se_cmd);
+}
+
+/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
+ * @se_sess:	session to reference
+ * @se_cmd:	command descriptor to drop
+ */
+int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+{
+	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
 }
 EXPORT_SYMBOL(target_put_sess_cmd);
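With the hunks above, target_put_sess_cmd() becomes a plain kref_put(), and list removal plus the fabric ->release_cmd() call move into the kref release callback; target_get_sess_cmd() takes the matching kref_init() plus an optional extra reference for fabrics that acknowledge packets. The bare-bones shape of that lifetime rule, with illustrative names and kfree() standing in for the fabric release:

#include <linux/kernel.h>	/* container_of */
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_cmd {
	struct kref kref;
};

static void demo_release(struct kref *kref)
{
	struct demo_cmd *cmd = container_of(kref, struct demo_cmd, kref);

	kfree(cmd);		/* last reference dropped: free the command */
}

static void demo_get(struct demo_cmd *cmd, bool ack_kref)
{
	kref_init(&cmd->kref);		/* reference owned by the core */
	if (ack_kref)
		kref_get(&cmd->kref);	/* extra ref, dropped at fabric ACK */
}

static int demo_put(struct demo_cmd *cmd)
{
	/* Returns nonzero once demo_release() has run. */
	return kref_put(&cmd->kref, demo_release);
}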
@@ -4242,7 +4219,7 @@ check_cond:
 
 static int transport_clear_lun_thread(void *p)
 {
-	struct se_lun *lun = (struct se_lun *)p;
+	struct se_lun *lun = p;
 
 	__transport_clear_lun_from_sessions(lun);
 	complete(&lun->lun_shutdown_comp);
@@ -4421,6 +4398,7 @@ int transport_send_check_condition_and_sense(
 	case TCM_NON_EXISTENT_LUN:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ILLEGAL REQUEST */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
 		/* LOGICAL UNIT NOT SUPPORTED */
@@ -4430,6 +4408,7 @@ int transport_send_check_condition_and_sense(
 	case TCM_SECTOR_COUNT_TOO_MANY:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ILLEGAL REQUEST */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
 		/* INVALID COMMAND OPERATION CODE */
@@ -4438,6 +4417,7 @@ int transport_send_check_condition_and_sense(
 	case TCM_UNKNOWN_MODE_PAGE:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ILLEGAL REQUEST */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
 		/* INVALID FIELD IN CDB */
@@ -4446,6 +4426,7 @@ int transport_send_check_condition_and_sense(
 	case TCM_CHECK_CONDITION_ABORT_CMD:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ABORTED COMMAND */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
 		/* BUS DEVICE RESET FUNCTION OCCURRED */
@@ -4455,6 +4436,7 @@ int transport_send_check_condition_and_sense(
 	case TCM_INCORRECT_AMOUNT_OF_DATA:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ABORTED COMMAND */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
 		/* WRITE ERROR */
@@ -4465,6 +4447,7 @@ int transport_send_check_condition_and_sense(
 	case TCM_INVALID_CDB_FIELD:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ABORTED COMMAND */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
 		/* INVALID FIELD IN CDB */
@@ -4473,6 +4456,7 @@ int transport_send_check_condition_and_sense(
 	case TCM_INVALID_PARAMETER_LIST:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ABORTED COMMAND */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
 		/* INVALID FIELD IN PARAMETER LIST */
@@ -4481,6 +4465,7 @@ int transport_send_check_condition_and_sense(
 	case TCM_UNEXPECTED_UNSOLICITED_DATA:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ABORTED COMMAND */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
 		/* WRITE ERROR */
@@ -4491,6 +4476,7 @@ int transport_send_check_condition_and_sense(
 	case TCM_SERVICE_CRC_ERROR:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ABORTED COMMAND */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
 		/* PROTOCOL SERVICE CRC ERROR */
@@ -4501,6 +4487,7 @@ int transport_send_check_condition_and_sense(
 	case TCM_SNACK_REJECTED:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ABORTED COMMAND */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
 		/* READ ERROR */
@@ -4511,6 +4498,7 @@ int transport_send_check_condition_and_sense(
 	case TCM_WRITE_PROTECTED:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* DATA PROTECT */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
 		/* WRITE PROTECTED */
@@ -4519,6 +4507,7 @@ int transport_send_check_condition_and_sense(
 	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* UNIT ATTENTION */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
 		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
@@ -4528,6 +4517,7 @@ int transport_send_check_condition_and_sense(
 	case TCM_CHECK_CONDITION_NOT_READY:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* Not Ready */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
 		transport_get_sense_codes(cmd, &asc, &ascq);
@@ -4538,6 +4528,7 @@ int transport_send_check_condition_and_sense(
 	default:
 		/* CURRENT ERROR */
 		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
 		/* ILLEGAL REQUEST */
 		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
 		/* LOGICAL UNIT COMMUNICATION FAILURE */
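Every case above gains `buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10`: byte 7 of fixed-format sense data (response code 0x70) is the ADDITIONAL SENSE LENGTH field, and 10 additional bytes after the 8-byte header give the 18-byte sense payload initiators expect. A sketch of the layout using the same SPC_* offsets; the helper itself is illustrative:

#include <linux/types.h>

static void demo_fill_fixed_sense(unsigned char *buf, u8 key, u8 asc, u8 ascq)
{
	buf[0] = 0x70;	/* CURRENT ERROR, fixed format */
	buf[2] = key;	/* SPC_SENSE_KEY_OFFSET */
	buf[7] = 10;	/* SPC_ADD_SENSE_LEN_OFFSET: 8 + 10 = 18 bytes total */
	buf[12] = asc;	/* SPC_ASC_KEY_OFFSET: additional sense code */
	buf[13] = ascq;	/* SPC_ASCQ_KEY_OFFSET: qualifier */
}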
@@ -4602,9 +4593,6 @@ void transport_send_task_abort(struct se_cmd *cmd)
 		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
 			atomic_inc(&cmd->t_transport_aborted);
 			smp_mb__after_atomic_inc();
-			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
-			transport_new_cmd_failure(cmd);
-			return;
 		}
 	}
 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
@@ -4616,11 +4604,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	cmd->se_tfo->queue_status(cmd);
 }
 
-/*	transport_generic_do_tmr():
- *
- *
- */
-int transport_generic_do_tmr(struct se_cmd *cmd)
+static int transport_generic_do_tmr(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct se_tmr_req *tmr = cmd->se_tmr_req;
@@ -4668,9 +4652,7 @@ static int transport_processing_thread(void *param)
 {
 	int ret;
 	struct se_cmd *cmd;
-	struct se_device *dev = (struct se_device *) param;
-
-	set_user_nice(current, -20);
+	struct se_device *dev = param;
 
 	while (!kthread_should_stop()) {
 		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
@@ -4680,8 +4662,6 @@ static int transport_processing_thread(void *param)
 			goto out;
 
 get_cmd:
-		__transport_execute_tasks(dev);
-
 		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
 		if (!cmd)
 			continue;
@@ -4698,18 +4678,13 @@ get_cmd:
 			}
 			ret = cmd->se_tfo->new_cmd_map(cmd);
 			if (ret < 0) {
-				cmd->transport_error_status = ret;
-				transport_generic_request_failure(cmd,
-						0, (cmd->data_direction !=
-						    DMA_TO_DEVICE));
+				transport_generic_request_failure(cmd);
 				break;
 			}
 			ret = transport_generic_new_cmd(cmd);
 			if (ret < 0) {
-				cmd->transport_error_status = ret;
-				transport_generic_request_failure(cmd,
-						0, (cmd->data_direction !=
-						    DMA_TO_DEVICE));
+				transport_generic_request_failure(cmd);
+				break;
 			}
 			break;
 		case TRANSPORT_PROCESS_WRITE: