Diffstat (limited to 'drivers/target/target_core_transport.c')
-rw-r--r-- | drivers/target/target_core_transport.c | 3626
1 files changed, 1324 insertions, 2302 deletions
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 4b9b7169bdd9..46352d658e35 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -58,139 +58,12 @@ #include "target_core_scdb.h" #include "target_core_ua.h" -/* #define DEBUG_CDB_HANDLER */ -#ifdef DEBUG_CDB_HANDLER -#define DEBUG_CDB_H(x...) printk(KERN_INFO x) -#else -#define DEBUG_CDB_H(x...) -#endif - -/* #define DEBUG_CMD_MAP */ -#ifdef DEBUG_CMD_MAP -#define DEBUG_CMD_M(x...) printk(KERN_INFO x) -#else -#define DEBUG_CMD_M(x...) -#endif - -/* #define DEBUG_MEM_ALLOC */ -#ifdef DEBUG_MEM_ALLOC -#define DEBUG_MEM(x...) printk(KERN_INFO x) -#else -#define DEBUG_MEM(x...) -#endif - -/* #define DEBUG_MEM2_ALLOC */ -#ifdef DEBUG_MEM2_ALLOC -#define DEBUG_MEM2(x...) printk(KERN_INFO x) -#else -#define DEBUG_MEM2(x...) -#endif - -/* #define DEBUG_SG_CALC */ -#ifdef DEBUG_SG_CALC -#define DEBUG_SC(x...) printk(KERN_INFO x) -#else -#define DEBUG_SC(x...) -#endif - -/* #define DEBUG_SE_OBJ */ -#ifdef DEBUG_SE_OBJ -#define DEBUG_SO(x...) printk(KERN_INFO x) -#else -#define DEBUG_SO(x...) -#endif - -/* #define DEBUG_CMD_VOL */ -#ifdef DEBUG_CMD_VOL -#define DEBUG_VOL(x...) printk(KERN_INFO x) -#else -#define DEBUG_VOL(x...) -#endif - -/* #define DEBUG_CMD_STOP */ -#ifdef DEBUG_CMD_STOP -#define DEBUG_CS(x...) printk(KERN_INFO x) -#else -#define DEBUG_CS(x...) -#endif - -/* #define DEBUG_PASSTHROUGH */ -#ifdef DEBUG_PASSTHROUGH -#define DEBUG_PT(x...) printk(KERN_INFO x) -#else -#define DEBUG_PT(x...) -#endif - -/* #define DEBUG_TASK_STOP */ -#ifdef DEBUG_TASK_STOP -#define DEBUG_TS(x...) printk(KERN_INFO x) -#else -#define DEBUG_TS(x...) -#endif - -/* #define DEBUG_TRANSPORT_STOP */ -#ifdef DEBUG_TRANSPORT_STOP -#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x) -#else -#define DEBUG_TRANSPORT_S(x...) -#endif - -/* #define DEBUG_TASK_FAILURE */ -#ifdef DEBUG_TASK_FAILURE -#define DEBUG_TF(x...) printk(KERN_INFO x) -#else -#define DEBUG_TF(x...) -#endif - -/* #define DEBUG_DEV_OFFLINE */ -#ifdef DEBUG_DEV_OFFLINE -#define DEBUG_DO(x...) printk(KERN_INFO x) -#else -#define DEBUG_DO(x...) -#endif - -/* #define DEBUG_TASK_STATE */ -#ifdef DEBUG_TASK_STATE -#define DEBUG_TSTATE(x...) printk(KERN_INFO x) -#else -#define DEBUG_TSTATE(x...) -#endif - -/* #define DEBUG_STATUS_THR */ -#ifdef DEBUG_STATUS_THR -#define DEBUG_ST(x...) printk(KERN_INFO x) -#else -#define DEBUG_ST(x...) -#endif - -/* #define DEBUG_TASK_TIMEOUT */ -#ifdef DEBUG_TASK_TIMEOUT -#define DEBUG_TT(x...) printk(KERN_INFO x) -#else -#define DEBUG_TT(x...) -#endif - -/* #define DEBUG_GENERIC_REQUEST_FAILURE */ -#ifdef DEBUG_GENERIC_REQUEST_FAILURE -#define DEBUG_GRF(x...) printk(KERN_INFO x) -#else -#define DEBUG_GRF(x...) -#endif - -/* #define DEBUG_SAM_TASK_ATTRS */ -#ifdef DEBUG_SAM_TASK_ATTRS -#define DEBUG_STA(x...) printk(KERN_INFO x) -#else -#define DEBUG_STA(x...) 
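This first hunk deletes seventeen near-identical blocks of hand-rolled DEBUG_* wrapper macros in favor of pr_debug(). For readers unfamiliar with the trade, a minimal sketch (names other than pr_debug()/pr_fmt() are illustrative): the old wrappers are dead code unless the #define is uncommented and the file rebuilt, while pr_debug() call sites can be enabled individually at runtime when CONFIG_DYNAMIC_DEBUG is set.

    /* Old pattern, one copy per debug area: compiled out unless the
     * #define above it is uncommented and the file is rebuilt. */
    /* #define DEBUG_CMD_STOP */
    #ifdef DEBUG_CMD_STOP
    #define DEBUG_CS(x...) printk(KERN_INFO x)
    #else
    #define DEBUG_CS(x...)
    #endif

    /* New pattern: pr_debug() is a no-op unless DEBUG or
     * CONFIG_DYNAMIC_DEBUG is set; with dynamic debug, each call site
     * can be toggled via /sys/kernel/debug/dynamic_debug/control. */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
    #include <linux/printk.h>
    #include <linux/types.h>

    static void report_stop(u32 tag)
    {
        pr_debug("stopping command ITT: 0x%08x\n", tag);
    }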
-#endif - -struct se_global *se_global; +static int sub_api_initialized; static struct kmem_cache *se_cmd_cache; static struct kmem_cache *se_sess_cache; struct kmem_cache *se_tmr_req_cache; struct kmem_cache *se_ua_cache; -struct kmem_cache *se_mem_cache; struct kmem_cache *t10_pr_reg_cache; struct kmem_cache *t10_alua_lu_gp_cache; struct kmem_cache *t10_alua_lu_gp_mem_cache; @@ -201,116 +74,87 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; typedef int (*map_func_t)(struct se_task *, u32); static int transport_generic_write_pending(struct se_cmd *); -static int transport_processing_thread(void *); +static int transport_processing_thread(void *param); static int __transport_execute_tasks(struct se_device *dev); static void transport_complete_task_attr(struct se_cmd *cmd); +static int transport_complete_qf(struct se_cmd *cmd); +static void transport_handle_queue_full(struct se_cmd *cmd, + struct se_device *dev, int (*qf_callback)(struct se_cmd *)); static void transport_direct_request_timeout(struct se_cmd *cmd); static void transport_free_dev_tasks(struct se_cmd *cmd); -static u32 transport_generic_get_cdb_count(struct se_cmd *cmd, - unsigned long long starting_lba, u32 sectors, +static u32 transport_allocate_tasks(struct se_cmd *cmd, + unsigned long long starting_lba, enum dma_data_direction data_direction, - struct list_head *mem_list, int set_counts); -static int transport_generic_get_mem(struct se_cmd *cmd, u32 length, - u32 dma_size); + struct scatterlist *sgl, unsigned int nents); +static int transport_generic_get_mem(struct se_cmd *cmd); static int transport_generic_remove(struct se_cmd *cmd, - int release_to_pool, int session_reinstatement); -static int transport_get_sectors(struct se_cmd *cmd); -static struct list_head *transport_init_se_mem_list(void); -static int transport_map_sg_to_mem(struct se_cmd *cmd, - struct list_head *se_mem_list, void *in_mem, - u32 *se_mem_cnt); -static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd, - unsigned char *dst, struct list_head *se_mem_list); + int session_reinstatement); static void transport_release_fe_cmd(struct se_cmd *cmd); static void transport_remove_cmd_from_queue(struct se_cmd *cmd, struct se_queue_obj *qobj); static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); static void transport_stop_all_task_timers(struct se_cmd *cmd); -int init_se_global(void) +int init_se_kmem_caches(void) { - struct se_global *global; - - global = kzalloc(sizeof(struct se_global), GFP_KERNEL); - if (!(global)) { - printk(KERN_ERR "Unable to allocate memory for struct se_global\n"); - return -1; - } - - INIT_LIST_HEAD(&global->g_lu_gps_list); - INIT_LIST_HEAD(&global->g_se_tpg_list); - INIT_LIST_HEAD(&global->g_hba_list); - INIT_LIST_HEAD(&global->g_se_dev_list); - spin_lock_init(&global->g_device_lock); - spin_lock_init(&global->hba_lock); - spin_lock_init(&global->se_tpg_lock); - spin_lock_init(&global->lu_gps_lock); - spin_lock_init(&global->plugin_class_lock); - se_cmd_cache = kmem_cache_create("se_cmd_cache", sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL); - if (!(se_cmd_cache)) { - printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n"); + if (!se_cmd_cache) { + pr_err("kmem_cache_create for struct se_cmd failed\n"); goto out; } se_tmr_req_cache = kmem_cache_create("se_tmr_cache", sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), 0, NULL); - if (!(se_tmr_req_cache)) { - printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req" + if (!se_tmr_req_cache) { + pr_err("kmem_cache_create() 
for struct se_tmr_req" " failed\n"); goto out; } se_sess_cache = kmem_cache_create("se_sess_cache", sizeof(struct se_session), __alignof__(struct se_session), 0, NULL); - if (!(se_sess_cache)) { - printk(KERN_ERR "kmem_cache_create() for struct se_session" + if (!se_sess_cache) { + pr_err("kmem_cache_create() for struct se_session" " failed\n"); goto out; } se_ua_cache = kmem_cache_create("se_ua_cache", sizeof(struct se_ua), __alignof__(struct se_ua), 0, NULL); - if (!(se_ua_cache)) { - printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n"); - goto out; - } - se_mem_cache = kmem_cache_create("se_mem_cache", - sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL); - if (!(se_mem_cache)) { - printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n"); + if (!se_ua_cache) { + pr_err("kmem_cache_create() for struct se_ua failed\n"); goto out; } t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", sizeof(struct t10_pr_registration), __alignof__(struct t10_pr_registration), 0, NULL); - if (!(t10_pr_reg_cache)) { - printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration" + if (!t10_pr_reg_cache) { + pr_err("kmem_cache_create() for struct t10_pr_registration" " failed\n"); goto out; } t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), 0, NULL); - if (!(t10_alua_lu_gp_cache)) { - printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache" + if (!t10_alua_lu_gp_cache) { + pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" " failed\n"); goto out; } t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", sizeof(struct t10_alua_lu_gp_member), __alignof__(struct t10_alua_lu_gp_member), 0, NULL); - if (!(t10_alua_lu_gp_mem_cache)) { - printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_" + if (!t10_alua_lu_gp_mem_cache) { + pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" "cache failed\n"); goto out; } t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", sizeof(struct t10_alua_tg_pt_gp), __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); - if (!(t10_alua_tg_pt_gp_cache)) { - printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" + if (!t10_alua_tg_pt_gp_cache) { + pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" "cache failed\n"); goto out; } @@ -319,14 +163,12 @@ int init_se_global(void) sizeof(struct t10_alua_tg_pt_gp_member), __alignof__(struct t10_alua_tg_pt_gp_member), 0, NULL); - if (!(t10_alua_tg_pt_gp_mem_cache)) { - printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" + if (!t10_alua_tg_pt_gp_mem_cache) { + pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" "mem_t failed\n"); goto out; } - se_global = global; - return 0; out: if (se_cmd_cache) @@ -337,8 +179,6 @@ out: kmem_cache_destroy(se_sess_cache); if (se_ua_cache) kmem_cache_destroy(se_ua_cache); - if (se_mem_cache) - kmem_cache_destroy(se_mem_cache); if (t10_pr_reg_cache) kmem_cache_destroy(t10_pr_reg_cache); if (t10_alua_lu_gp_cache) @@ -349,45 +189,25 @@ out: kmem_cache_destroy(t10_alua_tg_pt_gp_cache); if (t10_alua_tg_pt_gp_mem_cache) kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); - kfree(global); - return -1; + return -ENOMEM; } -void release_se_global(void) +void release_se_kmem_caches(void) { - struct se_global *global; - - global = se_global; - if (!(global)) - return; - kmem_cache_destroy(se_cmd_cache); kmem_cache_destroy(se_tmr_req_cache); kmem_cache_destroy(se_sess_cache); kmem_cache_destroy(se_ua_cache); - kmem_cache_destroy(se_mem_cache); 
kmem_cache_destroy(t10_pr_reg_cache); kmem_cache_destroy(t10_alua_lu_gp_cache); kmem_cache_destroy(t10_alua_lu_gp_mem_cache); kmem_cache_destroy(t10_alua_tg_pt_gp_cache); kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); - kfree(global); - - se_global = NULL; } -/* SCSI statistics table index */ -static struct scsi_index_table scsi_index_table; - -/* - * Initialize the index table for allocating unique row indexes to various mib - * tables. - */ -void init_scsi_index_table(void) -{ - memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); - spin_lock_init(&scsi_index_table.lock); -} +/* This code ensures unique mib indexes are handed out. */ +static DEFINE_SPINLOCK(scsi_mib_index_lock); +static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; /* * Allocate a new row index for the entry type specified @@ -396,16 +216,11 @@ u32 scsi_get_new_index(scsi_index_t type) { u32 new_index; - if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { - printk(KERN_ERR "Invalid index type %d\n", type); - return -EINVAL; - } + BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); - spin_lock(&scsi_index_table.lock); - new_index = ++scsi_index_table.scsi_mib_index[type]; - if (new_index == 0) - new_index = ++scsi_index_table.scsi_mib_index[type]; - spin_unlock(&scsi_index_table.lock); + spin_lock(&scsi_mib_index_lock); + new_index = ++scsi_mib_index[type]; + spin_unlock(&scsi_mib_index_lock); return new_index; } @@ -425,34 +240,37 @@ static int transport_subsystem_reqmods(void) ret = request_module("target_core_iblock"); if (ret != 0) - printk(KERN_ERR "Unable to load target_core_iblock\n"); + pr_err("Unable to load target_core_iblock\n"); ret = request_module("target_core_file"); if (ret != 0) - printk(KERN_ERR "Unable to load target_core_file\n"); + pr_err("Unable to load target_core_file\n"); ret = request_module("target_core_pscsi"); if (ret != 0) - printk(KERN_ERR "Unable to load target_core_pscsi\n"); + pr_err("Unable to load target_core_pscsi\n"); ret = request_module("target_core_stgt"); if (ret != 0) - printk(KERN_ERR "Unable to load target_core_stgt\n"); + pr_err("Unable to load target_core_stgt\n"); return 0; } int transport_subsystem_check_init(void) { - if (se_global->g_sub_api_initialized) + int ret; + + if (sub_api_initialized) return 0; /* * Request the loading of known TCM subsystem plugins.. */ - if (transport_subsystem_reqmods() < 0) - return -1; + ret = transport_subsystem_reqmods(); + if (ret < 0) + return ret; - se_global->g_sub_api_initialized = 1; + sub_api_initialized = 1; return 0; } @@ -461,8 +279,8 @@ struct se_session *transport_init_session(void) struct se_session *se_sess; se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); - if (!(se_sess)) { - printk(KERN_ERR "Unable to allocate struct se_session from" + if (!se_sess) { + pr_err("Unable to allocate struct se_session from" " se_sess_cache\n"); return ERR_PTR(-ENOMEM); } @@ -497,9 +315,9 @@ void __transport_register_session( * If the fabric module supports an ISID based TransportID, * save this value in binary from the fabric I_T Nexus now. 
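The SCSI MIB index table likewise loses its global struct and its init function: DEFINE_SPINLOCK() yields a statically initialized lock, so nothing needs to run at startup, and the range check becomes a BUG_ON() since callers pass a fixed enum (the old -EINVAL return was dubious anyway coming from a function that returns u32). Note the rewrite also drops the old wrap-to-zero recheck. A standalone sketch with stand-in names:

    #include <linux/spinlock.h>
    #include <linux/types.h>
    #include <linux/bug.h>

    enum { INDEX_TYPE_A, INDEX_TYPE_B, INDEX_TYPE_MAX };  /* stand-in enum */

    static DEFINE_SPINLOCK(index_lock);       /* no runtime init needed */
    static u32 index_table[INDEX_TYPE_MAX];

    static u32 get_new_index(int type)
    {
        u32 new_index;

        /* Callers pass a fixed enum, so an out-of-range value is a
         * kernel bug rather than a recoverable error. */
        BUG_ON(type < 0 || type >= INDEX_TYPE_MAX);

        spin_lock(&index_lock);
        new_index = ++index_table[type];
        spin_unlock(&index_lock);

        return new_index;
    }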
*/ - if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) { + if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { memset(&buf[0], 0, PR_REG_ISID_LEN); - TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, + se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &buf[0], PR_REG_ISID_LEN); se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); } @@ -516,8 +334,8 @@ void __transport_register_session( } list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); - printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", - TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr); + pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", + se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); } EXPORT_SYMBOL(__transport_register_session); @@ -541,7 +359,7 @@ void transport_deregister_session_configfs(struct se_session *se_sess) * Used by struct se_node_acl's under ConfigFS to locate active struct se_session */ se_nacl = se_sess->se_node_acl; - if ((se_nacl)) { + if (se_nacl) { spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); list_del(&se_sess->sess_acl_list); /* @@ -572,7 +390,7 @@ void transport_deregister_session(struct se_session *se_sess) struct se_portal_group *se_tpg = se_sess->se_tpg; struct se_node_acl *se_nacl; - if (!(se_tpg)) { + if (!se_tpg) { transport_free_session(se_sess); return; } @@ -588,18 +406,18 @@ void transport_deregister_session(struct se_session *se_sess) * struct se_node_acl if it had been previously dynamically generated. */ se_nacl = se_sess->se_node_acl; - if ((se_nacl)) { + if (se_nacl) { spin_lock_bh(&se_tpg->acl_node_lock); if (se_nacl->dynamic_node_acl) { - if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache( - se_tpg))) { + if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( + se_tpg)) { list_del(&se_nacl->acl_list); se_tpg->num_node_acls--; spin_unlock_bh(&se_tpg->acl_node_lock); core_tpg_wait_for_nacl_pr_ref(se_nacl); core_free_device_list_for_node(se_nacl, se_tpg); - TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, + se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, se_nacl); spin_lock_bh(&se_tpg->acl_node_lock); } @@ -609,13 +427,13 @@ void transport_deregister_session(struct se_session *se_sess) transport_free_session(se_sess); - printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n", - TPG_TFO(se_tpg)->get_fabric_name()); + pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", + se_tpg->se_tpg_tfo->get_fabric_name()); } EXPORT_SYMBOL(transport_deregister_session); /* - * Called with T_TASK(cmd)->t_state_lock held. + * Called with cmd->t_state_lock held. 
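Most of the churn in the hunks above and below is one mechanical substitution: accessor macros such as TPG_TFO(), CMD_TFO() and T_TASK() are expanded into plain member dereferences, and T_TASK() disappears entirely once the intermediate struct se_transport_task is folded into struct se_cmd (so t_state_lock, t_task_list and friends become direct cmd-> members). A compilable miniature of the pattern, using trimmed hypothetical types standing in for the kernel ones:

    struct fabric_ops {
        const char *(*get_fabric_name)(void);
    };

    struct portal_group {            /* trimmed stand-in for se_portal_group */
        struct fabric_ops *se_tpg_tfo;
    };

    /* Before: an accessor macro hides one level of dereference. */
    #define TPG_TFO(tpg)  ((tpg)->se_tpg_tfo)

    static const char *fabric_name_old(struct portal_group *tpg)
    {
        return TPG_TFO(tpg)->get_fabric_name();
    }

    /* After: the dereference is spelled out at every call site. */
    static const char *fabric_name_new(struct portal_group *tpg)
    {
        return tpg->se_tpg_tfo->get_fabric_name();
    }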
*/ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) { @@ -623,28 +441,25 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) struct se_task *task; unsigned long flags; - if (!T_TASK(cmd)) - return; - - list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { + list_for_each_entry(task, &cmd->t_task_list, t_list) { dev = task->se_dev; - if (!(dev)) + if (!dev) continue; if (atomic_read(&task->task_active)) continue; - if (!(atomic_read(&task->task_state_active))) + if (!atomic_read(&task->task_state_active)) continue; spin_lock_irqsave(&dev->execute_task_lock, flags); list_del(&task->t_state_list); - DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n", - CMD_TFO(cmd)->tfo_get_task_tag(cmd), dev, task); + pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n", + cmd->se_tfo->get_task_tag(cmd), dev, task); spin_unlock_irqrestore(&dev->execute_task_lock, flags); atomic_set(&task->task_state_active, 0); - atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left); + atomic_dec(&cmd->t_task_cdbs_ex_left); } } @@ -663,34 +478,34 @@ static int transport_cmd_check_stop( { unsigned long flags; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); /* * Determine if IOCTL context caller in requesting the stopping of this * command for LUN shutdown purposes. */ - if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) { - DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)" + if (atomic_read(&cmd->transport_lun_stop)) { + pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)" " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, - CMD_TFO(cmd)->get_task_tag(cmd)); + cmd->se_tfo->get_task_tag(cmd)); cmd->deferred_t_state = cmd->t_state; cmd->t_state = TRANSPORT_DEFERRED_CMD; - atomic_set(&T_TASK(cmd)->t_transport_active, 0); + atomic_set(&cmd->t_transport_active, 0); if (transport_off == 2) transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&T_TASK(cmd)->transport_lun_stop_comp); + complete(&cmd->transport_lun_stop_comp); return 1; } /* * Determine if frontend context caller is requesting the stopping of - * this command for frontend excpections. + * this command for frontend exceptions. */ - if (atomic_read(&T_TASK(cmd)->t_transport_stop)) { - DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) ==" + if (atomic_read(&cmd->t_transport_stop)) { + pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) ==" " TRUE for ITT: 0x%08x\n", __func__, __LINE__, - CMD_TFO(cmd)->get_task_tag(cmd)); + cmd->se_tfo->get_task_tag(cmd)); cmd->deferred_t_state = cmd->t_state; cmd->t_state = TRANSPORT_DEFERRED_CMD; @@ -703,13 +518,13 @@ static int transport_cmd_check_stop( */ if (transport_off == 2) cmd->se_lun = NULL; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&T_TASK(cmd)->t_transport_stop_comp); + complete(&cmd->t_transport_stop_comp); return 1; } if (transport_off) { - atomic_set(&T_TASK(cmd)->t_transport_active, 0); + atomic_set(&cmd->t_transport_active, 0); if (transport_off == 2) { transport_all_task_dev_remove_state(cmd); /* @@ -722,20 +537,20 @@ static int transport_cmd_check_stop( * their internally allocated I/O reference now and * struct se_cmd now. 
*/ - if (CMD_TFO(cmd)->check_stop_free != NULL) { + if (cmd->se_tfo->check_stop_free != NULL) { spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_state_lock, flags); - CMD_TFO(cmd)->check_stop_free(cmd); + cmd->se_tfo->check_stop_free(cmd); return 1; } } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 0; } else if (t_state) cmd->t_state = t_state; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 0; } @@ -747,30 +562,30 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) static void transport_lun_remove_cmd(struct se_cmd *cmd) { - struct se_lun *lun = SE_LUN(cmd); + struct se_lun *lun = cmd->se_lun; unsigned long flags; if (!lun) return; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (!atomic_read(&cmd->transport_dev_active)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); goto check_lun; } - atomic_set(&T_TASK(cmd)->transport_dev_active, 0); + atomic_set(&cmd->transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); check_lun: spin_lock_irqsave(&lun->lun_cmd_lock, flags); - if (atomic_read(&T_TASK(cmd)->transport_lun_active)) { - list_del(&cmd->se_lun_list); - atomic_set(&T_TASK(cmd)->transport_lun_active, 0); + if (atomic_read(&cmd->transport_lun_active)) { + list_del(&cmd->se_lun_node); + atomic_set(&cmd->transport_lun_active, 0); #if 0 - printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n" - CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun); + pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n" + cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun); #endif } spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); @@ -778,92 +593,59 @@ check_lun: void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) { - transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); + transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop_to_fabric(cmd)) return; if (remove) - transport_generic_remove(cmd, 0, 0); + transport_generic_remove(cmd, 0); } void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) { - transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); + transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); if (transport_cmd_check_stop_to_fabric(cmd)) return; - transport_generic_remove(cmd, 0, 0); + transport_generic_remove(cmd, 0); } -static int transport_add_cmd_to_queue( +static void transport_add_cmd_to_queue( struct se_cmd *cmd, int t_state) { struct se_device *dev = cmd->se_dev; - struct se_queue_obj *qobj = dev->dev_queue_obj; - struct se_queue_req *qr; + struct se_queue_obj *qobj = &dev->dev_queue_obj; unsigned long flags; - qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC); - if (!(qr)) { - printk(KERN_ERR "Unable to allocate memory for" - " struct se_queue_req\n"); - return -1; - } - INIT_LIST_HEAD(&qr->qr_list); - - qr->cmd = (void *)cmd; - qr->state = t_state; + INIT_LIST_HEAD(&cmd->se_queue_node); if (t_state) { - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); cmd->t_state = t_state; - 
atomic_set(&T_TASK(cmd)->t_transport_active, 1); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + atomic_set(&cmd->t_transport_active, 1); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } spin_lock_irqsave(&qobj->cmd_queue_lock, flags); - list_add_tail(&qr->qr_list, &qobj->qobj_list); - atomic_inc(&T_TASK(cmd)->t_transport_queue_active); + if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) { + cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL; + list_add(&cmd->se_queue_node, &qobj->qobj_list); + } else + list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); + atomic_inc(&cmd->t_transport_queue_active); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); atomic_inc(&qobj->queue_cnt); wake_up_interruptible(&qobj->thread_wq); - return 0; -} - -/* - * Called with struct se_queue_obj->cmd_queue_lock held. - */ -static struct se_queue_req * -__transport_get_qr_from_queue(struct se_queue_obj *qobj) -{ - struct se_cmd *cmd; - struct se_queue_req *qr = NULL; - - if (list_empty(&qobj->qobj_list)) - return NULL; - - list_for_each_entry(qr, &qobj->qobj_list, qr_list) - break; - - if (qr->cmd) { - cmd = (struct se_cmd *)qr->cmd; - atomic_dec(&T_TASK(cmd)->t_transport_queue_active); - } - list_del(&qr->qr_list); - atomic_dec(&qobj->queue_cnt); - - return qr; } -static struct se_queue_req * -transport_get_qr_from_queue(struct se_queue_obj *qobj) +static struct se_cmd * +transport_get_cmd_from_queue(struct se_queue_obj *qobj) { struct se_cmd *cmd; - struct se_queue_req *qr; unsigned long flags; spin_lock_irqsave(&qobj->cmd_queue_lock, flags); @@ -871,50 +653,42 @@ transport_get_qr_from_queue(struct se_queue_obj *qobj) spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); return NULL; } + cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); - list_for_each_entry(qr, &qobj->qobj_list, qr_list) - break; + atomic_dec(&cmd->t_transport_queue_active); - if (qr->cmd) { - cmd = (struct se_cmd *)qr->cmd; - atomic_dec(&T_TASK(cmd)->t_transport_queue_active); - } - list_del(&qr->qr_list); + list_del(&cmd->se_queue_node); atomic_dec(&qobj->queue_cnt); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - return qr; + return cmd; } static void transport_remove_cmd_from_queue(struct se_cmd *cmd, struct se_queue_obj *qobj) { - struct se_cmd *q_cmd; - struct se_queue_req *qr = NULL, *qr_p = NULL; + struct se_cmd *t; unsigned long flags; spin_lock_irqsave(&qobj->cmd_queue_lock, flags); - if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) { + if (!atomic_read(&cmd->t_transport_queue_active)) { spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); return; } - list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) { - q_cmd = (struct se_cmd *)qr->cmd; - if (q_cmd != cmd) - continue; - - atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active); - atomic_dec(&qobj->queue_cnt); - list_del(&qr->qr_list); - kfree(qr); - } + list_for_each_entry(t, &qobj->qobj_list, se_queue_node) + if (t == cmd) { + atomic_dec(&cmd->t_transport_queue_active); + atomic_dec(&qobj->queue_cnt); + list_del(&cmd->se_queue_node); + break; + } spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) { - printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n", - CMD_TFO(cmd)->get_task_tag(cmd), - atomic_read(&T_TASK(cmd)->t_transport_queue_active)); + if (atomic_read(&cmd->t_transport_queue_active)) { + pr_err("ITT: 0x%08x t_transport_queue_active: %d\n", + cmd->se_tfo->get_task_tag(cmd), + atomic_read(&cmd->t_transport_queue_active)); } } @@ -924,7 
+698,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd, */ void transport_complete_sync_cache(struct se_cmd *cmd, int good) { - struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next, + struct se_task *task = list_entry(cmd->t_task_list.next, struct se_task, t_list); if (good) { @@ -933,7 +707,7 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good) } else { task->task_scsi_status = SAM_STAT_CHECK_CONDITION; task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; - TASK_CMD(task)->transport_error_status = + task->task_se_cmd->transport_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; } @@ -948,22 +722,18 @@ EXPORT_SYMBOL(transport_complete_sync_cache); */ void transport_complete_task(struct se_task *task, int success) { - struct se_cmd *cmd = TASK_CMD(task); + struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = task->se_dev; int t_state; unsigned long flags; #if 0 - printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task, - T_TASK(cmd)->t_task_cdb[0], dev); + pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task, + cmd->t_task_cdb[0], dev); #endif - if (dev) { - spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); + if (dev) atomic_inc(&dev->depth_left); - atomic_inc(&SE_HBA(dev)->left_queue_depth); - spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); - } - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); atomic_set(&task->task_active, 0); /* @@ -985,14 +755,14 @@ void transport_complete_task(struct se_task *task, int success) */ if (atomic_read(&task->task_stop)) { /* - * Decrement T_TASK(cmd)->t_se_count if this task had + * Decrement cmd->t_se_count if this task had * previously thrown its timeout exception handler. */ if (atomic_read(&task->task_timeout)) { - atomic_dec(&T_TASK(cmd)->t_se_count); + atomic_dec(&cmd->t_se_count); atomic_set(&task->task_timeout, 0); } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); complete(&task->task_stop_comp); return; @@ -1003,34 +773,34 @@ void transport_complete_task(struct se_task *task, int success) * the processing thread. */ if (atomic_read(&task->task_timeout)) { - if (!(atomic_dec_and_test( - &T_TASK(cmd)->t_task_cdbs_timeout_left))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + if (!atomic_dec_and_test( + &cmd->t_task_cdbs_timeout_left)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } t_state = TRANSPORT_COMPLETE_TIMEOUT; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_add_cmd_to_queue(cmd, t_state); return; } - atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left); + atomic_dec(&cmd->t_task_cdbs_timeout_left); /* * Decrement the outstanding t_task_cdbs_left count. The last * struct se_task from struct se_cmd will complete itself into the * device queue depending upon int success. 
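The queue rework visible a few hunks back replaces the heap-allocated struct se_queue_req wrapper — one GFP_ATOMIC allocation per enqueue plus a kfree() per dequeue — with a list_head embedded in struct se_cmd itself, which also removes transport_add_cmd_to_queue()'s only failure path and lets it return void. A compact sketch of the embedded-node pattern, hypothetical names throughout:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct cmd {
        int state;
        struct list_head queue_node;    /* embedded: no per-enqueue alloc */
    };

    static LIST_HEAD(queue_list);
    static DEFINE_SPINLOCK(queue_lock);

    static void queue_cmd(struct cmd *c, bool requeue_at_head)
    {
        unsigned long flags;

        spin_lock_irqsave(&queue_lock, flags);
        if (requeue_at_head)            /* QUEUE_FULL retries jump the line */
            list_add(&c->queue_node, &queue_list);
        else
            list_add_tail(&c->queue_node, &queue_list);
        spin_unlock_irqrestore(&queue_lock, flags);
    }

    static struct cmd *dequeue_cmd(void)
    {
        struct cmd *c = NULL;
        unsigned long flags;

        spin_lock_irqsave(&queue_lock, flags);
        if (!list_empty(&queue_list)) {
            /* container_of in disguise: the node yields the cmd back */
            c = list_first_entry(&queue_list, struct cmd, queue_node);
            list_del(&c->queue_node);
        }
        spin_unlock_irqrestore(&queue_lock, flags);
        return c;
    }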
*/ - if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) { + if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { if (!success) - T_TASK(cmd)->t_tasks_failed = 1; + cmd->t_tasks_failed = 1; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } - if (!success || T_TASK(cmd)->t_tasks_failed) { + if (!success || cmd->t_tasks_failed) { t_state = TRANSPORT_COMPLETE_FAILURE; if (!task->task_error_status) { task->task_error_status = @@ -1039,10 +809,10 @@ void transport_complete_task(struct se_task *task, int success) PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } } else { - atomic_set(&T_TASK(cmd)->t_transport_complete, 1); + atomic_set(&cmd->t_transport_complete, 1); t_state = TRANSPORT_COMPLETE_OK; } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_add_cmd_to_queue(cmd, t_state); } @@ -1080,9 +850,9 @@ static inline int transport_add_task_check_sam_attr( &task_prev->t_execute_list : &dev->execute_task_list); - DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x" + pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x" " in execution queue\n", - T_TASK(task->task_se_cmd)->t_task_cdb[0]); + task->task_se_cmd->t_task_cdb[0]); return 1; } /* @@ -1124,8 +894,8 @@ static void __transport_add_task_to_execute_queue( atomic_set(&task->task_state_active, 1); - DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", - CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd), + pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", + task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd), task, dev); } @@ -1135,8 +905,8 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) struct se_task *task; unsigned long flags; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { + spin_lock_irqsave(&cmd->t_state_lock, flags); + list_for_each_entry(task, &cmd->t_task_list, t_list) { dev = task->se_dev; if (atomic_read(&task->task_state_active)) @@ -1146,23 +916,23 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) list_add_tail(&task->t_state_list, &dev->state_task_list); atomic_set(&task->task_state_active, 1); - DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", - CMD_TFO(task->task_se_cmd)->get_task_tag( + pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", + task->task_se_cmd->se_tfo->get_task_tag( task->task_se_cmd), task, dev); spin_unlock(&dev->execute_task_lock); } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } static void transport_add_tasks_from_cmd(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_dev; struct se_task *task, *task_prev = NULL; unsigned long flags; spin_lock_irqsave(&dev->execute_task_lock, flags); - list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { + list_for_each_entry(task, &cmd->t_task_list, t_list) { if (atomic_read(&task->task_execute_queue)) continue; /* @@ -1174,30 +944,6 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd) task_prev = task; } spin_unlock_irqrestore(&dev->execute_task_lock, flags); - - return; -} - -/* transport_get_task_from_execute_queue(): - * - * Called with dev->execute_task_lock held. 
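transport_complete_task() above leans on a counting idiom used throughout this file: an atomic per command tracks how many per-task completions are still outstanding, and only the caller whose atomic_dec_and_test() hits zero proceeds to command-level completion. Reduced to its essentials (stand-in types; the real code sets the failure flag under t_state_lock):

    #include <linux/atomic.h>
    #include <linux/types.h>

    struct cmd {
        atomic_t tasks_left;    /* atomic_set() to the task count at dispatch */
        int tasks_failed;
    };

    /* Called once per finished task, potentially from several CPUs. */
    static void one_task_done(struct cmd *c, bool success)
    {
        if (!success)
            c->tasks_failed = 1;   /* real code holds t_state_lock here */

        /* atomic_dec_and_test() returns true only for the decrement
         * that reaches zero, so exactly one completer continues. */
        if (!atomic_dec_and_test(&c->tasks_left))
            return;

        /* last outstanding task: queue command-level completion here */
    }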
- */ -static struct se_task * -transport_get_task_from_execute_queue(struct se_device *dev) -{ - struct se_task *task; - - if (list_empty(&dev->execute_task_list)) - return NULL; - - list_for_each_entry(task, &dev->execute_task_list, t_execute_list) - break; - - list_del(&task->t_execute_list); - atomic_set(&task->task_execute_queue, 0); - atomic_dec(&dev->execute_tasks); - - return task; } /* transport_remove_task_from_execute_queue(): @@ -1222,6 +968,40 @@ void transport_remove_task_from_execute_queue( spin_unlock_irqrestore(&dev->execute_task_lock, flags); } +/* + * Handle QUEUE_FULL / -EAGAIN status + */ + +static void target_qf_do_work(struct work_struct *work) +{ + struct se_device *dev = container_of(work, struct se_device, + qf_work_queue); + struct se_cmd *cmd, *cmd_tmp; + + spin_lock_irq(&dev->qf_cmd_lock); + list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) { + + list_del(&cmd->se_qf_node); + atomic_dec(&dev->dev_qf_count); + smp_mb__after_atomic_dec(); + spin_unlock_irq(&dev->qf_cmd_lock); + + pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" + " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, + (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" : + (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" + : "UNKNOWN"); + /* + * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd + * has been added to head of queue + */ + transport_add_cmd_to_queue(cmd, cmd->t_state); + + spin_lock_irq(&dev->qf_cmd_lock); + } + spin_unlock_irq(&dev->qf_cmd_lock); +} + unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) { switch (cmd->data_direction) { @@ -1269,7 +1049,7 @@ void transport_dump_dev_state( atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left), dev->queue_depth); *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", - DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors); + dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors); *bl += sprintf(b + *bl, " "); } @@ -1279,33 +1059,29 @@ void transport_dump_dev_state( */ static void transport_release_all_cmds(struct se_device *dev) { - struct se_cmd *cmd = NULL; - struct se_queue_req *qr = NULL, *qr_p = NULL; + struct se_cmd *cmd, *tcmd; int bug_out = 0, t_state; unsigned long flags; - spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); - list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list, - qr_list) { - - cmd = (struct se_cmd *)qr->cmd; - t_state = qr->state; - list_del(&qr->qr_list); - kfree(qr); - spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, + spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); + list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list, + se_queue_node) { + t_state = cmd->t_state; + list_del(&cmd->se_queue_node); + spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags); - printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u," + pr_err("Releasing ITT: 0x%08x, i_state: %u," " t_state: %u directly\n", - CMD_TFO(cmd)->get_task_tag(cmd), - CMD_TFO(cmd)->get_cmd_state(cmd), t_state); + cmd->se_tfo->get_task_tag(cmd), + cmd->se_tfo->get_cmd_state(cmd), t_state); transport_release_fe_cmd(cmd); bug_out = 1; - spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); + spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); } - spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags); + spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags); #if 0 if (bug_out) BUG(); @@ -1362,7 +1138,7 @@ void 
transport_dump_vpd_proto_id( if (p_buf) strncpy(p_buf, buf, p_buf_len); else - printk(KERN_INFO "%s", buf); + pr_debug("%s", buf); } void @@ -1387,7 +1163,8 @@ int transport_dump_vpd_assoc( int p_buf_len) { unsigned char buf[VPD_TMP_BUF_SIZE]; - int ret = 0, len; + int ret = 0; + int len; memset(buf, 0, VPD_TMP_BUF_SIZE); len = sprintf(buf, "T10 VPD Identifier Association: "); @@ -1404,14 +1181,14 @@ int transport_dump_vpd_assoc( break; default: sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); - ret = -1; + ret = -EINVAL; break; } if (p_buf) strncpy(p_buf, buf, p_buf_len); else - printk("%s", buf); + pr_debug("%s", buf); return ret; } @@ -1434,7 +1211,8 @@ int transport_dump_vpd_ident_type( int p_buf_len) { unsigned char buf[VPD_TMP_BUF_SIZE]; - int ret = 0, len; + int ret = 0; + int len; memset(buf, 0, VPD_TMP_BUF_SIZE); len = sprintf(buf, "T10 VPD Identifier Type: "); @@ -1461,14 +1239,17 @@ int transport_dump_vpd_ident_type( default: sprintf(buf+len, "Unsupported: 0x%02x\n", vpd->device_identifier_type); - ret = -1; + ret = -EINVAL; break; } - if (p_buf) + if (p_buf) { + if (p_buf_len < strlen(buf)+1) + return -EINVAL; strncpy(p_buf, buf, p_buf_len); - else - printk("%s", buf); + } else { + pr_debug("%s", buf); + } return ret; } @@ -1511,14 +1292,14 @@ int transport_dump_vpd_ident( default: sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" " 0x%02x", vpd->device_identifier_code_set); - ret = -1; + ret = -EINVAL; break; } if (p_buf) strncpy(p_buf, buf, p_buf_len); else - printk("%s", buf); + pr_debug("%s", buf); return ret; } @@ -1569,51 +1350,51 @@ static void core_setup_task_attr_emulation(struct se_device *dev) * This is currently not available in upsream Linux/SCSI Target * mode code, and is assumed to be disabled while using TCM/pSCSI. 
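One behavioral change is buried in the VPD hunk above: transport_dump_vpd_ident_type() now refuses to copy into p_buf unless the formatted string fits, terminator included — strncpy() alone truncates silently and can leave the destination unterminated. The guard as a standalone helper (hypothetical name):

    #include <linux/string.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Copy src into dst only if it fits together with its NUL. */
    static int copy_ident_string(char *dst, size_t dst_len, const char *src)
    {
        if (dst_len < strlen(src) + 1)
            return -EINVAL;
        strncpy(dst, src, dst_len);
        return 0;
    }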
*/ - if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { + if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; return; } dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; - DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" - " device\n", TRANSPORT(dev)->name, - TRANSPORT(dev)->get_device_rev(dev)); + pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" + " device\n", dev->transport->name, + dev->transport->get_device_rev(dev)); } static void scsi_dump_inquiry(struct se_device *dev) { - struct t10_wwn *wwn = DEV_T10_WWN(dev); + struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; int i, device_type; /* * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer */ - printk(" Vendor: "); + pr_debug(" Vendor: "); for (i = 0; i < 8; i++) if (wwn->vendor[i] >= 0x20) - printk("%c", wwn->vendor[i]); + pr_debug("%c", wwn->vendor[i]); else - printk(" "); + pr_debug(" "); - printk(" Model: "); + pr_debug(" Model: "); for (i = 0; i < 16; i++) if (wwn->model[i] >= 0x20) - printk("%c", wwn->model[i]); + pr_debug("%c", wwn->model[i]); else - printk(" "); + pr_debug(" "); - printk(" Revision: "); + pr_debug(" Revision: "); for (i = 0; i < 4; i++) if (wwn->revision[i] >= 0x20) - printk("%c", wwn->revision[i]); + pr_debug("%c", wwn->revision[i]); else - printk(" "); + pr_debug(" "); - printk("\n"); + pr_debug("\n"); - device_type = TRANSPORT(dev)->get_device_type(dev); - printk(" Type: %s ", scsi_device_type(device_type)); - printk(" ANSI SCSI revision: %02x\n", - TRANSPORT(dev)->get_device_rev(dev)); + device_type = dev->transport->get_device_type(dev); + pr_debug(" Type: %s ", scsi_device_type(device_type)); + pr_debug(" ANSI SCSI revision: %02x\n", + dev->transport->get_device_rev(dev)); } struct se_device *transport_add_device_to_core_hba( @@ -1630,33 +1411,15 @@ struct se_device *transport_add_device_to_core_hba( struct se_device *dev; dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); - if (!(dev)) { - printk(KERN_ERR "Unable to allocate memory for se_dev_t\n"); - return NULL; - } - dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL); - if (!(dev->dev_queue_obj)) { - printk(KERN_ERR "Unable to allocate memory for" - " dev->dev_queue_obj\n"); - kfree(dev); + if (!dev) { + pr_err("Unable to allocate memory for se_dev_t\n"); return NULL; } - transport_init_queue_obj(dev->dev_queue_obj); - - dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj), - GFP_KERNEL); - if (!(dev->dev_status_queue_obj)) { - printk(KERN_ERR "Unable to allocate memory for" - " dev->dev_status_queue_obj\n"); - kfree(dev->dev_queue_obj); - kfree(dev); - return NULL; - } - transport_init_queue_obj(dev->dev_status_queue_obj); + transport_init_queue_obj(&dev->dev_queue_obj); dev->dev_flags = device_flags; dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; - dev->dev_ptr = (void *) transport_dev; + dev->dev_ptr = transport_dev; dev->se_hba = hba; dev->se_sub_dev = se_dev; dev->transport = transport; @@ -1668,6 +1431,7 @@ struct se_device *transport_add_device_to_core_hba( INIT_LIST_HEAD(&dev->delayed_cmd_list); INIT_LIST_HEAD(&dev->ordered_cmd_list); INIT_LIST_HEAD(&dev->state_task_list); + INIT_LIST_HEAD(&dev->qf_cmd_list); spin_lock_init(&dev->execute_task_lock); spin_lock_init(&dev->delayed_cmd_lock); spin_lock_init(&dev->ordered_cmd_lock); @@ -1678,6 +1442,7 @@ struct se_device *transport_add_device_to_core_hba( spin_lock_init(&dev->dev_status_thr_lock); spin_lock_init(&dev->se_port_lock); 
spin_lock_init(&dev->se_tmr_lock); + spin_lock_init(&dev->qf_cmd_lock); dev->queue_depth = dev_limits->queue_depth; atomic_set(&dev->depth_left, dev->queue_depth); @@ -1715,13 +1480,16 @@ struct se_device *transport_add_device_to_core_hba( * Startup the struct se_device processing thread */ dev->process_thread = kthread_run(transport_processing_thread, dev, - "LIO_%s", TRANSPORT(dev)->name); + "LIO_%s", dev->transport->name); if (IS_ERR(dev->process_thread)) { - printk(KERN_ERR "Unable to create kthread: LIO_%s\n", - TRANSPORT(dev)->name); + pr_err("Unable to create kthread: LIO_%s\n", + dev->transport->name); goto out; } - + /* + * Setup work_queue for QUEUE_FULL + */ + INIT_WORK(&dev->qf_work_queue, target_qf_do_work); /* * Preload the initial INQUIRY const values if we are doing * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI @@ -1730,16 +1498,16 @@ struct se_device *transport_add_device_to_core_hba( * originals once back into DEV_T10_WWN(dev) for the virtual device * setup. */ - if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { - if (!(inquiry_prod) || !(inquiry_prod)) { - printk(KERN_ERR "All non TCM/pSCSI plugins require" + if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { + if (!inquiry_prod || !inquiry_rev) { + pr_err("All non TCM/pSCSI plugins require" " INQUIRY consts\n"); goto out; } - strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8); - strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16); - strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4); + strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8); + strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16); + strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4); } scsi_dump_inquiry(dev); @@ -1754,8 +1522,6 @@ out: se_release_vpd_for_dev(dev); - kfree(dev->dev_status_queue_obj); - kfree(dev->dev_queue_obj); kfree(dev); return NULL; @@ -1794,12 +1560,11 @@ transport_generic_get_task(struct se_cmd *cmd, enum dma_data_direction data_direction) { struct se_task *task; - struct se_device *dev = SE_DEV(cmd); - unsigned long flags; + struct se_device *dev = cmd->se_dev; - task = dev->transport->alloc_task(cmd); + task = dev->transport->alloc_task(cmd->t_task_cdb); if (!task) { - printk(KERN_ERR "Unable to allocate struct se_task\n"); + pr_err("Unable to allocate struct se_task\n"); return NULL; } @@ -1807,26 +1572,15 @@ transport_generic_get_task(struct se_cmd *cmd, INIT_LIST_HEAD(&task->t_execute_list); INIT_LIST_HEAD(&task->t_state_list); init_completion(&task->task_stop_comp); - task->task_no = T_TASK(cmd)->t_tasks_no++; task->task_se_cmd = cmd; task->se_dev = dev; task->task_data_direction = data_direction; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); - return task; } static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); -void transport_device_setup_cmd(struct se_cmd *cmd) -{ - cmd->se_dev = SE_LUN(cmd)->lun_se_dev; -} -EXPORT_SYMBOL(transport_device_setup_cmd); - /* * Used by fabric modules containing a local struct se_cmd within their * fabric dependent per I/O descriptor. 
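transport_add_device_to_core_hba() also slims down: dev_queue_obj becomes an embedded member rather than a separately kzalloc()'d object (two allocations and their error paths disappear), the copy-paste bug "!(inquiry_prod) || !(inquiry_prod)" is corrected to actually test inquiry_rev, and the per-device processing thread is spawned with kthread_run(), whose result must be checked with IS_ERR() because it returns an ERR_PTR, never NULL. A minimal sketch of that thread lifecycle under hypothetical names:

    #include <linux/kthread.h>
    #include <linux/err.h>
    #include <linux/sched.h>
    #include <linux/printk.h>

    static int processing_thread(void *param)
    {
        /* param would be the owning device */
        while (!kthread_should_stop())
            schedule_timeout_interruptible(HZ);  /* stand-in for real work */
        return 0;
    }

    static struct task_struct *start_device_thread(void *dev, const char *name)
    {
        struct task_struct *t;

        t = kthread_run(processing_thread, dev, "LIO_%s", name);
        if (IS_ERR(t)) {       /* kthread_run() returns ERR_PTR(), not NULL */
            pr_err("Unable to create kthread: LIO_%s\n", name);
            return NULL;
        }
        return t;              /* shut down later with kthread_stop(t) */
    }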
@@ -1840,20 +1594,17 @@ void transport_init_se_cmd( int task_attr, unsigned char *sense_buffer) { - INIT_LIST_HEAD(&cmd->se_lun_list); - INIT_LIST_HEAD(&cmd->se_delayed_list); - INIT_LIST_HEAD(&cmd->se_ordered_list); - /* - * Setup t_task pointer to t_task_backstore - */ - cmd->t_task = &cmd->t_task_backstore; + INIT_LIST_HEAD(&cmd->se_lun_node); + INIT_LIST_HEAD(&cmd->se_delayed_node); + INIT_LIST_HEAD(&cmd->se_ordered_node); + INIT_LIST_HEAD(&cmd->se_qf_node); - INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list); - init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp); - init_completion(&T_TASK(cmd)->transport_lun_stop_comp); - init_completion(&T_TASK(cmd)->t_transport_stop_comp); - spin_lock_init(&T_TASK(cmd)->t_state_lock); - atomic_set(&T_TASK(cmd)->transport_dev_active, 1); + INIT_LIST_HEAD(&cmd->t_task_list); + init_completion(&cmd->transport_lun_fe_stop_comp); + init_completion(&cmd->transport_lun_stop_comp); + init_completion(&cmd->t_transport_stop_comp); + spin_lock_init(&cmd->t_state_lock); + atomic_set(&cmd->transport_dev_active, 1); cmd->se_tfo = tfo; cmd->se_sess = se_sess; @@ -1870,23 +1621,23 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd) * Check if SAM Task Attribute emulation is enabled for this * struct se_device storage object */ - if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) + if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) return 0; if (cmd->sam_task_attr == MSG_ACA_TAG) { - DEBUG_STA("SAM Task Attribute ACA" + pr_debug("SAM Task Attribute ACA" " emulation is not supported\n"); - return -1; + return -EINVAL; } /* * Used to determine when ORDERED commands should go from * Dormant to Active status. */ - cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id); + cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id); smp_mb__after_atomic_inc(); - DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", + pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", cmd->se_ordered_id, cmd->sam_task_attr, - TRANSPORT(cmd->se_dev)->name); + cmd->se_dev->transport->name); return 0; } @@ -1898,8 +1649,8 @@ void transport_free_se_cmd( /* * Check and free any extended CDB buffer that was allocated */ - if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb) - kfree(T_TASK(se_cmd)->t_task_cdb); + if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb) + kfree(se_cmd->t_task_cdb); } EXPORT_SYMBOL(transport_free_se_cmd); @@ -1922,42 +1673,41 @@ int transport_generic_allocate_tasks( */ cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; - transport_device_setup_cmd(cmd); /* * Ensure that the received CDB is less than the max (252 + 8) bytes * for VARIABLE_LENGTH_CMD */ if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { - printk(KERN_ERR "Received SCSI CDB with command_size: %d that" + pr_err("Received SCSI CDB with command_size: %d that" " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); - return -1; + return -EINVAL; } /* * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, * allocate the additional extended CDB buffer now.. Otherwise * setup the pointer from __t_task_cdb to t_task_cdb. 
-        if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) {
-                T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb),
+        if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
+                cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
                                 GFP_KERNEL);
-                if (!(T_TASK(cmd)->t_task_cdb)) {
-                        printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb"
-                                " %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n",
+                if (!cmd->t_task_cdb) {
+                        pr_err("Unable to allocate cmd->t_task_cdb"
+                                " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
                                 scsi_command_size(cdb),
-                                (unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb));
-                        return -1;
+                                (unsigned long)sizeof(cmd->__t_task_cdb));
+                        return -ENOMEM;
                 }
         } else
-                T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0];
+                cmd->t_task_cdb = &cmd->__t_task_cdb[0];
         /*
-         * Copy the original CDB into T_TASK(cmd).
+         * Copy the original CDB into cmd->
         */
-        memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb));
+        memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
         /*
         * Setup the received CDB based on SCSI defined opcodes and
         * perform unit attention, persistent reservations and ALUA
-         * checks for virtual device backends. The T_TASK(cmd)->t_task_cdb
+         * checks for virtual device backends. The cmd->t_task_cdb
         * pointer is expected to be setup before we reach this point.
         */
         ret = transport_generic_cmd_sequencer(cmd, cdb);
@@ -1969,7 +1719,7 @@ int transport_generic_allocate_tasks(
         if (transport_check_alloc_task_attr(cmd) < 0) {
                 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-                return -2;
+                return -EINVAL;
         }
         spin_lock(&cmd->se_lun->lun_sep_lock);
         if (cmd->se_lun->lun_sep)
@@ -1986,10 +1736,10 @@ EXPORT_SYMBOL(transport_generic_allocate_tasks);
 int transport_generic_handle_cdb(
         struct se_cmd *cmd)
 {
-        if (!SE_LUN(cmd)) {
+        if (!cmd->se_lun) {
                 dump_stack();
-                printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
-                return -1;
+                pr_err("cmd->se_lun is NULL\n");
+                return -EINVAL;
         }
 
         transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
@@ -1998,6 +1748,29 @@ int transport_generic_handle_cdb(
 EXPORT_SYMBOL(transport_generic_handle_cdb);
 
 /*
+ * Used by fabric module frontends to queue tasks directly.
+ * May only be used from process context.
+ */
+int transport_handle_cdb_direct(
+        struct se_cmd *cmd)
+{
+        if (!cmd->se_lun) {
+                dump_stack();
+                pr_err("cmd->se_lun is NULL\n");
+                return -EINVAL;
+        }
+        if (in_interrupt()) {
+                dump_stack();
+                pr_err("transport_generic_handle_cdb cannot be called"
+                                " from interrupt context\n");
+                return -EINVAL;
+        }
+
+        return transport_generic_new_cmd(cmd);
+}
+EXPORT_SYMBOL(transport_handle_cdb_direct);
+
+/*
  * Used by fabric module frontends defining a TFO->new_cmd_map() caller
  * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
  * complete setup in TCM process context w/ TFO->new_cmd_map().
@@ -2005,10 +1778,10 @@ EXPORT_SYMBOL(transport_generic_handle_cdb);
 int transport_generic_handle_cdb_map(
         struct se_cmd *cmd)
 {
-        if (!SE_LUN(cmd)) {
+        if (!cmd->se_lun) {
                 dump_stack();
-                printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
-                return -1;
+                pr_err("cmd->se_lun is NULL\n");
+                return -EINVAL;
         }
 
         transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
@@ -2030,7 +1803,7 @@ int transport_generic_handle_data(
         * in interrupt code, the signal_pending() check is skipped.
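The CDB setup above is a small-buffer optimization: every struct se_cmd carries a fixed inline __t_task_cdb[] array sized for common CDBs, and only VARIABLE_LENGTH_CMD CDBs larger than that are kzalloc()'d; the free side (transport_free_se_cmd(), earlier in this diff) must therefore compare pointers before calling kfree(). The same shape as a standalone sketch, with an assumed 32-byte inline size standing in for TCM_MAX_COMMAND_SIZE:

    #include <linux/slab.h>
    #include <linux/string.h>

    #define INLINE_CDB_SIZE 32        /* stand-in for TCM_MAX_COMMAND_SIZE */

    struct cmd {
        unsigned char *cdb;           /* points at inline buf or heap copy */
        unsigned char __cdb[INLINE_CDB_SIZE];
    };

    static int cmd_set_cdb(struct cmd *c, const unsigned char *cdb,
                           unsigned int len)
    {
        if (len > sizeof(c->__cdb)) {
            c->cdb = kzalloc(len, GFP_KERNEL);
            if (!c->cdb)
                return -ENOMEM;
        } else {
            c->cdb = &c->__cdb[0];    /* common case: no allocation */
        }
        memcpy(c->cdb, cdb, len);
        return 0;
    }

    static void cmd_free_cdb(struct cmd *c)
    {
        if (c->cdb != c->__cdb)       /* only free the heap fallback */
            kfree(c->cdb);
    }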
*/ if (!in_interrupt() && signal_pending(current)) - return -1; + return -EPERM; /* * If the received CDB has aleady been ABORTED by the generic * target engine, we now call transport_check_aborted_status() @@ -2057,7 +1830,6 @@ int transport_generic_handle_tmr( * This is needed for early exceptions. */ cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; - transport_device_setup_cmd(cmd); transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); return 0; @@ -2077,16 +1849,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) unsigned long flags; int ret = 0; - DEBUG_TS("ITT[0x%08x] - Stopping tasks\n", - CMD_TFO(cmd)->get_task_tag(cmd)); + pr_debug("ITT[0x%08x] - Stopping tasks\n", + cmd->se_tfo->get_task_tag(cmd)); /* * No tasks remain in the execution queue */ - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, - &T_TASK(cmd)->t_task_list, t_list) { - DEBUG_TS("task_no[%d] - Processing task %p\n", + &cmd->t_task_list, t_list) { + pr_debug("task_no[%d] - Processing task %p\n", task->task_no, task); /* * If the struct se_task has not been sent and is not active, @@ -2094,14 +1866,14 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) */ if (!atomic_read(&task->task_sent) && !atomic_read(&task->task_active)) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_remove_task_from_execute_queue(task, task->se_dev); - DEBUG_TS("task_no[%d] - Removed from execute queue\n", + pr_debug("task_no[%d] - Removed from execute queue\n", task->task_no); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); continue; } @@ -2111,42 +1883,32 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) */ if (atomic_read(&task->task_active)) { atomic_set(&task->task_stop, 1); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + spin_unlock_irqrestore(&cmd->t_state_lock, flags); - DEBUG_TS("task_no[%d] - Waiting to complete\n", + pr_debug("task_no[%d] - Waiting to complete\n", task->task_no); wait_for_completion(&task->task_stop_comp); - DEBUG_TS("task_no[%d] - Stopped successfully\n", + pr_debug("task_no[%d] - Stopped successfully\n", task->task_no); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); + spin_lock_irqsave(&cmd->t_state_lock, flags); + atomic_dec(&cmd->t_task_cdbs_left); atomic_set(&task->task_active, 0); atomic_set(&task->task_stop, 0); } else { - DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no); + pr_debug("task_no[%d] - Did nothing\n", task->task_no); ret++; } __transport_stop_task_timer(task, &flags); } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return ret; } -static void transport_failure_reset_queue_depth(struct se_device *dev) -{ - unsigned long flags; - - spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); - atomic_inc(&dev->depth_left); - atomic_inc(&SE_HBA(dev)->left_queue_depth); - spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); -} - /* * Handle SAM-esque emulation for generic transport request failures. 
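transport_stop_tasks_for_cmd() above implements a stop handshake for in-flight tasks: the stopper raises an atomic task_stop flag, drops the lock, and sleeps on a per-task completion, which transport_complete_task() fires when it observes the flag. The core of that handshake, stripped of the timer and queue bookkeeping (stand-in struct; TF_* timer handling omitted):

    #include <linux/completion.h>
    #include <linux/atomic.h>

    struct task {
        atomic_t active;
        atomic_t stop;
        struct completion stop_comp;  /* init_completion() at task setup */
    };

    /* Stopper side: ask a running task to park, wait until it does. */
    static void stop_task(struct task *t)
    {
        if (!atomic_read(&t->active))
            return;                      /* never started: nothing to wait for */
        atomic_set(&t->stop, 1);
        wait_for_completion(&t->stop_comp);  /* sleeps: process context only */
        atomic_set(&t->active, 0);
        atomic_set(&t->stop, 0);
    }

    /* Completion side, called when the task's I/O finishes. */
    static void task_done(struct task *t)
    {
        if (atomic_read(&t->stop)) {
            complete(&t->stop_comp);     /* wake the stopper, stop here */
            return;
        }
        /* normal completion path continues */
    }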
*/ @@ -2156,29 +1918,31 @@ static void transport_generic_request_failure( int complete, int sc) { - DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" - " CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd), - T_TASK(cmd)->t_task_cdb[0]); - DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:" + int ret = 0; + + pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" + " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), + cmd->t_task_cdb[0]); + pr_debug("-----[ i_state: %d t_state/def_t_state:" " %d/%d transport_error_status: %d\n", - CMD_TFO(cmd)->get_cmd_state(cmd), + cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, cmd->deferred_t_state, cmd->transport_error_status); - DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d" + pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" " t_transport_active: %d t_transport_stop: %d" - " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs, - atomic_read(&T_TASK(cmd)->t_task_cdbs_left), - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), - atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left), - atomic_read(&T_TASK(cmd)->t_transport_active), - atomic_read(&T_TASK(cmd)->t_transport_stop), - atomic_read(&T_TASK(cmd)->t_transport_sent)); + " t_transport_sent: %d\n", cmd->t_task_list_num, + atomic_read(&cmd->t_task_cdbs_left), + atomic_read(&cmd->t_task_cdbs_sent), + atomic_read(&cmd->t_task_cdbs_ex_left), + atomic_read(&cmd->t_transport_active), + atomic_read(&cmd->t_transport_stop), + atomic_read(&cmd->t_transport_sent)); transport_stop_all_task_timers(cmd); if (dev) - transport_failure_reset_queue_depth(dev); + atomic_inc(&dev->depth_left); /* * For SAM Task Attribute emulation for failed struct se_cmd */ @@ -2211,8 +1975,8 @@ static void transport_generic_request_failure( * we force this session to fall back to session * recovery. 
*/ - CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess); - CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0); + cmd->se_tfo->fall_back_to_erl0(cmd->se_sess); + cmd->se_tfo->stop_session(cmd->se_sess, 0, 0); goto check_stop; case PYX_TRANSPORT_LU_COMM_FAILURE: @@ -2240,13 +2004,15 @@ static void transport_generic_request_failure( * * See spc4r17, section 7.4.6 Control Mode Page, Table 349 */ - if (SE_SESS(cmd) && - DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2) - core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl, + if (cmd->se_sess && + cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) + core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, cmd->orig_fe_lun, 0x2C, ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); - CMD_TFO(cmd)->queue_status(cmd); + ret = cmd->se_tfo->queue_status(cmd); + if (ret == -EAGAIN) + goto queue_full; goto check_stop; case PYX_TRANSPORT_USE_SENSE_REASON: /* @@ -2254,8 +2020,8 @@ static void transport_generic_request_failure( */ break; default: - printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n", - T_TASK(cmd)->t_task_cdb[0], + pr_err("Unknown transport error for CDB 0x%02x: %d\n", + cmd->t_task_cdb[0], cmd->transport_error_status); cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; break; @@ -2263,32 +2029,41 @@ static void transport_generic_request_failure( if (!sc) transport_new_cmd_failure(cmd); - else - transport_send_check_condition_and_sense(cmd, - cmd->scsi_sense_reason, 0); + else { + ret = transport_send_check_condition_and_sense(cmd, + cmd->scsi_sense_reason, 0); + if (ret == -EAGAIN) + goto queue_full; + } + check_stop: transport_lun_remove_cmd(cmd); - if (!(transport_cmd_check_stop_to_fabric(cmd))) + if (!transport_cmd_check_stop_to_fabric(cmd)) ; + return; + +queue_full: + cmd->t_state = TRANSPORT_COMPLETE_OK; + transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); } static void transport_direct_request_timeout(struct se_cmd *cmd) { unsigned long flags; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (!atomic_read(&cmd->t_transport_timeout)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } - if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + if (atomic_read(&cmd->t_task_cdbs_timeout_left)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } - atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout), - &T_TASK(cmd)->t_se_count); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + atomic_sub(atomic_read(&cmd->t_transport_timeout), + &cmd->t_se_count); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } static void transport_generic_request_timeout(struct se_cmd *cmd) @@ -2296,35 +2071,18 @@ static void transport_generic_request_timeout(struct se_cmd *cmd) unsigned long flags; /* - * Reset T_TASK(cmd)->t_se_count to allow transport_generic_remove() + * Reset cmd->t_se_count to allow transport_generic_remove() * to allow last call to free memory resources. 
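The queue_full label above is one end of the -EAGAIN plumbing this commit threads through the completion paths: when a fabric callback such as queue_status() returns -EAGAIN, the command is parked on a per-device list and target_qf_do_work() (added in an earlier hunk) later re-queues it at the head of the processing queue. Both halves condensed into a sketch — hypothetical names, the stubs stand in for the fabric callback and the requeue:

    #include <linux/workqueue.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct dev {
        spinlock_t qf_lock;
        struct list_head qf_list;
        struct work_struct qf_work;  /* INIT_WORK(&dev->qf_work, qf_do_work) */
    };

    struct cmd {
        struct list_head qf_node;
        struct dev *dev;
    };

    static void requeue_at_head(struct cmd *c) { /* re-enter processing queue */ }
    static int deliver_status(struct cmd *c) { return -EAGAIN; /* fabric stub */ }

    static void handle_queue_full(struct cmd *c)
    {
        struct dev *d = c->dev;

        spin_lock_irq(&d->qf_lock);
        list_add_tail(&c->qf_node, &d->qf_list);
        spin_unlock_irq(&d->qf_lock);
        schedule_work(&d->qf_work);      /* retry later, out of this context */
    }

    static void qf_do_work(struct work_struct *work)
    {
        struct dev *d = container_of(work, struct dev, qf_work);
        struct cmd *c, *tmp;

        spin_lock_irq(&d->qf_lock);
        list_for_each_entry_safe(c, tmp, &d->qf_list, qf_node) {
            list_del(&c->qf_node);
            spin_unlock_irq(&d->qf_lock);  /* drop lock while re-queueing */
            requeue_at_head(c);
            spin_lock_irq(&d->qf_lock);
        }
        spin_unlock_irq(&d->qf_lock);
    }

    static void try_complete(struct cmd *c)
    {
        if (deliver_status(c) == -EAGAIN)  /* the new retry convention */
            handle_queue_full(c);
    }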
*/ - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) { - int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1); - - atomic_sub(tmp, &T_TASK(cmd)->t_se_count); - } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); - - transport_generic_remove(cmd, 0, 0); -} - -static int -transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length) -{ - unsigned char *buf; + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (atomic_read(&cmd->t_transport_timeout) > 1) { + int tmp = (atomic_read(&cmd->t_transport_timeout) - 1); - buf = kzalloc(data_length, GFP_KERNEL); - if (!(buf)) { - printk(KERN_ERR "Unable to allocate memory for buffer\n"); - return -1; + atomic_sub(tmp, &cmd->t_se_count); } + spin_unlock_irqrestore(&cmd->t_state_lock, flags); - T_TASK(cmd)->t_tasks_se_num = 0; - T_TASK(cmd)->t_task_buf = buf; - - return 0; + transport_generic_remove(cmd, 0); } static inline u32 transport_lba_21(unsigned char *cdb) @@ -2364,9 +2122,9 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) { unsigned long flags; - spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags); + spin_lock_irqsave(&se_cmd->t_state_lock, flags); se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; - spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); } /* @@ -2375,14 +2133,14 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) static void transport_task_timeout_handler(unsigned long data) { struct se_task *task = (struct se_task *)data; - struct se_cmd *cmd = TASK_CMD(task); + struct se_cmd *cmd = task->task_se_cmd; unsigned long flags; - DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd); + pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); if (task->task_flags & TF_STOP) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } task->task_flags &= ~TF_RUNNING; @@ -2390,46 +2148,46 @@ static void transport_task_timeout_handler(unsigned long data) /* * Determine if transport_complete_task() has already been called. 
*/ - if (!(atomic_read(&task->task_active))) { - DEBUG_TT("transport task: %p cmd: %p timeout task_active" + if (!atomic_read(&task->task_active)) { + pr_debug("transport task: %p cmd: %p timeout task_active" " == 0\n", task, cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } - atomic_inc(&T_TASK(cmd)->t_se_count); - atomic_inc(&T_TASK(cmd)->t_transport_timeout); - T_TASK(cmd)->t_tasks_failed = 1; + atomic_inc(&cmd->t_se_count); + atomic_inc(&cmd->t_transport_timeout); + cmd->t_tasks_failed = 1; atomic_set(&task->task_timeout, 1); task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; task->task_scsi_status = 1; if (atomic_read(&task->task_stop)) { - DEBUG_TT("transport task: %p cmd: %p timeout task_stop" + pr_debug("transport task: %p cmd: %p timeout task_stop" " == 1\n", task, cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); complete(&task->task_stop_comp); return; } - if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) { - DEBUG_TT("transport task: %p cmd: %p timeout non zero" + if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { + pr_debug("transport task: %p cmd: %p timeout non zero" " t_task_cdbs_left\n", task, cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } - DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", + pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", task, cmd); cmd->t_state = TRANSPORT_COMPLETE_FAILURE; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); } /* - * Called with T_TASK(cmd)->t_state_lock held. + * Called with cmd->t_state_lock held. */ static void transport_start_task_timer(struct se_task *task) { @@ -2441,8 +2199,8 @@ static void transport_start_task_timer(struct se_task *task) /* * If the task_timeout is disabled, exit now. */ - timeout = DEV_ATTRIB(dev)->task_timeout; - if (!(timeout)) + timeout = dev->se_sub_dev->se_dev_attrib.task_timeout; + if (!timeout) return; init_timer(&task->task_timer); @@ -2453,27 +2211,27 @@ static void transport_start_task_timer(struct se_task *task) task->task_flags |= TF_RUNNING; add_timer(&task->task_timer); #if 0 - printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:" + pr_debug("Starting task timer for cmd: %p task: %p seconds:" " %d\n", task->task_se_cmd, task, timeout); #endif } /* - * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held. + * Called with spin_lock_irq(&cmd->t_state_lock) held. 
*/ void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) { - struct se_cmd *cmd = TASK_CMD(task); + struct se_cmd *cmd = task->task_se_cmd; if (!(task->task_flags & TF_RUNNING)) return; task->task_flags |= TF_STOP; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags); + spin_unlock_irqrestore(&cmd->t_state_lock, *flags); del_timer_sync(&task->task_timer); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags); + spin_lock_irqsave(&cmd->t_state_lock, *flags); task->task_flags &= ~TF_RUNNING; task->task_flags &= ~TF_STOP; } @@ -2483,11 +2241,11 @@ static void transport_stop_all_task_timers(struct se_cmd *cmd) struct se_task *task = NULL, *task_tmp; unsigned long flags; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, - &T_TASK(cmd)->t_task_list, t_list) + &cmd->t_task_list, t_list) __transport_stop_task_timer(task, &flags); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } static inline int transport_tcq_window_closed(struct se_device *dev) @@ -2498,7 +2256,7 @@ static inline int transport_tcq_window_closed(struct se_device *dev) } else msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG); - wake_up_interruptible(&dev->dev_queue_obj->thread_wq); + wake_up_interruptible(&dev->dev_queue_obj.thread_wq); return 0; } @@ -2511,45 +2269,45 @@ static inline int transport_tcq_window_closed(struct se_device *dev) */ static inline int transport_execute_task_attr(struct se_cmd *cmd) { - if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) + if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) return 1; /* * Check for the existence of HEAD_OF_QUEUE, and if true return 1 * to allow the passed struct se_cmd list of tasks to the front of the list. */ if (cmd->sam_task_attr == MSG_HEAD_TAG) { - atomic_inc(&SE_DEV(cmd)->dev_hoq_count); + atomic_inc(&cmd->se_dev->dev_hoq_count); smp_mb__after_atomic_inc(); - DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" + pr_debug("Added HEAD_OF_QUEUE for CDB:" " 0x%02x, se_ordered_id: %u\n", - T_TASK(cmd)->t_task_cdb[0], + cmd->t_task_cdb[0], cmd->se_ordered_id); return 1; } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { - spin_lock(&SE_DEV(cmd)->ordered_cmd_lock); - list_add_tail(&cmd->se_ordered_list, - &SE_DEV(cmd)->ordered_cmd_list); - spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock); + spin_lock(&cmd->se_dev->ordered_cmd_lock); + list_add_tail(&cmd->se_ordered_node, + &cmd->se_dev->ordered_cmd_list); + spin_unlock(&cmd->se_dev->ordered_cmd_lock); - atomic_inc(&SE_DEV(cmd)->dev_ordered_sync); + atomic_inc(&cmd->se_dev->dev_ordered_sync); smp_mb__after_atomic_inc(); - DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered" + pr_debug("Added ORDERED for CDB: 0x%02x to ordered" " list, se_ordered_id: %u\n", - T_TASK(cmd)->t_task_cdb[0], + cmd->t_task_cdb[0], cmd->se_ordered_id); /* * Add ORDERED command to tail of execution queue if * no other older commands exist that need to be * completed first. 
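As an illustration of the dispatch rule described here, a minimal standalone sketch (the names and the C11 atomics below are invented for this aside and are not part of the patch): HEAD_OF_QUEUE always executes immediately, ORDERED may only start once no SIMPLE commands remain in flight, and SIMPLE commands must wait behind any pending ORDERED command.

#include <stdatomic.h>
#include <stdbool.h>

enum sam_attr { ATTR_SIMPLE, ATTR_HEAD_OF_QUEUE, ATTR_ORDERED };

struct dev_counts {
        atomic_int simple_cmds;         /* in-flight SIMPLE commands */
        atomic_int ordered_pending;     /* ORDERED commands not yet drained */
};

/* Returns true when the command may be queued for execution now,
 * false when it must be parked on the delayed list. */
static bool may_execute_now(struct dev_counts *dev, enum sam_attr attr)
{
        switch (attr) {
        case ATTR_HEAD_OF_QUEUE:
                return true;                            /* jumps the queue */
        case ATTR_ORDERED:
                atomic_fetch_add(&dev->ordered_pending, 1);
                return atomic_load(&dev->simple_cmds) == 0;
        default:                                        /* SIMPLE / untagged */
                atomic_fetch_add(&dev->simple_cmds, 1);
                return atomic_load(&dev->ordered_pending) == 0;
        }
}

The kernel code additionally counts HEAD_OF_QUEUE commands in dev_hoq_count and links ORDERED commands onto ordered_cmd_list, so that transport_complete_task_attr() can drain the delayed list later.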
*/ - if (!(atomic_read(&SE_DEV(cmd)->simple_cmds))) + if (!atomic_read(&cmd->se_dev->simple_cmds)) return 1; } else { /* * For SIMPLE and UNTAGGED Task Attribute commands */ - atomic_inc(&SE_DEV(cmd)->simple_cmds); + atomic_inc(&cmd->se_dev->simple_cmds); smp_mb__after_atomic_inc(); } /* @@ -2557,20 +2315,20 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd) * add the dormant task(s) built for the passed struct se_cmd to the * execution queue and become in Active state for this struct se_device. */ - if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) { + if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) { /* * Otherwise, add cmd w/ tasks to delayed cmd queue that * will be drained upon completion of HEAD_OF_QUEUE task. */ - spin_lock(&SE_DEV(cmd)->delayed_cmd_lock); + spin_lock(&cmd->se_dev->delayed_cmd_lock); cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; - list_add_tail(&cmd->se_delayed_list, - &SE_DEV(cmd)->delayed_cmd_list); - spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock); + list_add_tail(&cmd->se_delayed_node, + &cmd->se_dev->delayed_cmd_list); + spin_unlock(&cmd->se_dev->delayed_cmd_lock); - DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to" + pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" " delayed CMD list, se_ordered_id: %u\n", - T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr, + cmd->t_task_cdb[0], cmd->sam_task_attr, cmd->se_ordered_id); /* * Return zero to let transport_execute_tasks() know @@ -2592,25 +2350,23 @@ static int transport_execute_tasks(struct se_cmd *cmd) { int add_tasks; - if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) { - if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { - cmd->transport_error_status = - PYX_TRANSPORT_LU_COMM_FAILURE; - transport_generic_request_failure(cmd, NULL, 0, 1); - return 0; - } + if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { + cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; + transport_generic_request_failure(cmd, NULL, 0, 1); + return 0; } + /* * Call transport_cmd_check_stop() to see if a fabric exception * has occurred that prevents execution. */ - if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) { + if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) { /* * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE * attribute for the tasks of the received struct se_cmd CDB */ add_tasks = transport_execute_task_attr(cmd); - if (add_tasks == 0) + if (!add_tasks) goto execute_tasks; /* * This calls transport_add_tasks_from_cmd() to handle @@ -2625,7 +2381,7 @@ static int transport_execute_tasks(struct se_cmd *cmd) * storage object. */ execute_tasks: - __transport_execute_tasks(SE_DEV(cmd)); + __transport_execute_tasks(cmd->se_dev); return 0; } @@ -2639,51 +2395,49 @@ static int __transport_execute_tasks(struct se_device *dev) { int error; struct se_cmd *cmd = NULL; - struct se_task *task; + struct se_task *task = NULL; unsigned long flags; /* * Check if there is enough room in the device and HBA queue to send - * struct se_transport_task's to the selected transport. + * struct se_tasks to the selected transport. 
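__transport_execute_tasks() claims one unit of dev->depth_left for each dispatched task and backs off through transport_tcq_window_closed() when the device queue is exhausted. Roughly, the claim-or-back-off step looks like this (a hedged standalone sketch; the names and C11 atomics are invented here, standing in for the kernel's atomic_t):

#include <stdatomic.h>

/* Try to claim one unit of device queue depth. Returns 0 on success;
 * -1 means the TCQ window is closed and the caller should back off
 * until a completion returns a slot. */
static int claim_queue_slot(atomic_int *depth_left)
{
        int old = atomic_load(depth_left);

        while (old > 0) {
                if (atomic_compare_exchange_weak(depth_left, &old, old - 1))
                        return 0;       /* slot claimed, dispatch one task */
                /* CAS failure reloaded 'old'; retry while depth remains */
        }
        return -1;                      /* saturated */
}

The kernel version is deliberately looser: it tests depth_left with atomic_read() and decrements separately, tolerating a brief race between concurrent submitters; the compare-and-swap loop above is the strict form of the same idea.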
*/ check_depth: - spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); - if (!(atomic_read(&dev->depth_left)) || - !(atomic_read(&SE_HBA(dev)->left_queue_depth))) { - spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); + if (!atomic_read(&dev->depth_left)) return transport_tcq_window_closed(dev); - } - dev->dev_tcq_window_closed = 0; - spin_lock(&dev->execute_task_lock); - task = transport_get_task_from_execute_queue(dev); - spin_unlock(&dev->execute_task_lock); + dev->dev_tcq_window_closed = 0; - if (!task) { - spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); + spin_lock_irq(&dev->execute_task_lock); + if (list_empty(&dev->execute_task_list)) { + spin_unlock_irq(&dev->execute_task_lock); return 0; } + task = list_first_entry(&dev->execute_task_list, + struct se_task, t_execute_list); + list_del(&task->t_execute_list); + atomic_set(&task->task_execute_queue, 0); + atomic_dec(&dev->execute_tasks); + spin_unlock_irq(&dev->execute_task_lock); atomic_dec(&dev->depth_left); - atomic_dec(&SE_HBA(dev)->left_queue_depth); - spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); - cmd = TASK_CMD(task); + cmd = task->task_se_cmd; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); atomic_set(&task->task_active, 1); atomic_set(&task->task_sent, 1); - atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent); + atomic_inc(&cmd->t_task_cdbs_sent); - if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) == - T_TASK(cmd)->t_task_cdbs) + if (atomic_read(&cmd->t_task_cdbs_sent) == + cmd->t_task_list_num) atomic_set(&cmd->transport_sent, 1); transport_start_task_timer(task); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); /* * The struct se_cmd->transport_emulate_cdb() function pointer is used - * to grab REPORT_LUNS CDBs before they hit the + * to grab REPORT_LUNS and other CDBs we want to handle before they hit the * struct se_subsystem_api->do_task() caller below. */ if (cmd->transport_emulate_cdb) { @@ -2718,11 +2472,11 @@ check_depth: * call ->do_task() directly and let the underlying TCM subsystem plugin * code handle the CDB emulation. */ - if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) && - (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) + if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) && + (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) error = transport_emulate_control_cdb(task); else - error = TRANSPORT(dev)->do_task(task); + error = dev->transport->do_task(task); if (error != 0) { cmd->transport_error_status = error; @@ -2745,12 +2499,10 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd) * Any unsolicited data will get dumped for failed command inside of * the fabric plugin */ - spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags); + spin_lock_irqsave(&se_cmd->t_state_lock, flags); se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags); - - CMD_TFO(se_cmd)->new_cmd_failure(se_cmd); + spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); } static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); @@ -2760,7 +2512,7 @@ static inline u32 transport_get_sectors_6( struct se_cmd *cmd, int *ret) { - struct se_device *dev = SE_LUN(cmd)->lun_se_dev; + struct se_device *dev = cmd->se_dev; /* * Assume TYPE_DISK for non struct se_device objects. 
@@ -2772,7 +2524,7 @@ static inline u32 transport_get_sectors_6( /* * Use 24-bit allocation length for TYPE_TAPE. */ - if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) + if (dev->transport->get_device_type(dev) == TYPE_TAPE) return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; /* @@ -2788,7 +2540,7 @@ static inline u32 transport_get_sectors_10( struct se_cmd *cmd, int *ret) { - struct se_device *dev = SE_LUN(cmd)->lun_se_dev; + struct se_device *dev = cmd->se_dev; /* * Assume TYPE_DISK for non struct se_device objects. @@ -2800,8 +2552,8 @@ static inline u32 transport_get_sectors_10( /* * XXX_10 is not defined in SSC, throw an exception */ - if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { - *ret = -1; + if (dev->transport->get_device_type(dev) == TYPE_TAPE) { + *ret = -EINVAL; return 0; } @@ -2818,7 +2570,7 @@ static inline u32 transport_get_sectors_12( struct se_cmd *cmd, int *ret) { - struct se_device *dev = SE_LUN(cmd)->lun_se_dev; + struct se_device *dev = cmd->se_dev; /* * Assume TYPE_DISK for non struct se_device objects. @@ -2830,8 +2582,8 @@ static inline u32 transport_get_sectors_12( /* * XXX_12 is not defined in SSC, throw an exception */ - if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { - *ret = -1; + if (dev->transport->get_device_type(dev) == TYPE_TAPE) { + *ret = -EINVAL; return 0; } @@ -2848,7 +2600,7 @@ static inline u32 transport_get_sectors_16( struct se_cmd *cmd, int *ret) { - struct se_device *dev = SE_LUN(cmd)->lun_se_dev; + struct se_device *dev = cmd->se_dev; /* * Assume TYPE_DISK for non struct se_device objects. @@ -2860,7 +2612,7 @@ static inline u32 transport_get_sectors_16( /* * Use 24-bit allocation length for TYPE_TAPE. */ - if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) + if (dev->transport->get_device_type(dev) == TYPE_TAPE) return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; type_disk: @@ -2890,57 +2642,30 @@ static inline u32 transport_get_size( unsigned char *cdb, struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_dev; - if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { + if (dev->transport->get_device_type(dev) == TYPE_TAPE) { if (cdb[1] & 1) { /* sectors */ - return DEV_ATTRIB(dev)->block_size * sectors; + return dev->se_sub_dev->se_dev_attrib.block_size * sectors; } else /* bytes */ return sectors; } #if 0 - printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for" - " %s object\n", DEV_ATTRIB(dev)->block_size, sectors, - DEV_ATTRIB(dev)->block_size * sectors, - TRANSPORT(dev)->name); + pr_debug("Returning block_size: %u, sectors: %u == %u for" + " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors, + dev->se_sub_dev->se_dev_attrib.block_size * sectors, + dev->transport->name); #endif - return DEV_ATTRIB(dev)->block_size * sectors; -} - -unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]) -{ - unsigned char result = 0; - /* - * MSB - */ - if ((val[0] >= 'a') && (val[0] <= 'f')) - result = ((val[0] - 'a' + 10) & 0xf) << 4; - else - if ((val[0] >= 'A') && (val[0] <= 'F')) - result = ((val[0] - 'A' + 10) & 0xf) << 4; - else /* digit */ - result = ((val[0] - '0') & 0xf) << 4; - /* - * LSB - */ - if ((val[1] >= 'a') && (val[1] <= 'f')) - result |= ((val[1] - 'a' + 10) & 0xf); - else - if ((val[1] >= 'A') && (val[1] <= 'F')) - result |= ((val[1] - 'A' + 10) & 0xf); - else /* digit */ - result |= ((val[1] - '0') & 0xf); - - return result; + return dev->se_sub_dev->se_dev_attrib.block_size * sectors; } 
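The transport_xor_callback() converted in the next hunk implements the XDWRITEREAD data path: the blocks read back from media are XOR-merged with the just-written payload, and the result becomes the data-in buffer. Stripped of scatterlists and kmap_atomic(), the merge step reduces to the following (a self-contained sketch with invented names, not kernel code):

#include <stddef.h>

/* XOR 'len' bytes of the written payload into the blocks read back
 * from media; per sbc3r22 section 5.48 the XOR result is what gets
 * returned in the data-in buffer. */
static void xdwriteread_merge(unsigned char *read_back,
                              const unsigned char *written, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++)
                read_back[i] ^= written[i];
}

The kernel version performs this walk per scatterlist element, which is why it carries a running offset into the contiguous copy of the WRITE payload.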
-EXPORT_SYMBOL(transport_asciihex_to_binaryhex); static void transport_xor_callback(struct se_cmd *cmd) { unsigned char *buf, *addr; - struct se_mem *se_mem; + struct scatterlist *sg; unsigned int offset; int i; + int count; /* * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command * @@ -2953,32 +2678,37 @@ static void transport_xor_callback(struct se_cmd *cmd) * 5) transfer the resulting XOR data to the data-in buffer. */ buf = kmalloc(cmd->data_length, GFP_KERNEL); - if (!(buf)) { - printk(KERN_ERR "Unable to allocate xor_callback buf\n"); + if (!buf) { + pr_err("Unable to allocate xor_callback buf\n"); return; } /* - * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list + * Copy the scatterlist WRITE buffer located at cmd->t_data_sg * into the locally allocated *buf */ - transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list); + sg_copy_to_buffer(cmd->t_data_sg, + cmd->t_data_nents, + buf, + cmd->data_length); + /* * Now perform the XOR against the BIDI read memory located at - * T_TASK(cmd)->t_mem_bidi_list + * cmd->t_mem_bidi_list */ offset = 0; - list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) { - addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0); - if (!(addr)) + for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { + addr = kmap_atomic(sg_page(sg), KM_USER0); + if (!addr) goto out; - for (i = 0; i < se_mem->se_len; i++) - *(addr + se_mem->se_off + i) ^= *(buf + offset + i); + for (i = 0; i < sg->length; i++) + *(addr + sg->offset + i) ^= *(buf + offset + i); - offset += se_mem->se_len; + offset += sg->length; kunmap_atomic(addr, KM_USER0); } + out: kfree(buf); } @@ -2994,75 +2724,60 @@ static int transport_get_sense_data(struct se_cmd *cmd) unsigned long flags; u32 offset = 0; - if (!SE_LUN(cmd)) { - printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); - return -1; - } - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + WARN_ON(!cmd->se_lun); + + spin_lock_irqsave(&cmd->t_state_lock, flags); if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 0; } list_for_each_entry_safe(task, task_tmp, - &T_TASK(cmd)->t_task_list, t_list) { + &cmd->t_task_list, t_list) { if (!task->task_sense) continue; dev = task->se_dev; - if (!(dev)) + if (!dev) continue; - if (!TRANSPORT(dev)->get_sense_buffer) { - printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer" + if (!dev->transport->get_sense_buffer) { + pr_err("dev->transport->get_sense_buffer" " is NULL\n"); continue; } - sense_buffer = TRANSPORT(dev)->get_sense_buffer(task); - if (!(sense_buffer)) { - printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate" + sense_buffer = dev->transport->get_sense_buffer(task); + if (!sense_buffer) { + pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate" " sense buffer for task with sense\n", - CMD_TFO(cmd)->get_task_tag(cmd), task->task_no); + cmd->se_tfo->get_task_tag(cmd), task->task_no); continue; } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); - offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd, + offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER); - memcpy((void *)&buffer[offset], (void *)sense_buffer, + memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER); cmd->scsi_status = task->task_scsi_status; /* Automatically padded */ cmd->scsi_sense_length = (TRANSPORT_SENSE_BUFFER + offset); - printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set 
SAM STATUS: 0x%02x" + pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" " and sense\n", - dev->se_hba->hba_id, TRANSPORT(dev)->name, + dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); return 0; } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return -1; } -static int transport_allocate_resources(struct se_cmd *cmd) -{ - u32 length = cmd->data_length; - - if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || - (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) - return transport_generic_get_mem(cmd, length, PAGE_SIZE); - else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) - return transport_generic_allocate_buf(cmd, length); - else - return 0; -} - static int transport_handle_reservation_conflict(struct se_cmd *cmd) { @@ -3077,12 +2792,40 @@ transport_handle_reservation_conflict(struct se_cmd *cmd) * * See spc4r17, section 7.4.6 Control Mode Page, Table 349 */ - if (SE_SESS(cmd) && - DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2) - core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl, + if (cmd->se_sess && + cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) + core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, cmd->orig_fe_lun, 0x2C, ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); - return -2; + return -EINVAL; +} + +static inline long long transport_dev_end_lba(struct se_device *dev) +{ + return dev->transport->get_blocks(dev) + 1; +} + +static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + u32 sectors; + + if (dev->transport->get_device_type(dev) != TYPE_DISK) + return 0; + + sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); + + if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) { + pr_err("LBA: %llu Sectors: %u exceeds" + " transport_dev_end_lba(): %llu\n", + cmd->t_task_lba, sectors, + transport_dev_end_lba(dev)); + pr_err(" We should return CHECK_CONDITION" + " but we don't yet\n"); + return 0; + } + + return sectors; } /* transport_generic_cmd_sequencer(): @@ -3099,7 +2842,7 @@ static int transport_generic_cmd_sequencer( struct se_cmd *cmd, unsigned char *cdb) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_dev; struct se_subsystem_dev *su_dev = dev->se_sub_dev; int ret = 0, sector_ret = 0, passthrough; u32 sectors = 0, size = 0, pr_reg_type = 0; @@ -3113,12 +2856,12 @@ static int transport_generic_cmd_sequencer( &transport_nop_wait_for_tasks; cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; - return -2; + return -EINVAL; } /* * Check status of Asymmetric Logical Unit Assignment port */ - ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq); + ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); if (ret != 0) { cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; /* @@ -3128,22 +2871,22 @@ static int transport_generic_cmd_sequencer( */ if (ret > 0) { #if 0 - printk(KERN_INFO "[%s]: ALUA TG Port not available," + pr_debug("[%s]: ALUA TG Port not available," " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", - CMD_TFO(cmd)->get_fabric_name(), alua_ascq); + cmd->se_tfo->get_fabric_name(), alua_ascq); #endif transport_set_sense_codes(cmd, 0x04, alua_ascq); cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; - return -2; + return -EINVAL; } goto out_invalid_cdb_field; } /* * Check status for SPC-3 Persistent Reservations */ - if 
(T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) { - if (T10_PR_OPS(su_dev)->t10_seq_non_holder( + if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) { + if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( cmd, cdb, pr_reg_type) != 0) return transport_handle_reservation_conflict(cmd); /* @@ -3160,7 +2903,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_6; - T_TASK(cmd)->t_task_lba = transport_lba_21(cdb); + cmd->t_task_lba = transport_lba_21(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case READ_10: @@ -3169,7 +2912,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_10; - T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); + cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case READ_12: @@ -3178,7 +2921,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_12; - T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); + cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case READ_16: @@ -3187,7 +2930,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_16; - T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); + cmd->t_task_lba = transport_lba_64(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_6: @@ -3196,7 +2939,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_6; - T_TASK(cmd)->t_task_lba = transport_lba_21(cdb); + cmd->t_task_lba = transport_lba_21(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_10: @@ -3205,8 +2948,8 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_10; - T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); - T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); + cmd->t_task_lba = transport_lba_32(cdb); + cmd->t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_12: @@ -3215,8 +2958,8 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_12; - T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); - T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); + cmd->t_task_lba = transport_lba_32(cdb); + cmd->t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_16: @@ -3225,22 +2968,22 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_16; - T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); - T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); + cmd->t_task_lba = transport_lba_64(cdb); + cmd->t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case XDWRITEREAD_10: if ((cmd->data_direction != DMA_TO_DEVICE) || - !(T_TASK(cmd)->t_tasks_bidi)) + !(cmd->t_tasks_bidi)) goto out_invalid_cdb_field; sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); if (sector_ret) goto 
out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_10; - T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); + cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; - passthrough = (TRANSPORT(dev)->transport_type == + passthrough = (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV); /* * Skip the remaining assignments for TCM/PSCSI passthrough @@ -3251,7 +2994,7 @@ static int transport_generic_cmd_sequencer( * Setup BIDI XOR callback to be run during transport_generic_complete_ok() */ cmd->transport_complete_callback = &transport_xor_callback; - T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); + cmd->t_tasks_fua = (cdb[1] & 0x8); break; case VARIABLE_LENGTH_CMD: service_action = get_unaligned_be16(&cdb[8]); /* * Determine if this is TCM/PSCSI device and we should disable * internal emulation for this CDB. */ - passthrough = (TRANSPORT(dev)->transport_type == + passthrough = (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV); switch (service_action) { @@ -3273,7 +3016,7 @@ static int transport_generic_cmd_sequencer( * XDWRITE_READ_32 logic. */ cmd->transport_split_cdb = &split_cdb_XX_32; - T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb); + cmd->t_task_lba = transport_lba_64_ext(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; /* @@ -3287,14 +3030,22 @@ static int transport_generic_cmd_sequencer( * transport_generic_complete_ok() */ cmd->transport_complete_callback = &transport_xor_callback; - T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8); + cmd->t_tasks_fua = (cdb[10] & 0x8); break; case WRITE_SAME_32: sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); if (sector_ret) goto out_unsupported_cdb; - size = transport_get_size(sectors, cdb, cmd); - T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]); + + if (sectors) + size = transport_get_size(sectors, cdb, cmd); + else { + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" + " supported\n"); + goto out_invalid_cdb_field; + } + + cmd->t_task_lba = get_unaligned_be64(&cdb[12]); cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; /* @@ -3304,7 +3055,7 @@ static int transport_generic_cmd_sequencer( break; if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { - printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" + pr_err("WRITE_SAME PBDATA and LBDATA" " bits not supported for Block Discard" " Emulation\n"); goto out_invalid_cdb_field; @@ -3314,28 +3065,28 @@ static int transport_generic_cmd_sequencer( * tpws with the UNMAP=1 bit set. */ if (!(cdb[10] & 0x08)) { - printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not" + pr_err("WRITE_SAME w/o UNMAP bit not" " supported for Block Discard Emulation\n"); goto out_invalid_cdb_field; } break; default: - printk(KERN_ERR "VARIABLE_LENGTH_CMD service action" + pr_err("VARIABLE_LENGTH_CMD service action" " 0x%04x not supported\n", service_action); goto out_unsupported_cdb; } break; - case 0xa3: - if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) { + case MAINTENANCE_IN: + if (dev->transport->get_device_type(dev) != TYPE_ROM) { /* MAINTENANCE_IN from SCC-2 */ /* * Check for emulated MI_REPORT_TARGET_PGS. */ if (cdb[1] == MI_REPORT_TARGET_PGS) { cmd->transport_emulate_cdb = - (T10_ALUA(su_dev)->alua_type == + (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) ? 
- &core_emulate_report_target_port_groups : + core_emulate_report_target_port_groups : NULL; } size = (cdb[6] << 24) | (cdb[7] << 16) | @@ -3344,7 +3095,7 @@ static int transport_generic_cmd_sequencer( /* GPCMD_SEND_KEY from multi media commands */ size = (cdb[8] << 8) + cdb[9]; } - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case MODE_SELECT: size = cdb[4]; @@ -3356,7 +3107,7 @@ static int transport_generic_cmd_sequencer( break; case MODE_SENSE: size = cdb[4]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case MODE_SENSE_10: case GPCMD_READ_BUFFER_CAPACITY: @@ -3364,11 +3115,11 @@ static int transport_generic_cmd_sequencer( case LOG_SELECT: case LOG_SENSE: size = (cdb[7] << 8) + cdb[8]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case READ_BLOCK_LIMITS: size = READ_BLOCK_LEN; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case GPCMD_GET_CONFIGURATION: case GPCMD_READ_FORMAT_CAPACITIES: @@ -3380,11 +3131,11 @@ static int transport_generic_cmd_sequencer( case PERSISTENT_RESERVE_IN: case PERSISTENT_RESERVE_OUT: cmd->transport_emulate_cdb = - (T10_RES(su_dev)->res_type == + (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) ? - &core_scsi3_emulate_pr : NULL; + core_scsi3_emulate_pr : NULL; size = (cdb[7] << 8) + cdb[8]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case GPCMD_MECHANISM_STATUS: case GPCMD_READ_DVD_STRUCTURE: @@ -3393,19 +3144,19 @@ static int transport_generic_cmd_sequencer( break; case READ_POSITION: size = READ_POSITION_LEN; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; - case 0xa4: - if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) { + case MAINTENANCE_OUT: + if (dev->transport->get_device_type(dev) != TYPE_ROM) { /* MAINTENANCE_OUT from SCC-2 * * Check for emulated MO_SET_TARGET_PGS. */ if (cdb[1] == MO_SET_TARGET_PGS) { cmd->transport_emulate_cdb = - (T10_ALUA(su_dev)->alua_type == + (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) ? - &core_emulate_set_target_port_groups : + core_emulate_set_target_port_groups : NULL; } @@ -3415,7 +3166,7 @@ static int transport_generic_cmd_sequencer( /* GPCMD_REPORT_KEY from multi media commands */ size = (cdb[8] << 8) + cdb[9]; } - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case INQUIRY: size = (cdb[3] << 8) + cdb[4]; @@ -3423,23 +3174,23 @@ static int transport_generic_cmd_sequencer( * Do implict HEAD_OF_QUEUE processing for INQUIRY. 
* See spc4r17 section 5.3 */ - if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) cmd->sam_task_attr = MSG_HEAD_TAG; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case READ_BUFFER: size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case READ_CAPACITY: size = READ_CAP_LEN; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case READ_MEDIA_SERIAL_NUMBER: case SECURITY_PROTOCOL_IN: case SECURITY_PROTOCOL_OUT: size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case SERVICE_ACTION_IN: case ACCESS_CONTROL_IN: @@ -3450,36 +3201,36 @@ static int transport_generic_cmd_sequencer( case WRITE_ATTRIBUTE: size = (cdb[10] << 24) | (cdb[11] << 16) | (cdb[12] << 8) | cdb[13]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case RECEIVE_DIAGNOSTIC: case SEND_DIAGNOSTIC: size = (cdb[3] << 8) | cdb[4]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */ #if 0 case GPCMD_READ_CD: sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; size = (2336 * sectors); - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; #endif case READ_TOC: size = cdb[8]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case REQUEST_SENSE: size = cdb[4]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case READ_ELEMENT_STATUS: size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case WRITE_BUFFER: size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case RESERVE: case RESERVE_10: @@ -3500,9 +3251,9 @@ static int transport_generic_cmd_sequencer( * emulation disabled. */ cmd->transport_emulate_cdb = - (T10_RES(su_dev)->res_type != + (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) ? - &core_scsi2_emulate_crh : NULL; + core_scsi2_emulate_crh : NULL; cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; break; case RELEASE: @@ -3517,9 +3268,9 @@ static int transport_generic_cmd_sequencer( size = cmd->data_length; cmd->transport_emulate_cdb = - (T10_RES(su_dev)->res_type != + (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) ? 
- &core_scsi2_emulate_crh : NULL; + core_scsi2_emulate_crh : NULL; cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; break; case SYNCHRONIZE_CACHE: @@ -3529,10 +3280,10 @@ */ if (cdb[0] == SYNCHRONIZE_CACHE) { sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); - T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); + cmd->t_task_lba = transport_lba_32(cdb); } else { sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); - T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); + cmd->t_task_lba = transport_lba_64(cdb); } if (sector_ret) goto out_unsupported_cdb; @@ -3543,7 +3294,7 @@ static int transport_generic_cmd_sequencer( /* * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb() */ - if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) break; /* * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation @@ -3554,32 +3305,27 @@ static int transport_generic_cmd_sequencer( * Check to ensure that LBA + Range does not exceed past end of * device. */ - if (transport_get_sectors(cmd) < 0) + if (!transport_cmd_get_valid_sectors(cmd)) goto out_invalid_cdb_field; break; case UNMAP: size = get_unaligned_be16(&cdb[7]); - passthrough = (TRANSPORT(dev)->transport_type == - TRANSPORT_PLUGIN_PHBA_PDEV); - /* - * Determine if the received UNMAP used to for direct passthrough - * into Linux/SCSI with struct request via TCM/pSCSI or we are - * signaling the use of internal transport_generic_unmap() emulation - * for UNMAP -> Linux/BLOCK disbard with TCM/IBLOCK and TCM/FILEIO - * subsystem plugin backstores. - */ - if (!(passthrough)) - cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP; - - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case WRITE_SAME_16: sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); if (sector_ret) goto out_unsupported_cdb; - size = transport_get_size(sectors, cdb, cmd); - T_TASK(cmd)->t_task_lba = get_unaligned_be16(&cdb[2]); - passthrough = (TRANSPORT(dev)->transport_type == + + if (sectors) + size = transport_get_size(sectors, cdb, cmd); + else { + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); + goto out_invalid_cdb_field; + } + + cmd->t_task_lba = get_unaligned_be64(&cdb[2]); + passthrough = (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV); /* * Determine if the received WRITE_SAME_16 is used to for direct @@ -3588,9 +3334,9 @@ static int transport_generic_cmd_sequencer( * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and * TCM/FILEIO subsystem plugin backstores. */ - if (!(passthrough)) { + if (!passthrough) { if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { - printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" + pr_err("WRITE_SAME PBDATA and LBDATA" " bits not supported for Block Discard" " Emulation\n"); goto out_invalid_cdb_field; @@ -3600,7 +3346,7 @@ static int transport_generic_cmd_sequencer( * tpws with the UNMAP=1 bit set. 
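For orientation, WRITE SAME (16) keeps its fields at fixed CDB offsets per SBC-3: the UNMAP bit is bit 3 of byte 1 (the 0x08 test below), the LBA occupies bytes 2..9 (which calls for a 64-bit big-endian read at &cdb[2]), and the number of blocks occupies bytes 10..13. A small decoding sketch with invented names, using a hand-rolled big-endian load in place of the kernel's get_unaligned_be*() helpers:

#include <stdint.h>

struct write_same16 {
        int unmap;              /* byte 1, bit 3 */
        uint64_t lba;           /* bytes 2..9, big-endian */
        uint32_t num_blocks;    /* bytes 10..13, big-endian */
};

/* Assemble an n-byte big-endian integer from a raw CDB pointer. */
static uint64_t be_load(const uint8_t *p, int nbytes)
{
        uint64_t v = 0;

        while (nbytes--)
                v = (v << 8) | *p++;
        return v;
}

static struct write_same16 decode_write_same16(const uint8_t cdb[16])
{
        struct write_same16 ws = {
                .unmap      = (cdb[1] & 0x08) != 0,
                .lba        = be_load(&cdb[2], 8),
                .num_blocks = (uint32_t)be_load(&cdb[10], 4),
        };
        return ws;
}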
*/ if (!(cdb[1] & 0x08)) { - printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not " + pr_err("WRITE_SAME w/o UNMAP bit not " " supported for Block Discard Emulation\n"); goto out_invalid_cdb_field; } @@ -3625,34 +3371,34 @@ static int transport_generic_cmd_sequencer( break; case REPORT_LUNS: cmd->transport_emulate_cdb = - &transport_core_report_lun_response; + transport_core_report_lun_response; size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; /* * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS * See spc4r17 section 5.3 */ - if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) cmd->sam_task_attr = MSG_HEAD_TAG; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; default: - printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode" + pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" " 0x%02x, sending CHECK_CONDITION.\n", - CMD_TFO(cmd)->get_fabric_name(), cdb[0]); + cmd->se_tfo->get_fabric_name(), cdb[0]); cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; goto out_unsupported_cdb; } if (size != cmd->data_length) { - printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:" + pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" " %u does not match SCSI CDB Length: %u for SAM Opcode:" - " 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(), + " 0x%02x\n", cmd->se_tfo->get_fabric_name(), cmd->data_length, size, cdb[0]); cmd->cmd_spdtl = size; if (cmd->data_direction == DMA_TO_DEVICE) { - printk(KERN_ERR "Rejecting underflow/overflow" + pr_err("Rejecting underflow/overflow" " WRITE data\n"); goto out_invalid_cdb_field; } @@ -3660,10 +3406,10 @@ static int transport_generic_cmd_sequencer( * Reject READ_* or WRITE_* with overflow/underflow for * type SCF_SCSI_DATA_SG_IO_CDB. */ - if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512)) { - printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op" + if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) { + pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" " CDB on non 512-byte sector setup subsystem" - " plugin: %s\n", TRANSPORT(dev)->name); + " plugin: %s\n", dev->transport->name); /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ goto out_invalid_cdb_field; } @@ -3678,105 +3424,22 @@ static int transport_generic_cmd_sequencer( cmd->data_length = size; } + /* Let's limit control cdbs to a page, for simplicity's sake. */ + if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && + size > PAGE_SIZE) + goto out_invalid_cdb_field; + transport_set_supported_SAM_opcode(cmd); return ret; out_unsupported_cdb: cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; - return -2; + return -EINVAL; out_invalid_cdb_field: cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; - return -2; -} - -static inline void transport_release_tasks(struct se_cmd *); - -/* - * This function will copy a contiguous *src buffer into a destination - * struct scatterlist array. 
- */ -static void transport_memcpy_write_contig( - struct se_cmd *cmd, - struct scatterlist *sg_d, - unsigned char *src) -{ - u32 i = 0, length = 0, total_length = cmd->data_length; - void *dst; - - while (total_length) { - length = sg_d[i].length; - - if (length > total_length) - length = total_length; - - dst = sg_virt(&sg_d[i]); - - memcpy(dst, src, length); - - if (!(total_length -= length)) - return; - - src += length; - i++; - } -} - -/* - * This function will copy a struct scatterlist array *sg_s into a destination - * contiguous *dst buffer. - */ -static void transport_memcpy_read_contig( - struct se_cmd *cmd, - unsigned char *dst, - struct scatterlist *sg_s) -{ - u32 i = 0, length = 0, total_length = cmd->data_length; - void *src; - - while (total_length) { - length = sg_s[i].length; - - if (length > total_length) - length = total_length; - - src = sg_virt(&sg_s[i]); - - memcpy(dst, src, length); - - if (!(total_length -= length)) - return; - - dst += length; - i++; - } -} - -static void transport_memcpy_se_mem_read_contig( - struct se_cmd *cmd, - unsigned char *dst, - struct list_head *se_mem_list) -{ - struct se_mem *se_mem; - void *src; - u32 length = 0, total_length = cmd->data_length; - - list_for_each_entry(se_mem, se_mem_list, se_list) { - length = se_mem->se_len; - - if (length > total_length) - length = total_length; - - src = page_address(se_mem->se_page) + se_mem->se_off; - - memcpy(dst, src, length); - - if (!(total_length -= length)) - return; - - dst += length; - } + return -EINVAL; } /* @@ -3786,7 +3449,7 @@ static void transport_memcpy_se_mem_read_contig( */ static void transport_complete_task_attr(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_dev; struct se_cmd *cmd_p, *cmd_tmp; int new_active_tasks = 0; @@ -3794,25 +3457,25 @@ static void transport_complete_task_attr(struct se_cmd *cmd) atomic_dec(&dev->simple_cmds); smp_mb__after_atomic_dec(); dev->dev_cur_ordered_id++; - DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for" + pr_debug("Incremented dev->dev_cur_ordered_id: %u for" " SIMPLE: %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { atomic_dec(&dev->dev_hoq_count); smp_mb__after_atomic_dec(); dev->dev_cur_ordered_id++; - DEBUG_STA("Incremented dev_cur_ordered_id: %u for" + pr_debug("Incremented dev_cur_ordered_id: %u for" " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { spin_lock(&dev->ordered_cmd_lock); - list_del(&cmd->se_ordered_list); + list_del(&cmd->se_ordered_node); atomic_dec(&dev->dev_ordered_sync); smp_mb__after_atomic_dec(); spin_unlock(&dev->ordered_cmd_lock); dev->dev_cur_ordered_id++; - DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:" + pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); } /* @@ -3822,15 +3485,15 @@ static void transport_complete_task_attr(struct se_cmd *cmd) */ spin_lock(&dev->delayed_cmd_lock); list_for_each_entry_safe(cmd_p, cmd_tmp, - &dev->delayed_cmd_list, se_delayed_list) { + &dev->delayed_cmd_list, se_delayed_node) { - list_del(&cmd_p->se_delayed_list); + list_del(&cmd_p->se_delayed_node); spin_unlock(&dev->delayed_cmd_lock); - DEBUG_STA("Calling add_tasks() for" + pr_debug("Calling add_tasks() for" " cmd_p: 0x%02x Task Attr: 0x%02x" " Dormant -> Active, se_ordered_id: %u\n", - T_TASK(cmd_p)->t_task_cdb[0], + cmd_p->t_task_cdb[0], cmd_p->sam_task_attr, 
cmd_p->se_ordered_id); transport_add_tasks_from_cmd(cmd_p); @@ -3846,20 +3509,79 @@ static void transport_complete_task_attr(struct se_cmd *cmd) * to do the processing of the Active tasks. */ if (new_active_tasks != 0) - wake_up_interruptible(&dev->dev_queue_obj->thread_wq); + wake_up_interruptible(&dev->dev_queue_obj.thread_wq); +} + +static int transport_complete_qf(struct se_cmd *cmd) +{ + int ret = 0; + + if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) + return cmd->se_tfo->queue_status(cmd); + + switch (cmd->data_direction) { + case DMA_FROM_DEVICE: + ret = cmd->se_tfo->queue_data_in(cmd); + break; + case DMA_TO_DEVICE: + if (cmd->t_bidi_data_sg) { + ret = cmd->se_tfo->queue_data_in(cmd); + if (ret < 0) + return ret; + } + /* Fall through for DMA_TO_DEVICE */ + case DMA_NONE: + ret = cmd->se_tfo->queue_status(cmd); + break; + default: + break; + } + + return ret; +} + +static void transport_handle_queue_full( + struct se_cmd *cmd, + struct se_device *dev, + int (*qf_callback)(struct se_cmd *)) +{ + spin_lock_irq(&dev->qf_cmd_lock); + cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL; + cmd->transport_qf_callback = qf_callback; + list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); + atomic_inc(&dev->dev_qf_count); + smp_mb__after_atomic_inc(); + spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); + + schedule_work(&cmd->se_dev->qf_work_queue); } static void transport_generic_complete_ok(struct se_cmd *cmd) { - int reason = 0; + int reason = 0, ret; /* * Check if we need to move delayed/dormant tasks from cmds on the * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task * Attribute. */ - if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) transport_complete_task_attr(cmd); /* + * Check to schedule QUEUE_FULL work, or execute an existing + * cmd->transport_qf_callback() + */ + if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) + schedule_work(&cmd->se_dev->qf_work_queue); + + if (cmd->transport_qf_callback) { + ret = cmd->transport_qf_callback(cmd); + if (ret < 0) + goto queue_full; + + cmd->transport_qf_callback = NULL; + goto done; + } + /* * Check if we need to retrieve a sense buffer from * the struct se_cmd in question. */ @@ -3872,8 +3594,11 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) * a non GOOD status. */ if (cmd->scsi_status) { - transport_send_check_condition_and_sense( + ret = transport_send_check_condition_and_sense( cmd, reason, 1); + if (ret == -EAGAIN) + goto queue_full; + transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; @@ -3889,53 +3614,57 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) switch (cmd->data_direction) { case DMA_FROM_DEVICE: spin_lock(&cmd->se_lun->lun_sep_lock); - if (SE_LUN(cmd)->lun_sep) { - SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += + if (cmd->se_lun->lun_sep) { + cmd->se_lun->lun_sep->sep_stats.tx_data_octets += cmd->data_length; } spin_unlock(&cmd->se_lun->lun_sep_lock); - /* - * If enabled by TCM fabirc module pre-registered SGL - * memory, perform the memcpy() from the TCM internal - * contigious buffer back to the original SGL. 
- */ - if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) - transport_memcpy_write_contig(cmd, - T_TASK(cmd)->t_task_pt_sgl, - T_TASK(cmd)->t_task_buf); - CMD_TFO(cmd)->queue_data_in(cmd); + ret = cmd->se_tfo->queue_data_in(cmd); + if (ret == -EAGAIN) + goto queue_full; break; case DMA_TO_DEVICE: spin_lock(&cmd->se_lun->lun_sep_lock); - if (SE_LUN(cmd)->lun_sep) { - SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets += + if (cmd->se_lun->lun_sep) { + cmd->se_lun->lun_sep->sep_stats.rx_data_octets += cmd->data_length; } spin_unlock(&cmd->se_lun->lun_sep_lock); /* * Check if we need to send READ payload for BIDI-COMMAND */ - if (T_TASK(cmd)->t_mem_bidi_list != NULL) { + if (cmd->t_bidi_data_sg) { spin_lock(&cmd->se_lun->lun_sep_lock); - if (SE_LUN(cmd)->lun_sep) { - SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += + if (cmd->se_lun->lun_sep) { + cmd->se_lun->lun_sep->sep_stats.tx_data_octets += cmd->data_length; } spin_unlock(&cmd->se_lun->lun_sep_lock); - CMD_TFO(cmd)->queue_data_in(cmd); + ret = cmd->se_tfo->queue_data_in(cmd); + if (ret == -EAGAIN) + goto queue_full; break; } /* Fall through for DMA_TO_DEVICE */ case DMA_NONE: - CMD_TFO(cmd)->queue_status(cmd); + ret = cmd->se_tfo->queue_status(cmd); + if (ret == -EAGAIN) + goto queue_full; break; default: break; } +done: transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); + return; + +queue_full: + pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," + " data_direction: %d\n", cmd, cmd->data_direction); + transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); } static void transport_free_dev_tasks(struct se_cmd *cmd) @@ -3943,9 +3672,9 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) struct se_task *task, *task_tmp; unsigned long flags; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, - &T_TASK(cmd)->t_task_list, t_list) { + &cmd->t_task_list, t_list) { if (atomic_read(&task->task_active)) continue; @@ -3954,75 +3683,40 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) list_del(&task->t_list); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); if (task->se_dev) - TRANSPORT(task->se_dev)->free_task(task); + task->se_dev->transport->free_task(task); else - printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", + pr_err("task[%u] - task->se_dev is NULL\n", task->task_no); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } -static inline void transport_free_pages(struct se_cmd *cmd) +static inline void transport_free_sgl(struct scatterlist *sgl, int nents) { - struct se_mem *se_mem, *se_mem_tmp; - int free_page = 1; - - if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) - free_page = 0; - if (cmd->se_dev->transport->do_se_mem_map) - free_page = 0; + struct scatterlist *sg; + int count; - if (T_TASK(cmd)->t_task_buf) { - kfree(T_TASK(cmd)->t_task_buf); - T_TASK(cmd)->t_task_buf = NULL; - return; - } + for_each_sg(sgl, sg, nents, count) + __free_page(sg_page(sg)); - /* - * Caller will handle releasing of struct se_mem. 
- */ - if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC) - return; + kfree(sgl); +} - if (!(T_TASK(cmd)->t_tasks_se_num)) +static inline void transport_free_pages(struct se_cmd *cmd) +{ + if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) return; - list_for_each_entry_safe(se_mem, se_mem_tmp, - T_TASK(cmd)->t_mem_list, se_list) { - /* - * We only release call __free_page(struct se_mem->se_page) when - * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, - */ - if (free_page) - __free_page(se_mem->se_page); - - list_del(&se_mem->se_list); - kmem_cache_free(se_mem_cache, se_mem); - } - - if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) { - list_for_each_entry_safe(se_mem, se_mem_tmp, - T_TASK(cmd)->t_mem_bidi_list, se_list) { - /* - * We only release call __free_page(struct se_mem->se_page) when - * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, - */ - if (free_page) - __free_page(se_mem->se_page); - - list_del(&se_mem->se_list); - kmem_cache_free(se_mem_cache, se_mem); - } - } + transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); + cmd->t_data_sg = NULL; + cmd->t_data_nents = 0; - kfree(T_TASK(cmd)->t_mem_bidi_list); - T_TASK(cmd)->t_mem_bidi_list = NULL; - kfree(T_TASK(cmd)->t_mem_list); - T_TASK(cmd)->t_mem_list = NULL; - T_TASK(cmd)->t_tasks_se_num = 0; + transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); + cmd->t_bidi_data_sg = NULL; + cmd->t_bidi_data_nents = 0; } static inline void transport_release_tasks(struct se_cmd *cmd) @@ -4034,23 +3728,23 @@ static inline int transport_dec_and_check(struct se_cmd *cmd) { unsigned long flags; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (atomic_read(&T_TASK(cmd)->t_fe_count)) { - if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (atomic_read(&cmd->t_fe_count)) { + if (!atomic_dec_and_test(&cmd->t_fe_count)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 1; } } - if (atomic_read(&T_TASK(cmd)->t_se_count)) { - if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + if (atomic_read(&cmd->t_se_count)) { + if (!atomic_dec_and_test(&cmd->t_se_count)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 1; } } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 0; } @@ -4062,68 +3756,57 @@ static void transport_release_fe_cmd(struct se_cmd *cmd) if (transport_dec_and_check(cmd)) return; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (!atomic_read(&cmd->transport_dev_active)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); goto free_pages; } - atomic_set(&T_TASK(cmd)->transport_dev_active, 0); + atomic_set(&cmd->transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_release_tasks(cmd); free_pages: transport_free_pages(cmd); transport_free_se_cmd(cmd); - CMD_TFO(cmd)->release_cmd_direct(cmd); + cmd->se_tfo->release_cmd(cmd); } -static int transport_generic_remove( - struct se_cmd *cmd, - int release_to_pool, - int session_reinstatement) +static int +transport_generic_remove(struct se_cmd *cmd, int 
session_reinstatement) { unsigned long flags; - if (!(T_TASK(cmd))) - goto release_cmd; - if (transport_dec_and_check(cmd)) { if (session_reinstatement) { - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } return 1; } - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (!atomic_read(&cmd->transport_dev_active)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); goto free_pages; } - atomic_set(&T_TASK(cmd)->transport_dev_active, 0); + atomic_set(&cmd->transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_release_tasks(cmd); + free_pages: transport_free_pages(cmd); - -release_cmd: - if (release_to_pool) { - transport_release_cmd_to_pool(cmd); - } else { - transport_free_se_cmd(cmd); - CMD_TFO(cmd)->release_cmd_direct(cmd); - } - + transport_release_cmd(cmd); return 0; } /* - * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map + * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of + * allocating in the core. * @cmd: Associated se_cmd descriptor * @mem: SGL style memory for TCM WRITE / READ * @sg_mem_num: Number of SGL elements @@ -4135,614 +3818,163 @@ release_cmd: */ int transport_generic_map_mem_to_cmd( struct se_cmd *cmd, - struct scatterlist *mem, - u32 sg_mem_num, - struct scatterlist *mem_bidi_in, - u32 sg_mem_bidi_num) + struct scatterlist *sgl, + u32 sgl_count, + struct scatterlist *sgl_bidi, + u32 sgl_bidi_count) { - u32 se_mem_cnt_out = 0; - int ret; - - if (!(mem) || !(sg_mem_num)) + if (!sgl || !sgl_count) return 0; - /* - * Passed *mem will contain a list_head containing preformatted - * struct se_mem elements... - */ - if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) { - if ((mem_bidi_in) || (sg_mem_bidi_num)) { - printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported" - " with BIDI-COMMAND\n"); - return -ENOSYS; - } - T_TASK(cmd)->t_mem_list = (struct list_head *)mem; - T_TASK(cmd)->t_tasks_se_num = sg_mem_num; - cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC; - return 0; - } - /* - * Otherwise, assume the caller is passing a struct scatterlist - * array from include/linux/scatterlist.h - */ if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { - /* - * For CDB using TCM struct se_mem linked list scatterlist memory - * processed into a TCM struct se_subsystem_dev, we do the mapping - * from the passed physical memory to struct se_mem->se_page here. 
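/*
 * A sketch of the reference gate enforced by transport_dec_and_check():
 * the fabric (t_fe_count) and the core (t_se_count) each hold a count,
 * so transport_generic_remove() tears down tasks and pages only on the
 * final put. Hypothetical caller:
 */
static void example_put_cmd(struct se_cmd *cmd)
{
	if (transport_generic_remove(cmd, 0))
		return;	/* another reference is still outstanding */
	/* final put: tasks and pages are freed and the fabric's
	 * ->release_cmd() has been invoked */
}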
- */ - T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); - if (!(T_TASK(cmd)->t_mem_list)) - return -ENOMEM; - ret = transport_map_sg_to_mem(cmd, - T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out); - if (ret < 0) - return -ENOMEM; + cmd->t_data_sg = sgl; + cmd->t_data_nents = sgl_count; - T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out; - /* - * Setup BIDI READ list of struct se_mem elements - */ - if ((mem_bidi_in) && (sg_mem_bidi_num)) { - T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); - if (!(T_TASK(cmd)->t_mem_bidi_list)) { - kfree(T_TASK(cmd)->t_mem_list); - return -ENOMEM; - } - se_mem_cnt_out = 0; - - ret = transport_map_sg_to_mem(cmd, - T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in, - &se_mem_cnt_out); - if (ret < 0) { - kfree(T_TASK(cmd)->t_mem_list); - return -ENOMEM; - } - - T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out; + if (sgl_bidi && sgl_bidi_count) { + cmd->t_bidi_data_sg = sgl_bidi; + cmd->t_bidi_data_nents = sgl_bidi_count; } cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; - - } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { - if (mem_bidi_in || sg_mem_bidi_num) { - printk(KERN_ERR "BIDI-Commands not supported using " - "SCF_SCSI_CONTROL_NONSG_IO_CDB\n"); - return -ENOSYS; - } - /* - * For incoming CDBs using a contiguous buffer internall with TCM, - * save the passed struct scatterlist memory. After TCM storage object - * processing has completed for this struct se_cmd, TCM core will call - * transport_memcpy_[write,read]_contig() as necessary from - * transport_generic_complete_ok() and transport_write_pending() in order - * to copy the TCM buffer to/from the original passed *mem in SGL -> - * struct scatterlist format. - */ - cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG; - T_TASK(cmd)->t_task_pt_sgl = mem; } return 0; } EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); - -static inline long long transport_dev_end_lba(struct se_device *dev) -{ - return dev->transport->get_blocks(dev) + 1; -} - -static int transport_get_sectors(struct se_cmd *cmd) -{ - struct se_device *dev = SE_DEV(cmd); - - T_TASK(cmd)->t_tasks_sectors = - (cmd->data_length / DEV_ATTRIB(dev)->block_size); - if (!(T_TASK(cmd)->t_tasks_sectors)) - T_TASK(cmd)->t_tasks_sectors = 1; - - if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK) - return 0; - - if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) > - transport_dev_end_lba(dev)) { - printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" - " transport_dev_end_lba(): %llu\n", - T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, - transport_dev_end_lba(dev)); - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; - return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS; - } - - return 0; -} - static int transport_new_cmd_obj(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); - u32 task_cdbs = 0, rc; - - if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { - task_cdbs++; - T_TASK(cmd)->t_task_cdbs++; - } else { - int set_counts = 1; + struct se_device *dev = cmd->se_dev; + u32 task_cdbs; + u32 rc; + int set_counts = 1; - /* - * Setup any BIDI READ tasks and memory from - * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks - * are queued first for the non pSCSI passthrough case. 
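/*
 * A sketch of the fabric-side contract for the simplified
 * transport_generic_map_mem_to_cmd() above (hypothetical caller, error
 * paths elided): the fabric keeps ownership of the scatterlist, and
 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC keeps the core from allocating or
 * freeing data pages of its own.
 */
static int example_fabric_submit_io(struct se_cmd *cmd,
		struct scatterlist *sgl, u32 sgl_count)
{
	int ret;

	ret = transport_generic_map_mem_to_cmd(cmd, sgl, sgl_count,
			NULL, 0);	/* no BIDI scatterlist */
	if (ret < 0)
		return ret;

	return transport_generic_new_cmd(cmd);
}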
- */ - if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && - (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { - rc = transport_generic_get_cdb_count(cmd, - T_TASK(cmd)->t_task_lba, - T_TASK(cmd)->t_tasks_sectors, - DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list, - set_counts); - if (!(rc)) { - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - cmd->scsi_sense_reason = - TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - return PYX_TRANSPORT_LU_COMM_FAILURE; - } - set_counts = 0; - } - /* - * Setup the tasks and memory from T_TASK(cmd)->t_mem_list - * Note for BIDI transfers this will contain the WRITE payload - */ - task_cdbs = transport_generic_get_cdb_count(cmd, - T_TASK(cmd)->t_task_lba, - T_TASK(cmd)->t_tasks_sectors, - cmd->data_direction, T_TASK(cmd)->t_mem_list, - set_counts); - if (!(task_cdbs)) { + /* + * Setup any BIDI READ tasks and memory from + * cmd->t_mem_bidi_list so the READ struct se_tasks + * are queued first for the non pSCSI passthrough case. + */ + if (cmd->t_bidi_data_sg && + (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { + rc = transport_allocate_tasks(cmd, + cmd->t_task_lba, + DMA_FROM_DEVICE, + cmd->t_bidi_data_sg, + cmd->t_bidi_data_nents); + if (rc <= 0) { cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = - TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return PYX_TRANSPORT_LU_COMM_FAILURE; } - T_TASK(cmd)->t_task_cdbs += task_cdbs; - -#if 0 - printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:" - " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length, - T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, - T_TASK(cmd)->t_task_cdbs); -#endif - } - - atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs); - atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs); - atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs); - return 0; -} - -static struct list_head *transport_init_se_mem_list(void) -{ - struct list_head *se_mem_list; - - se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL); - if (!(se_mem_list)) { - printk(KERN_ERR "Unable to allocate memory for se_mem_list\n"); - return NULL; + atomic_inc(&cmd->t_fe_count); + atomic_inc(&cmd->t_se_count); + set_counts = 0; } - INIT_LIST_HEAD(se_mem_list); - - return se_mem_list; -} - -static int -transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) -{ - unsigned char *buf; - struct se_mem *se_mem; - - T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); - if (!(T_TASK(cmd)->t_mem_list)) - return -ENOMEM; - /* - * If the device uses memory mapping this is enough. 
+ * Setup the tasks and memory from cmd->t_mem_list + * Note for BIDI transfers this will contain the WRITE payload */ - if (cmd->se_dev->transport->do_se_mem_map) - return 0; - - /* - * Setup BIDI-COMMAND READ list of struct se_mem elements - */ - if (T_TASK(cmd)->t_tasks_bidi) { - T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); - if (!(T_TASK(cmd)->t_mem_bidi_list)) { - kfree(T_TASK(cmd)->t_mem_list); - return -ENOMEM; - } + task_cdbs = transport_allocate_tasks(cmd, + cmd->t_task_lba, + cmd->data_direction, + cmd->t_data_sg, + cmd->t_data_nents); + if (task_cdbs <= 0) { + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return PYX_TRANSPORT_LU_COMM_FAILURE; } - while (length) { - se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); - if (!(se_mem)) { - printk(KERN_ERR "Unable to allocate struct se_mem\n"); - goto out; - } - -/* #warning FIXME Allocate contigous pages for struct se_mem elements */ - se_mem->se_page = alloc_pages(GFP_KERNEL, 0); - if (!(se_mem->se_page)) { - printk(KERN_ERR "alloc_pages() failed\n"); - goto out; - } - - buf = kmap_atomic(se_mem->se_page, KM_IRQ0); - if (!(buf)) { - printk(KERN_ERR "kmap_atomic() failed\n"); - goto out; - } - INIT_LIST_HEAD(&se_mem->se_list); - se_mem->se_len = (length > dma_size) ? dma_size : length; - memset(buf, 0, se_mem->se_len); - kunmap_atomic(buf, KM_IRQ0); - - list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list); - T_TASK(cmd)->t_tasks_se_num++; - - DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)" - " Offset(%u)\n", se_mem->se_page, se_mem->se_len, - se_mem->se_off); - - length -= se_mem->se_len; + if (set_counts) { + atomic_inc(&cmd->t_fe_count); + atomic_inc(&cmd->t_se_count); } - DEBUG_MEM("Allocated total struct se_mem elements(%u)\n", - T_TASK(cmd)->t_tasks_se_num); + cmd->t_task_list_num = task_cdbs; + atomic_set(&cmd->t_task_cdbs_left, task_cdbs); + atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs); + atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs); return 0; -out: - if (se_mem) - __free_pages(se_mem->se_page, 0); - kmem_cache_free(se_mem_cache, se_mem); - return -1; } -u32 transport_calc_sg_num( - struct se_task *task, - struct se_mem *in_se_mem, - u32 task_offset) +void *transport_kmap_first_data_page(struct se_cmd *cmd) { - struct se_cmd *se_cmd = task->task_se_cmd; - struct se_device *se_dev = SE_DEV(se_cmd); - struct se_mem *se_mem = in_se_mem; - struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd); - u32 sg_length, task_size = task->task_size, task_sg_num_padded; - - while (task_size != 0) { - DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)" - " se_mem->se_off(%u) task_offset(%u)\n", - se_mem->se_page, se_mem->se_len, - se_mem->se_off, task_offset); - - if (task_offset == 0) { - if (task_size >= se_mem->se_len) { - sg_length = se_mem->se_len; - - if (!(list_is_last(&se_mem->se_list, - T_TASK(se_cmd)->t_mem_list))) - se_mem = list_entry(se_mem->se_list.next, - struct se_mem, se_list); - } else { - sg_length = task_size; - task_size -= sg_length; - goto next; - } + struct scatterlist *sg = cmd->t_data_sg; - DEBUG_SC("sg_length(%u) task_size(%u)\n", - sg_length, task_size); - } else { - if ((se_mem->se_len - task_offset) > task_size) { - sg_length = task_size; - task_size -= sg_length; - goto next; - } else { - sg_length = (se_mem->se_len - task_offset); - - if (!(list_is_last(&se_mem->se_list, - T_TASK(se_cmd)->t_mem_list))) - se_mem = list_entry(se_mem->se_list.next, - struct se_mem, se_list); - } - - 
DEBUG_SC("sg_length(%u) task_size(%u)\n", - sg_length, task_size); - - task_offset = 0; - } - task_size -= sg_length; -next: - DEBUG_SC("task[%u] - Reducing task_size to(%u)\n", - task->task_no, task_size); - - task->task_sg_num++; - } - /* - * Check if the fabric module driver is requesting that all - * struct se_task->task_sg[] be chained together.. If so, - * then allocate an extra padding SG entry for linking and - * marking the end of the chained SGL. - */ - if (tfo->task_sg_chaining) { - task_sg_num_padded = (task->task_sg_num + 1); - task->task_padded_sg = 1; - } else - task_sg_num_padded = task->task_sg_num; - - task->task_sg = kzalloc(task_sg_num_padded * - sizeof(struct scatterlist), GFP_KERNEL); - if (!(task->task_sg)) { - printk(KERN_ERR "Unable to allocate memory for" - " task->task_sg\n"); - return 0; - } - sg_init_table(&task->task_sg[0], task_sg_num_padded); + BUG_ON(!sg); /* - * Setup task->task_sg_bidi for SCSI READ payload for - * TCM/pSCSI passthrough if present for BIDI-COMMAND + * We need to take into account a possible offset here for fabrics like + * tcm_loop who may be using a contig buffer from the SCSI midlayer for + * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() */ - if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) && - (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) { - task->task_sg_bidi = kzalloc(task_sg_num_padded * - sizeof(struct scatterlist), GFP_KERNEL); - if (!(task->task_sg_bidi)) { - printk(KERN_ERR "Unable to allocate memory for" - " task->task_sg_bidi\n"); - return 0; - } - sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded); - } - /* - * For the chaining case, setup the proper end of SGL for the - * initial submission struct task into struct se_subsystem_api. - * This will be cleared later by transport_do_task_sg_chain() - */ - if (task->task_padded_sg) { - sg_mark_end(&task->task_sg[task->task_sg_num - 1]); - /* - * Added the 'if' check before marking end of bi-directional - * scatterlist (which gets created only in case of request - * (RD + WR). 
- */ - if (task->task_sg_bidi) - sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]); - } - - DEBUG_SC("Successfully allocated task->task_sg_num(%u)," - " task_sg_num_padded(%u)\n", task->task_sg_num, - task_sg_num_padded); - - return task->task_sg_num; + return kmap(sg_page(sg)) + sg->offset; } +EXPORT_SYMBOL(transport_kmap_first_data_page); -static inline int transport_set_tasks_sectors_disk( - struct se_task *task, - struct se_device *dev, - unsigned long long lba, - u32 sectors, - int *max_sectors_set) +void transport_kunmap_first_data_page(struct se_cmd *cmd) { - if ((lba + sectors) > transport_dev_end_lba(dev)) { - task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1); - - if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) { - task->task_sectors = DEV_ATTRIB(dev)->max_sectors; - *max_sectors_set = 1; - } - } else { - if (sectors > DEV_ATTRIB(dev)->max_sectors) { - task->task_sectors = DEV_ATTRIB(dev)->max_sectors; - *max_sectors_set = 1; - } else - task->task_sectors = sectors; - } - - return 0; + kunmap(sg_page(cmd->t_data_sg)); } +EXPORT_SYMBOL(transport_kunmap_first_data_page); -static inline int transport_set_tasks_sectors_non_disk( - struct se_task *task, - struct se_device *dev, - unsigned long long lba, - u32 sectors, - int *max_sectors_set) +static int +transport_generic_get_mem(struct se_cmd *cmd) { - if (sectors > DEV_ATTRIB(dev)->max_sectors) { - task->task_sectors = DEV_ATTRIB(dev)->max_sectors; - *max_sectors_set = 1; - } else - task->task_sectors = sectors; + u32 length = cmd->data_length; + unsigned int nents; + struct page *page; + int i = 0; - return 0; -} + nents = DIV_ROUND_UP(length, PAGE_SIZE); + cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); + if (!cmd->t_data_sg) + return -ENOMEM; -static inline int transport_set_tasks_sectors( - struct se_task *task, - struct se_device *dev, - unsigned long long lba, - u32 sectors, - int *max_sectors_set) -{ - return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ? 
- transport_set_tasks_sectors_disk(task, dev, lba, sectors, - max_sectors_set) : - transport_set_tasks_sectors_non_disk(task, dev, lba, sectors, - max_sectors_set); -} + cmd->t_data_nents = nents; + sg_init_table(cmd->t_data_sg, nents); -static int transport_map_sg_to_mem( - struct se_cmd *cmd, - struct list_head *se_mem_list, - void *in_mem, - u32 *se_mem_cnt) -{ - struct se_mem *se_mem; - struct scatterlist *sg; - u32 sg_count = 1, cmd_size = cmd->data_length; + while (length) { + u32 page_len = min_t(u32, length, PAGE_SIZE); + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + goto out; - if (!in_mem) { - printk(KERN_ERR "No source scatterlist\n"); - return -1; + sg_set_page(&cmd->t_data_sg[i], page, page_len, 0); + length -= page_len; + i++; } - sg = (struct scatterlist *)in_mem; - - while (cmd_size) { - se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); - if (!(se_mem)) { - printk(KERN_ERR "Unable to allocate struct se_mem\n"); - return -1; - } - INIT_LIST_HEAD(&se_mem->se_list); - DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u" - " sg_page: %p offset: %d length: %d\n", cmd_size, - sg_page(sg), sg->offset, sg->length); - - se_mem->se_page = sg_page(sg); - se_mem->se_off = sg->offset; - - if (cmd_size > sg->length) { - se_mem->se_len = sg->length; - sg = sg_next(sg); - sg_count++; - } else - se_mem->se_len = cmd_size; - - cmd_size -= se_mem->se_len; - - DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n", - *se_mem_cnt, cmd_size); - DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n", - se_mem->se_page, se_mem->se_off, se_mem->se_len); + return 0; - list_add_tail(&se_mem->se_list, se_mem_list); - (*se_mem_cnt)++; +out: + while (i >= 0) { + __free_page(sg_page(&cmd->t_data_sg[i])); + i--; } - - DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)" - " struct se_mem\n", sg_count, *se_mem_cnt); - - if (sg_count != *se_mem_cnt) - BUG(); - - return 0; + kfree(cmd->t_data_sg); + cmd->t_data_sg = NULL; + return -ENOMEM; } -/* transport_map_mem_to_sg(): - * - * - */ -int transport_map_mem_to_sg( - struct se_task *task, - struct list_head *se_mem_list, - void *in_mem, - struct se_mem *in_se_mem, - struct se_mem **out_se_mem, - u32 *se_mem_cnt, - u32 *task_offset) +/* Reduce sectors if they are too long for the device */ +static inline sector_t transport_limit_task_sectors( + struct se_device *dev, + unsigned long long lba, + sector_t sectors) { - struct se_cmd *se_cmd = task->task_se_cmd; - struct se_mem *se_mem = in_se_mem; - struct scatterlist *sg = (struct scatterlist *)in_mem; - u32 task_size = task->task_size, sg_no = 0; + sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); - if (!sg) { - printk(KERN_ERR "Unable to locate valid struct" - " scatterlist pointer\n"); - return -1; - } - - while (task_size != 0) { - /* - * Setup the contigious array of scatterlists for - * this struct se_task. 
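/*
 * A sketch of the intended use of the transport_kmap_first_data_page()
 * helper added above (hypothetical caller): suitable only for payloads
 * known to fit in the first SG entry, e.g. small control CDB responses.
 */
static void example_zero_first_page(struct se_cmd *cmd)
{
	unsigned char *buf = transport_kmap_first_data_page(cmd);

	/* assumes the whole payload sits within the first mapped page */
	memset(buf, 0, cmd->data_length);
	transport_kunmap_first_data_page(cmd);
}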
- */ - sg_assign_page(sg, se_mem->se_page); - - if (*task_offset == 0) { - sg->offset = se_mem->se_off; - - if (task_size >= se_mem->se_len) { - sg->length = se_mem->se_len; + if (dev->transport->get_device_type(dev) == TYPE_DISK) + if ((lba + sectors) > transport_dev_end_lba(dev)) + sectors = ((transport_dev_end_lba(dev) - lba) + 1); - if (!(list_is_last(&se_mem->se_list, - T_TASK(se_cmd)->t_mem_list))) { - se_mem = list_entry(se_mem->se_list.next, - struct se_mem, se_list); - (*se_mem_cnt)++; - } - } else { - sg->length = task_size; - /* - * Determine if we need to calculate an offset - * into the struct se_mem on the next go around.. - */ - task_size -= sg->length; - if (!(task_size)) - *task_offset = sg->length; - - goto next; - } - - } else { - sg->offset = (*task_offset + se_mem->se_off); - - if ((se_mem->se_len - *task_offset) > task_size) { - sg->length = task_size; - /* - * Determine if we need to calculate an offset - * into the struct se_mem on the next go around.. - */ - task_size -= sg->length; - if (!(task_size)) - *task_offset += sg->length; - - goto next; - } else { - sg->length = (se_mem->se_len - *task_offset); - - if (!(list_is_last(&se_mem->se_list, - T_TASK(se_cmd)->t_mem_list))) { - se_mem = list_entry(se_mem->se_list.next, - struct se_mem, se_list); - (*se_mem_cnt)++; - } - } - - *task_offset = 0; - } - task_size -= sg->length; -next: - DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing" - " task_size to(%u), task_offset: %u\n", task->task_no, sg_no, - sg_page(sg), sg->length, sg->offset, task_size, *task_offset); - - sg_no++; - if (!(task_size)) - break; - - sg = sg_next(sg); - - if (task_size > se_cmd->data_length) - BUG(); - } - *out_se_mem = se_mem; - - DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)" - " SGs\n", task->task_no, *se_mem_cnt, sg_no); - - return 0; + return sectors; } + /* * This function can be used by HW target mode drivers to create a linked * scatterlist from all contiguously allocated struct se_task->task_sg[]. @@ -4751,334 +3983,236 @@ next: */ void transport_do_task_sg_chain(struct se_cmd *cmd) { - struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL; - struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL; - struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL; + struct scatterlist *sg_first = NULL; + struct scatterlist *sg_prev = NULL; + int sg_prev_nents = 0; + struct scatterlist *sg; struct se_task *task; - struct target_core_fabric_ops *tfo = CMD_TFO(cmd); - u32 task_sg_num = 0, sg_count = 0; + u32 chained_nents = 0; int i; - if (tfo->task_sg_chaining == 0) { - printk(KERN_ERR "task_sg_chaining is diabled for fabric module:" - " %s\n", tfo->get_fabric_name()); - dump_stack(); - return; - } + BUG_ON(!cmd->se_tfo->task_sg_chaining); + /* * Walk the struct se_task list and setup scatterlist chains - * for each contiguosly allocated struct se_task->task_sg[]. + * for each contiguously allocated struct se_task->task_sg[]. 
*/ - list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { - if (!(task->task_sg) || !(task->task_padded_sg)) + list_for_each_entry(task, &cmd->t_task_list, t_list) { + if (!task->task_sg) continue; - if (sg_head && sg_link) { - sg_head_cur = &task->task_sg[0]; - sg_link_cur = &task->task_sg[task->task_sg_num]; - /* - * Either add chain or mark end of scatterlist - */ - if (!(list_is_last(&task->t_list, - &T_TASK(cmd)->t_task_list))) { - /* - * Clear existing SGL termination bit set in - * transport_calc_sg_num(), see sg_mark_end() - */ - sg_end_cur = &task->task_sg[task->task_sg_num - 1]; - sg_end_cur->page_link &= ~0x02; - - sg_chain(sg_head, task_sg_num, sg_head_cur); - sg_count += task->task_sg_num; - task_sg_num = (task->task_sg_num + 1); - } else { - sg_chain(sg_head, task_sg_num, sg_head_cur); - sg_count += task->task_sg_num; - task_sg_num = task->task_sg_num; - } + BUG_ON(!task->task_padded_sg); - sg_head = sg_head_cur; - sg_link = sg_link_cur; - continue; - } - sg_head = sg_first = &task->task_sg[0]; - sg_link = &task->task_sg[task->task_sg_num]; - /* - * Check for single task.. - */ - if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) { - /* - * Clear existing SGL termination bit set in - * transport_calc_sg_num(), see sg_mark_end() - */ - sg_end = &task->task_sg[task->task_sg_num - 1]; - sg_end->page_link &= ~0x02; - sg_count += task->task_sg_num; - task_sg_num = (task->task_sg_num + 1); + if (!sg_first) { + sg_first = task->task_sg; + chained_nents = task->task_sg_nents; } else { - sg_count += task->task_sg_num; - task_sg_num = task->task_sg_num; + sg_chain(sg_prev, sg_prev_nents, task->task_sg); + chained_nents += task->task_sg_nents; } + + sg_prev = task->task_sg; + sg_prev_nents = task->task_sg_nents; } /* * Setup the starting pointer and total t_tasks_sg_linked_no including * padding SGs for linking and to mark the end. */ - T_TASK(cmd)->t_tasks_sg_chained = sg_first; - T_TASK(cmd)->t_tasks_sg_chained_no = sg_count; + cmd->t_tasks_sg_chained = sg_first; + cmd->t_tasks_sg_chained_no = chained_nents; - DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and" - " t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained, - T_TASK(cmd)->t_tasks_sg_chained_no); + pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" + " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained, + cmd->t_tasks_sg_chained_no); - for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg, - T_TASK(cmd)->t_tasks_sg_chained_no, i) { + for_each_sg(cmd->t_tasks_sg_chained, sg, + cmd->t_tasks_sg_chained_no, i) { - DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n", - i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic); + pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n", + i, sg, sg_page(sg), sg->length, sg->offset); if (sg_is_chain(sg)) - DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg); + pr_debug("SG: %p sg_is_chain=1\n", sg); if (sg_is_last(sg)) - DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg); + pr_debug("SG: %p sg_is_last=1\n", sg); } } EXPORT_SYMBOL(transport_do_task_sg_chain); -static int transport_do_se_mem_map( - struct se_device *dev, - struct se_task *task, - struct list_head *se_mem_list, - void *in_mem, - struct se_mem *in_se_mem, - struct se_mem **out_se_mem, - u32 *se_mem_cnt, - u32 *task_offset_in) -{ - u32 task_offset = *task_offset_in; - int ret = 0; - /* - * se_subsystem_api_t->do_se_mem_map is used when internal allocation - * has been done by the transport plugin. 
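/*
 * A sketch of a hardware-fabric consumer of transport_do_task_sg_chain()
 * above (hypothetical caller): once chained, every per-task SGL can be
 * walked, or DMA-mapped, as a single scatterlist.
 */
static void example_walk_chained_sgl(struct se_cmd *cmd)
{
	struct scatterlist *sg;
	int i;

	transport_do_task_sg_chain(cmd);
	for_each_sg(cmd->t_tasks_sg_chained, sg,
			cmd->t_tasks_sg_chained_no, i)
		pr_debug("chained sg[%d]: page: %p length: %d\n",
			i, sg_page(sg), sg->length);
}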
- */ - if (TRANSPORT(dev)->do_se_mem_map) { - ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list, - in_mem, in_se_mem, out_se_mem, se_mem_cnt, - task_offset_in); - if (ret == 0) - T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; - - return ret; - } - - BUG_ON(list_empty(se_mem_list)); - /* - * This is the normal path for all normal non BIDI and BIDI-COMMAND - * WRITE payloads.. If we need to do BIDI READ passthrough for - * TCM/pSCSI the first call to transport_do_se_mem_map -> - * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the - * allocation for task->task_sg_bidi, and the subsequent call to - * transport_do_se_mem_map() from transport_generic_get_cdb_count() - */ - if (!(task->task_sg_bidi)) { - /* - * Assume default that transport plugin speaks preallocated - * scatterlists. - */ - if (!(transport_calc_sg_num(task, in_se_mem, task_offset))) - return -1; - /* - * struct se_task->task_sg now contains the struct scatterlist array. - */ - return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, - in_se_mem, out_se_mem, se_mem_cnt, - task_offset_in); - } - /* - * Handle the se_mem_list -> struct task->task_sg_bidi - * memory map for the extra BIDI READ payload - */ - return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi, - in_se_mem, out_se_mem, se_mem_cnt, - task_offset_in); -} - -static u32 transport_generic_get_cdb_count( +/* + * Break up cmd into chunks transport can handle + */ +static int transport_allocate_data_tasks( struct se_cmd *cmd, unsigned long long lba, - u32 sectors, enum dma_data_direction data_direction, - struct list_head *mem_list, - int set_counts) + struct scatterlist *sgl, + unsigned int sgl_nents) { unsigned char *cdb = NULL; struct se_task *task; - struct se_mem *se_mem = NULL, *se_mem_lout = NULL; - struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL; - struct se_device *dev = SE_DEV(cmd); - int max_sectors_set = 0, ret; - u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0; - - if (!mem_list) { - printk(KERN_ERR "mem_list is NULL in transport_generic_get" - "_cdb_count()\n"); - return 0; - } - /* - * While using RAMDISK_DR backstores is the only case where - * mem_list will ever be empty at this point. 
- */ - if (!(list_empty(mem_list))) - se_mem = list_entry(mem_list->next, struct se_mem, se_list); - /* - * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to - * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation - */ - if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && - !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) && - (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) - se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next, - struct se_mem, se_list); - - while (sectors) { - DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n", - CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors, - transport_dev_end_lba(dev)); + struct se_device *dev = cmd->se_dev; + unsigned long flags; + sector_t sectors; + int task_count, i, ret; + sector_t dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; + u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size; + struct scatterlist *sg; + struct scatterlist *cmd_sg; - task = transport_generic_get_task(cmd, data_direction); - if (!(task)) - goto out; + WARN_ON(cmd->data_length % sector_size); + sectors = DIV_ROUND_UP(cmd->data_length, sector_size); + task_count = DIV_ROUND_UP(sectors, dev_max_sectors); - transport_set_tasks_sectors(task, dev, lba, sectors, - &max_sectors_set); + cmd_sg = sgl; + for (i = 0; i < task_count; i++) { + unsigned int task_size; + int count; + + task = transport_generic_get_task(cmd, data_direction); + if (!task) + return -ENOMEM; task->task_lba = lba; - lba += task->task_sectors; - sectors -= task->task_sectors; - task->task_size = (task->task_sectors * - DEV_ATTRIB(dev)->block_size); - - cdb = TRANSPORT(dev)->get_cdb(task); - if ((cdb)) { - memcpy(cdb, T_TASK(cmd)->t_task_cdb, - scsi_command_size(T_TASK(cmd)->t_task_cdb)); - cmd->transport_split_cdb(task->task_lba, - &task->task_sectors, cdb); - } + task->task_sectors = min(sectors, dev_max_sectors); + task->task_size = task->task_sectors * sector_size; - /* - * Perform the SE OBJ plugin and/or Transport plugin specific - * mapping for T_TASK(cmd)->t_mem_list. And setup the - * task->task_sg and if necessary task->task_sg_bidi - */ - ret = transport_do_se_mem_map(dev, task, mem_list, - NULL, se_mem, &se_mem_lout, &se_mem_cnt, - &task_offset_in); - if (ret < 0) - goto out; + cdb = dev->transport->get_cdb(task); + BUG_ON(!cdb); + + memcpy(cdb, cmd->t_task_cdb, + scsi_command_size(cmd->t_task_cdb)); + + /* Update new cdb with updated lba/sectors */ + cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); - se_mem = se_mem_lout; /* - * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi - * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI - * - * Note that the first call to transport_do_se_mem_map() above will - * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map() - * -> transport_calc_sg_num(), and the second here will do the - * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI. + * Check if the fabric module driver is requesting that all + * struct se_task->task_sg[] be chained together.. If so, + * then allocate an extra padding SG entry for linking and + * marking the end of the chained SGL. + * Possibly over-allocate task sgl size by using cmd sgl size. + * It's so much easier and only a waste when task_count > 1. + * That is extremely rare. 
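/*
 * A worked example of the split computed above, assuming a 1 MiB WRITE
 * with block_size = 512 and max_sectors = 1024: sectors = 2048, so
 * task_count = DIV_ROUND_UP(2048, 1024) = 2 tasks of 512 KiB each, the
 * second starting at lba + 1024.
 */
static inline u32 example_task_count(u32 data_length, u32 block_size,
		u32 max_sectors)
{
	u32 sectors = DIV_ROUND_UP(data_length, block_size);

	return DIV_ROUND_UP(sectors, max_sectors);
}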
*/ - if (task->task_sg_bidi != NULL) { - ret = transport_do_se_mem_map(dev, task, - T_TASK(cmd)->t_mem_bidi_list, NULL, - se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt, - &task_offset_in); - if (ret < 0) - goto out; + task->task_sg_nents = sgl_nents; + if (cmd->se_tfo->task_sg_chaining) { + task->task_sg_nents++; + task->task_padded_sg = 1; + } - se_mem_bidi = se_mem_bidi_lout; + task->task_sg = kmalloc(sizeof(struct scatterlist) * + task->task_sg_nents, GFP_KERNEL); + if (!task->task_sg) { + cmd->se_dev->transport->free_task(task); + return -ENOMEM; } - task_cdbs++; - DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n", - task_cdbs, task->task_sg_num); + sg_init_table(task->task_sg, task->task_sg_nents); - if (max_sectors_set) { - max_sectors_set = 0; - continue; + task_size = task->task_size; + + /* Build new sgl, only up to task_size */ + for_each_sg(task->task_sg, sg, task->task_sg_nents, count) { + if (cmd_sg->length > task_size) + break; + + *sg = *cmd_sg; + task_size -= cmd_sg->length; + cmd_sg = sg_next(cmd_sg); } - if (!sectors) - break; - } + lba += task->task_sectors; + sectors -= task->task_sectors; - if (set_counts) { - atomic_inc(&T_TASK(cmd)->t_fe_count); - atomic_inc(&T_TASK(cmd)->t_se_count); + spin_lock_irqsave(&cmd->t_state_lock, flags); + list_add_tail(&task->t_list, &cmd->t_task_list); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } + /* + * Now perform the memory map of task->task_sg[] into backend + * subsystem memory.. + */ + list_for_each_entry(task, &cmd->t_task_list, t_list) { + if (atomic_read(&task->task_sent)) + continue; + if (!dev->transport->map_data_SG) + continue; - DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n", - CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE) - ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs); + ret = dev->transport->map_data_SG(task); + if (ret < 0) + return 0; + } - return task_cdbs; -out: - return 0; + return task_count; } static int -transport_map_control_cmd_to_task(struct se_cmd *cmd) +transport_allocate_control_task(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_dev; unsigned char *cdb; struct se_task *task; - int ret; + unsigned long flags; + int ret = 0; task = transport_generic_get_task(cmd, cmd->data_direction); if (!task) - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + return -ENOMEM; - cdb = TRANSPORT(dev)->get_cdb(task); - if (cdb) - memcpy(cdb, cmd->t_task->t_task_cdb, - scsi_command_size(cmd->t_task->t_task_cdb)); + cdb = dev->transport->get_cdb(task); + BUG_ON(!cdb); + memcpy(cdb, cmd->t_task_cdb, + scsi_command_size(cmd->t_task_cdb)); + task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, + GFP_KERNEL); + if (!task->task_sg) { + cmd->se_dev->transport->free_task(task); + return -ENOMEM; + } + + memcpy(task->task_sg, cmd->t_data_sg, + sizeof(struct scatterlist) * cmd->t_data_nents); task->task_size = cmd->data_length; - task->task_sg_num = - (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 
							1 : 0;
+	task->task_sg_nents = cmd->t_data_nents;

-	atomic_inc(&cmd->t_task->t_fe_count);
-	atomic_inc(&cmd->t_task->t_se_count);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	list_add_tail(&task->t_list, &cmd->t_task_list);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

 	if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
-		struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
-		u32 se_mem_cnt = 0, task_offset = 0;
-
-		if (!list_empty(T_TASK(cmd)->t_mem_list))
-			se_mem = list_entry(T_TASK(cmd)->t_mem_list->next,
-					struct se_mem, se_list);
-
-		ret = transport_do_se_mem_map(dev, task,
-				cmd->t_task->t_mem_list, NULL, se_mem,
-				&se_mem_lout, &se_mem_cnt, &task_offset);
-		if (ret < 0)
-			return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
-
-		if (dev->transport->map_task_SG)
-			return dev->transport->map_task_SG(task);
-		return 0;
-	} else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
-		if (dev->transport->map_task_non_SG)
-			return dev->transport->map_task_non_SG(task);
-		return 0;
+		if (dev->transport->map_control_SG)
+			ret = dev->transport->map_control_SG(task);
 	} else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
 		if (dev->transport->cdb_none)
-			return dev->transport->cdb_none(task);
-		return 0;
+			ret = dev->transport->cdb_none(task);
 	} else {
+		pr_err("target: Unknown control cmd type!\n");
 		BUG();
-		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
 	}
+
+	/* Success! Return number of tasks allocated */
+	if (ret == 0)
+		return 1;
+	return ret;
+}
+
+static u32 transport_allocate_tasks(
+	struct se_cmd *cmd,
+	unsigned long long lba,
+	enum dma_data_direction data_direction,
+	struct scatterlist *sgl,
+	unsigned int sgl_nents)
+{
+	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)
+		return transport_allocate_data_tasks(cmd, lba, data_direction,
+				sgl, sgl_nents);
+	else
+		return transport_allocate_control_task(cmd);
+}
+
 /* transport_generic_new_cmd(): Called from transport_processing_thread()
 *
 * Allocate storage transport resources from a set of values predefined
 *
@@ -5088,64 +4222,33 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd)
 /*
 * Generate struct se_task(s) and/or their payloads for this CDB.
 */
-static int transport_generic_new_cmd(struct se_cmd *cmd)
+int transport_generic_new_cmd(struct se_cmd *cmd)
 {
-	struct se_portal_group *se_tpg;
-	struct se_task *task;
-	struct se_device *dev = SE_DEV(cmd);
 	int ret = 0;

 	/*
 	 * Determine is the TCM fabric module has already allocated physical
 	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
-	 * to setup beforehand the linked list of physical memory at
-	 * T_TASK(cmd)->t_mem_list of struct se_mem->se_page
+	 * beforehand.
 	 */
-	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
-		ret = transport_allocate_resources(cmd);
+	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
+	    cmd->data_length) {
+		ret = transport_generic_get_mem(cmd);
 		if (ret < 0)
 			return ret;
 	}
-
-	ret = transport_get_sectors(cmd);
-	if (ret < 0)
-		return ret;
-
+	/*
+	 * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for
+	 * control or data CDB types, and perform the map to backend subsystem
+	 * code from SGL memory allocated here by transport_generic_get_mem(), or
+	 * via pre-existing SGL memory setup explicitly by fabric module code with
+	 * transport_generic_map_mem_to_cmd().
+ */ ret = transport_new_cmd_obj(cmd); if (ret < 0) return ret; - /* - * Determine if the calling TCM fabric module is talking to - * Linux/NET via kernel sockets and needs to allocate a - * struct iovec array to complete the struct se_cmd - */ - se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg; - if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) { - ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd); - if (ret < 0) - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; - } - - if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { - list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { - if (atomic_read(&task->task_sent)) - continue; - if (!dev->transport->map_task_SG) - continue; - - ret = dev->transport->map_task_SG(task); - if (ret < 0) - return ret; - } - } else { - ret = transport_map_control_cmd_to_task(cmd); - if (ret < 0) - return ret; - } - - /* - * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready.. + * For WRITEs, let the fabric know its buffer is ready.. * This WRITE struct se_cmd (and all of its associated struct se_task's) * will be added to the struct se_device execution queue after its WRITE * data has arrived. (ie: It gets handled by the transport processing @@ -5162,6 +4265,7 @@ static int transport_generic_new_cmd(struct se_cmd *cmd) transport_execute_tasks(cmd); return 0; } +EXPORT_SYMBOL(transport_generic_new_cmd); /* transport_generic_process_write(): * @@ -5169,68 +4273,15 @@ static int transport_generic_new_cmd(struct se_cmd *cmd) */ void transport_generic_process_write(struct se_cmd *cmd) { -#if 0 - /* - * Copy SCSI Presented DTL sector(s) from received buffers allocated to - * original EDTL - */ - if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { - if (!T_TASK(cmd)->t_tasks_se_num) { - unsigned char *dst, *buf = - (unsigned char *)T_TASK(cmd)->t_task_buf; - - dst = kzalloc(cmd->cmd_spdtl), GFP_KERNEL); - if (!(dst)) { - printk(KERN_ERR "Unable to allocate memory for" - " WRITE underflow\n"); - transport_generic_request_failure(cmd, NULL, - PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); - return; - } - memcpy(dst, buf, cmd->cmd_spdtl); - - kfree(T_TASK(cmd)->t_task_buf); - T_TASK(cmd)->t_task_buf = dst; - } else { - struct scatterlist *sg = - (struct scatterlist *sg)T_TASK(cmd)->t_task_buf; - struct scatterlist *orig_sg; - - orig_sg = kzalloc(sizeof(struct scatterlist) * - T_TASK(cmd)->t_tasks_se_num, - GFP_KERNEL))) { - if (!(orig_sg)) { - printk(KERN_ERR "Unable to allocate memory" - " for WRITE underflow\n"); - transport_generic_request_failure(cmd, NULL, - PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); - return; - } - - memcpy(orig_sg, T_TASK(cmd)->t_task_buf, - sizeof(struct scatterlist) * - T_TASK(cmd)->t_tasks_se_num); - - cmd->data_length = cmd->cmd_spdtl; - /* - * FIXME, clear out original struct se_task and state - * information. 
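/*
 * The resulting allocation path, summarized (function names as in this
 * file; the ordering below is a reading aid, not new behavior):
 *
 *   transport_generic_new_cmd(cmd)
 *     -> transport_generic_get_mem(cmd)      core-owned t_data_sg, unless
 *                                            SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC
 *     -> transport_new_cmd_obj(cmd)
 *          -> transport_allocate_tasks(...)  data vs. control tasks
 *     -> DMA_TO_DEVICE: wait for WRITE data via write_pending
 *        otherwise:     transport_execute_tasks(cmd)
 */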
- */ - if (transport_generic_new_cmd(cmd) < 0) { - transport_generic_request_failure(cmd, NULL, - PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); - kfree(orig_sg); - return; - } - - transport_memcpy_write_sg(cmd, orig_sg); - } - } -#endif transport_execute_tasks(cmd); } EXPORT_SYMBOL(transport_generic_process_write); +static int transport_write_pending_qf(struct se_cmd *cmd) +{ + return cmd->se_tfo->write_pending(cmd); +} + /* transport_generic_write_pending(): * * @@ -5240,24 +4291,26 @@ static int transport_generic_write_pending(struct se_cmd *cmd) unsigned long flags; int ret; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); cmd->t_state = TRANSPORT_WRITE_PENDING; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); - /* - * For the TCM control CDBs using a contiguous buffer, do the memcpy - * from the passed Linux/SCSI struct scatterlist located at - * T_TASK(se_cmd)->t_task_pt_buf to the contiguous buffer at - * T_TASK(se_cmd)->t_task_buf. - */ - if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) - transport_memcpy_read_contig(cmd, - T_TASK(cmd)->t_task_buf, - T_TASK(cmd)->t_task_pt_sgl); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + if (cmd->transport_qf_callback) { + ret = cmd->transport_qf_callback(cmd); + if (ret == -EAGAIN) + goto queue_full; + else if (ret < 0) + return ret; + + cmd->transport_qf_callback = NULL; + return 0; + } + /* * Clear the se_cmd for WRITE_PENDING status in order to set - * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data + * cmd->t_transport_active=0 so that transport_generic_handle_data * can be called from HW target mode interrupt code. This is safe - * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending + * to be called with transport_off=1 before the cmd->se_tfo->write_pending * because the se_cmd->se_lun pointer is not being cleared. */ transport_cmd_check_stop(cmd, 1, 0); @@ -5266,26 +4319,30 @@ static int transport_generic_write_pending(struct se_cmd *cmd) * Call the fabric write_pending function here to let the * frontend know that WRITE buffers are ready. 
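/*
 * A minimal fabric-side ->write_pending() sketch matching the new error
 * contract (hypothetical fabric; example_fabric_can_post_r2t() is an
 * assumed helper, not a real API):
 */
static int example_fabric_write_pending(struct se_cmd *cmd)
{
	if (!example_fabric_can_post_r2t(cmd))	/* assumed helper */
		return -EAGAIN;	/* core defers via TRANSPORT_COMPLETE_QF_WP */

	/* post buffers / R2T for the incoming WRITE payload here */
	return 0;
}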
 	 */
-	ret = CMD_TFO(cmd)->write_pending(cmd);
-	if (ret < 0)
+	ret = cmd->se_tfo->write_pending(cmd);
+	if (ret == -EAGAIN)
+		goto queue_full;
+	else if (ret < 0)
 		return ret;

 	return PYX_TRANSPORT_WRITE_PENDING;
+
+queue_full:
+	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
+	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
+	transport_handle_queue_full(cmd, cmd->se_dev,
+			transport_write_pending_qf);
+	return ret;
 }

-/* transport_release_cmd_to_pool():
- *
- *
- */
-void transport_release_cmd_to_pool(struct se_cmd *cmd)
+void transport_release_cmd(struct se_cmd *cmd)
 {
-	BUG_ON(!T_TASK(cmd));
-	BUG_ON(!CMD_TFO(cmd));
+	BUG_ON(!cmd->se_tfo);

 	transport_free_se_cmd(cmd);
-	CMD_TFO(cmd)->release_cmd_to_pool(cmd);
+	cmd->se_tfo->release_cmd(cmd);
 }
-EXPORT_SYMBOL(transport_release_cmd_to_pool);
+EXPORT_SYMBOL(transport_release_cmd);

 /* transport_generic_free_cmd():
 *
 *
@@ -5294,19 +4351,18 @@ EXPORT_SYMBOL(transport_release_cmd_to_pool);
 void transport_generic_free_cmd(
 	struct se_cmd *cmd,
 	int wait_for_tasks,
-	int release_to_pool,
 	int session_reinstatement)
 {
-	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd))
-		transport_release_cmd_to_pool(cmd);
+	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD))
+		transport_release_cmd(cmd);
 	else {
 		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);

-		if (SE_LUN(cmd)) {
+		if (cmd->se_lun) {
 #if 0
-			printk(KERN_INFO "cmd: %p ITT: 0x%08x contains"
-				" SE_LUN(cmd)\n", cmd,
-				CMD_TFO(cmd)->get_task_tag(cmd));
+			pr_debug("cmd: %p ITT: 0x%08x contains"
+				" cmd->se_lun\n", cmd,
+				cmd->se_tfo->get_task_tag(cmd));
 #endif
 			transport_lun_remove_cmd(cmd);
 		}
@@ -5316,8 +4372,7 @@ void transport_generic_free_cmd(

 		transport_free_dev_tasks(cmd);

-		transport_generic_remove(cmd, release_to_pool,
-				session_reinstatement);
+		transport_generic_remove(cmd, session_reinstatement);
 	}
 }
 EXPORT_SYMBOL(transport_generic_free_cmd);
@@ -5343,43 +4398,36 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
 	 * If the frontend has already requested this struct se_cmd to
 	 * be stopped, we can safely ignore this struct se_cmd.
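/*
 * With release_to_pool gone, a fabric completion path frees a command
 * with the narrowed signature (hypothetical caller): the arguments are
 * (cmd, wait_for_tasks, session_reinstatement).
 */
static void example_fabric_cmd_done(struct se_cmd *cmd)
{
	transport_generic_free_cmd(cmd, 0, 0);
}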
*/ - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (atomic_read(&T_TASK(cmd)->t_transport_stop)) { - atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); - DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop ==" - " TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd)); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (atomic_read(&cmd->t_transport_stop)) { + atomic_set(&cmd->transport_lun_stop, 0); + pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop ==" + " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd)); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_cmd_check_stop(cmd, 1, 0); - return -1; + return -EPERM; } - atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + atomic_set(&cmd->transport_lun_fe_stop, 1); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); - wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq); + wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); ret = transport_stop_tasks_for_cmd(cmd); - DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:" - " %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret); + pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:" + " %d\n", cmd, cmd->t_task_list_num, ret); if (!ret) { - DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n", - CMD_TFO(cmd)->get_task_tag(cmd)); - wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp); - DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n", - CMD_TFO(cmd)->get_task_tag(cmd)); + pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", + cmd->se_tfo->get_task_tag(cmd)); + wait_for_completion(&cmd->transport_lun_stop_comp); + pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", + cmd->se_tfo->get_task_tag(cmd)); } - transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); + transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); return 0; } -/* #define DEBUG_CLEAR_LUN */ -#ifdef DEBUG_CLEAR_LUN -#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x) -#else -#define DEBUG_CLEAR_L(x...) -#endif - static void __transport_clear_lun_from_sessions(struct se_lun *lun) { struct se_cmd *cmd = NULL; @@ -5389,66 +4437,59 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) * Initiator Port. */ spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); - while (!list_empty_careful(&lun->lun_cmd_list)) { - cmd = list_entry(lun->lun_cmd_list.next, - struct se_cmd, se_lun_list); - list_del(&cmd->se_lun_list); - - if (!(T_TASK(cmd))) { - printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL" - "[i,t]_state: %u/%u\n", - CMD_TFO(cmd)->get_task_tag(cmd), - CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state); - BUG(); - } - atomic_set(&T_TASK(cmd)->transport_lun_active, 0); + while (!list_empty(&lun->lun_cmd_list)) { + cmd = list_first_entry(&lun->lun_cmd_list, + struct se_cmd, se_lun_node); + list_del(&cmd->se_lun_node); + + atomic_set(&cmd->transport_lun_active, 0); /* * This will notify iscsi_target_transport.c: * transport_cmd_check_stop() that a LUN shutdown is in * progress for the iscsi_cmd_t. 
*/ - spin_lock(&T_TASK(cmd)->t_state_lock); - DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport" + spin_lock(&cmd->t_state_lock); + pr_debug("SE_LUN[%d] - Setting cmd->transport" "_lun_stop for ITT: 0x%08x\n", - SE_LUN(cmd)->unpacked_lun, - CMD_TFO(cmd)->get_task_tag(cmd)); - atomic_set(&T_TASK(cmd)->transport_lun_stop, 1); - spin_unlock(&T_TASK(cmd)->t_state_lock); + cmd->se_lun->unpacked_lun, + cmd->se_tfo->get_task_tag(cmd)); + atomic_set(&cmd->transport_lun_stop, 1); + spin_unlock(&cmd->t_state_lock); spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); - if (!(SE_LUN(cmd))) { - printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n", - CMD_TFO(cmd)->get_task_tag(cmd), - CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state); + if (!cmd->se_lun) { + pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n", + cmd->se_tfo->get_task_tag(cmd), + cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); BUG(); } /* * If the Storage engine still owns the iscsi_cmd_t, determine * and/or stop its context. */ - DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport" - "_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun, - CMD_TFO(cmd)->get_task_tag(cmd)); + pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport" + "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, + cmd->se_tfo->get_task_tag(cmd)); - if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) { + if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) { spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); continue; } - DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun" + pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun" "_wait_for_tasks(): SUCCESS\n", - SE_LUN(cmd)->unpacked_lun, - CMD_TFO(cmd)->get_task_tag(cmd)); + cmd->se_lun->unpacked_lun, + cmd->se_tfo->get_task_tag(cmd)); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags); - if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); + spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); + if (!atomic_read(&cmd->transport_dev_active)) { + spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); goto check_cond; } - atomic_set(&T_TASK(cmd)->transport_dev_active, 0); + atomic_set(&cmd->transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); + spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); transport_free_dev_tasks(cmd); /* @@ -5465,24 +4506,24 @@ check_cond: * be released, notify the waiting thread now that LU has * finished accessing it. 
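/*
 * A simplified sketch of the LUN-shutdown handshake used above and in
 * transport_generic_wait_for_tasks() below (locking and the task-stop
 * details elided):
 */
static void example_clear_lun_side(struct se_cmd *cmd)
{
	atomic_set(&cmd->transport_lun_stop, 1);
	/* ... stop outstanding tasks ... */
	wait_for_completion(&cmd->transport_lun_stop_comp);
	/* hand the descriptor back to a waiting frontend */
	complete(&cmd->transport_lun_fe_stop_comp);
}

static void example_frontend_side(struct se_cmd *cmd)
{
	if (atomic_read(&cmd->transport_lun_stop)) {
		complete(&cmd->transport_lun_stop_comp);
		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
	}
}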
*/ - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags); - if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) { - DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for" + spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); + if (atomic_read(&cmd->transport_lun_fe_stop)) { + pr_debug("SE_LUN[%d] - Detected FE stop for" " struct se_cmd: %p ITT: 0x%08x\n", lun->unpacked_lun, - cmd, CMD_TFO(cmd)->get_task_tag(cmd)); + cmd, cmd->se_tfo->get_task_tag(cmd)); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); transport_cmd_check_stop(cmd, 1, 0); - complete(&T_TASK(cmd)->transport_lun_fe_stop_comp); + complete(&cmd->transport_lun_fe_stop_comp); spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); continue; } - DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n", - lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd)); + pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n", + lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); + spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); } spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); @@ -5502,11 +4543,11 @@ int transport_clear_lun_from_sessions(struct se_lun *lun) { struct task_struct *kt; - kt = kthread_run(transport_clear_lun_thread, (void *)lun, + kt = kthread_run(transport_clear_lun_thread, lun, "tcm_cl_%u", lun->unpacked_lun); if (IS_ERR(kt)) { - printk(KERN_ERR "Unable to start clear_lun thread\n"); - return -1; + pr_err("Unable to start clear_lun thread\n"); + return PTR_ERR(kt); } wait_for_completion(&lun->lun_shutdown_comp); @@ -5528,20 +4569,20 @@ static void transport_generic_wait_for_tasks( if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) return; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); /* * If we are already stopped due to an external event (ie: LUN shutdown) * sleep until the connection can have the passed struct se_cmd back. - * The T_TASK(cmd)->transport_lun_stopped_sem will be upped by + * The cmd->transport_lun_stopped_sem will be upped by * transport_clear_lun_from_sessions() once the ConfigFS context caller * has completed its operation on the struct se_cmd. */ - if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) { + if (atomic_read(&cmd->transport_lun_stop)) { - DEBUG_TRANSPORT_S("wait_for_tasks: Stopping" - " wait_for_completion(&T_TASK(cmd)transport_lun_fe" + pr_debug("wait_for_tasks: Stopping" + " wait_for_completion(&cmd->t_tasktransport_lun_fe" "_stop_comp); for ITT: 0x%08x\n", - CMD_TFO(cmd)->get_task_tag(cmd)); + cmd->se_tfo->get_task_tag(cmd)); /* * There is a special case for WRITES where a FE exception + * LUN shutdown means ConfigFS context is still sleeping on @@ -5549,10 +4590,10 @@ static void transport_generic_wait_for_tasks( * We go ahead and up transport_lun_stop_comp just to be sure * here. 
*/ - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); - complete(&T_TASK(cmd)->transport_lun_stop_comp); - wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + complete(&cmd->transport_lun_stop_comp); + wait_for_completion(&cmd->transport_lun_fe_stop_comp); + spin_lock_irqsave(&cmd->t_state_lock, flags); transport_all_task_dev_remove_state(cmd); /* @@ -5560,44 +4601,44 @@ static void transport_generic_wait_for_tasks( * struct se_cmd, now owns the structure and can be released through * normal means below. */ - DEBUG_TRANSPORT_S("wait_for_tasks: Stopped" - " wait_for_completion(&T_TASK(cmd)transport_lun_fe_" + pr_debug("wait_for_tasks: Stopped" + " wait_for_completion(&cmd->t_tasktransport_lun_fe_" "stop_comp); for ITT: 0x%08x\n", - CMD_TFO(cmd)->get_task_tag(cmd)); + cmd->se_tfo->get_task_tag(cmd)); - atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); + atomic_set(&cmd->transport_lun_stop, 0); } - if (!atomic_read(&T_TASK(cmd)->t_transport_active) || - atomic_read(&T_TASK(cmd)->t_transport_aborted)) + if (!atomic_read(&cmd->t_transport_active) || + atomic_read(&cmd->t_transport_aborted)) goto remove; - atomic_set(&T_TASK(cmd)->t_transport_stop, 1); + atomic_set(&cmd->t_transport_stop, 1); - DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x" + pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" - " = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd), - CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state, + " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd), + cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, cmd->deferred_t_state); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); - wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq); + wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); - wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp); + wait_for_completion(&cmd->t_transport_stop_comp); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - atomic_set(&T_TASK(cmd)->t_transport_active, 0); - atomic_set(&T_TASK(cmd)->t_transport_stop, 0); + spin_lock_irqsave(&cmd->t_state_lock, flags); + atomic_set(&cmd->t_transport_active, 0); + atomic_set(&cmd->t_transport_stop, 0); - DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion(" - "&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n", - CMD_TFO(cmd)->get_task_tag(cmd)); + pr_debug("wait_for_tasks: Stopped wait_for_compltion(" + "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", + cmd->se_tfo->get_task_tag(cmd)); remove: - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); if (!remove_cmd) return; - transport_generic_free_cmd(cmd, 0, 0, session_reinstatement); + transport_generic_free_cmd(cmd, 0, session_reinstatement); } static int transport_get_sense_codes( @@ -5632,13 +4673,13 @@ int transport_send_check_condition_and_sense( int offset; u8 asc = 0, ascq = 0; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 0; } cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); if 
@@ -5632,13 +4673,13 @@ int transport_send_check_condition_and_sense(
 	int offset;
 	u8 asc = 0, ascq = 0;
 
-	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
-		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return 0;
 	}
 	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
-	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	if (!reason && from_transport)
 		goto after_reason;
@@ -5651,7 +4692,7 @@ int transport_send_check_condition_and_sense(
 	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
 	 * from include/scsi/scsi_cmnd.h
 	 */
-	offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
+	offset = cmd->se_tfo->set_fabric_sense_len(cmd,
 			TRANSPORT_SENSE_BUFFER);
 	/*
 	 * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses
@@ -5788,8 +4829,7 @@ int transport_send_check_condition_and_sense(
 	cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER + offset;
 
 after_reason:
-	CMD_TFO(cmd)->queue_status(cmd);
-	return 0;
+	return cmd->se_tfo->queue_status(cmd);
 }
 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
 
@@ -5797,18 +4837,18 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 {
 	int ret = 0;
 
-	if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
-		if (!(send_status) ||
+	if (atomic_read(&cmd->t_transport_aborted) != 0) {
+		if (!send_status ||
 		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
 			return 1;
 #if 0
-		printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
+		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
 			" status for CDB: 0x%02x ITT: 0x%08x\n",
-			T_TASK(cmd)->t_task_cdb[0],
-			CMD_TFO(cmd)->get_task_tag(cmd));
+			cmd->t_task_cdb[0],
+			cmd->se_tfo->get_task_tag(cmd));
 #endif
 		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
-		CMD_TFO(cmd)->queue_status(cmd);
+		cmd->se_tfo->queue_status(cmd);
 		ret = 1;
 	}
 	return ret;
@@ -5824,8 +4864,8 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	 * queued back to fabric module by transport_check_aborted_status().
 	 */
 	if (cmd->data_direction == DMA_TO_DEVICE) {
-		if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
-			atomic_inc(&T_TASK(cmd)->t_transport_aborted);
+		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
+			atomic_inc(&cmd->t_transport_aborted);
 			smp_mb__after_atomic_inc();
 			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 			transport_new_cmd_failure(cmd);
@@ -5834,11 +4874,11 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	}
 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 #if 0
-	printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
-		" ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0],
-		CMD_TFO(cmd)->get_task_tag(cmd));
+	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
+		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
+		cmd->se_tfo->get_task_tag(cmd));
 #endif
-	CMD_TFO(cmd)->queue_status(cmd);
+	cmd->se_tfo->queue_status(cmd);
 }
 
 /*	transport_generic_do_tmr():
 *
 *
 */
 int transport_generic_do_tmr(struct se_cmd *cmd)
 {
-	struct se_cmd *ref_cmd;
-	struct se_device *dev = SE_DEV(cmd);
+	struct se_device *dev = cmd->se_dev;
 	struct se_tmr_req *tmr = cmd->se_tmr_req;
 	int ret;
 
 	switch (tmr->function) {
 	case TMR_ABORT_TASK:
-		ref_cmd = tmr->ref_cmd;
 		tmr->response = TMR_FUNCTION_REJECTED;
 		break;
 	case TMR_ABORT_TASK_SET:
@@ -5874,14 +4912,14 @@ int transport_generic_do_tmr(struct se_cmd *cmd)
 		tmr->response = TMR_FUNCTION_REJECTED;
 		break;
 	default:
-		printk(KERN_ERR "Uknown TMR function: 0x%02x.\n",
+		pr_err("Uknown TMR function: 0x%02x.\n",
 			tmr->function);
 		tmr->response = TMR_FUNCTION_REJECTED;
 		break;
 	}
 
 	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
-	CMD_TFO(cmd)->queue_tm_rsp(cmd);
+	cmd->se_tfo->queue_tm_rsp(cmd);
 
 	transport_cmd_check_stop(cmd, 2, 0);
 	return 0;
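
The after_reason change above is representative of a conversion applied throughout this patch: instead of invoking the fabric callback and unconditionally returning 0, its status is returned to the caller so that -EAGAIN can feed queue-full handling rather than being lost. A before/after sketch with placeholder demo_* types:

	struct demo_cmd;

	struct demo_fabric_ops {
		int (*queue_status)(struct demo_cmd *);
	};

	struct demo_cmd {
		const struct demo_fabric_ops *ops;
	};

	/* Before: the callback's status was swallowed. */
	static int demo_send_status_old(struct demo_cmd *cmd)
	{
		cmd->ops->queue_status(cmd);
		return 0;
	}

	/* After: propagate it so -EAGAIN can be retried by the caller. */
	static int demo_send_status_new(struct demo_cmd *cmd)
	{
		return cmd->ops->queue_status(cmd);
	}
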
@@ -5911,62 +4949,54 @@ transport_get_task_from_state_list(struct se_device *dev)
 static void transport_processing_shutdown(struct se_device *dev)
 {
 	struct se_cmd *cmd;
-	struct se_queue_req *qr;
 	struct se_task *task;
-	u8 state;
 	unsigned long flags;
 	/*
 	 * Empty the struct se_device's struct se_task state list.
 	 */
 	spin_lock_irqsave(&dev->execute_task_lock, flags);
 	while ((task = transport_get_task_from_state_list(dev))) {
-		if (!(TASK_CMD(task))) {
-			printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
+		if (!task->task_se_cmd) {
+			pr_err("task->task_se_cmd is NULL!\n");
 			continue;
 		}
-		cmd = TASK_CMD(task);
+		cmd = task->task_se_cmd;
 
-		if (!T_TASK(cmd)) {
-			printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
-				" %p ITT: 0x%08x\n", task, cmd,
-				CMD_TFO(cmd)->get_task_tag(cmd));
-			continue;
-		}
 		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 
-		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
 
-		DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
-			" i_state/def_i_state: %d/%d, t_state/def_t_state:"
+		pr_debug("PT: cmd: %p task: %p ITT: 0x%08x,"
+			" i_state: %d, t_state/def_t_state:"
 			" %d/%d cdb: 0x%02x\n", cmd, task,
-			CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn,
-			CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state,
+			cmd->se_tfo->get_task_tag(cmd),
+			cmd->se_tfo->get_cmd_state(cmd),
 			cmd->t_state, cmd->deferred_t_state,
-			T_TASK(cmd)->t_task_cdb[0]);
-		DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
+			cmd->t_task_cdb[0]);
+		pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:"
 			" %d t_task_cdbs_sent: %d -- t_transport_active: %d"
 			" t_transport_stop: %d t_transport_sent: %d\n",
-			CMD_TFO(cmd)->get_task_tag(cmd),
-			T_TASK(cmd)->t_task_cdbs,
-			atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
-			atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
-			atomic_read(&T_TASK(cmd)->t_transport_active),
-			atomic_read(&T_TASK(cmd)->t_transport_stop),
-			atomic_read(&T_TASK(cmd)->t_transport_sent));
+			cmd->se_tfo->get_task_tag(cmd),
+			cmd->t_task_list_num,
+			atomic_read(&cmd->t_task_cdbs_left),
+			atomic_read(&cmd->t_task_cdbs_sent),
+			atomic_read(&cmd->t_transport_active),
+			atomic_read(&cmd->t_transport_stop),
+			atomic_read(&cmd->t_transport_sent));
 
 		if (atomic_read(&task->task_active)) {
 			atomic_set(&task->task_stop, 1);
 			spin_unlock_irqrestore(
-				&T_TASK(cmd)->t_state_lock, flags);
+				&cmd->t_state_lock, flags);
 
-			DEBUG_DO("Waiting for task: %p to shutdown for dev:"
+			pr_debug("Waiting for task: %p to shutdown for dev:"
 				" %p\n", task, dev);
 			wait_for_completion(&task->task_stop_comp);
-			DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
+			pr_debug("Completed task: %p shutdown for dev: %p\n",
 				task, dev);
 
-			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
-			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+			spin_lock_irqsave(&cmd->t_state_lock, flags);
+			atomic_dec(&cmd->t_task_cdbs_left);
 
 			atomic_set(&task->task_active, 0);
 			atomic_set(&task->task_stop, 0);
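
In the shutdown hunk above, an active task is stopped by setting task_stop, releasing the command's state lock, and blocking on task_stop_comp until the execution context signals it; the lock must be dropped before waiting, or the completing context could never make progress. A minimal sketch of that wait, with placeholder demo_* names:

	#include <linux/spinlock.h>
	#include <linux/completion.h>
	#include <linux/atomic.h>

	struct demo_task {
		atomic_t		active;
		atomic_t		stop;
		struct completion	stop_comp;
	};

	/* Caller holds *lock (saved into *flags), as in the driver path. */
	static void demo_stop_active_task(struct demo_task *task,
					  spinlock_t *lock, unsigned long *flags)
	{
		atomic_set(&task->stop, 1);
		spin_unlock_irqrestore(lock, *flags);

		/* The execution context sees task->stop and complete()s this. */
		wait_for_completion(&task->stop_comp);

		spin_lock_irqsave(lock, *flags);
		atomic_set(&task->active, 0);
		atomic_set(&task->stop, 0);
	}
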
pr_debug("got t_transport_active = 1 for task: %p, dev:" " %p\n", task, dev); - if (atomic_read(&T_TASK(cmd)->t_fe_count)) { + if (atomic_read(&cmd->t_fe_count)) { spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_state_lock, flags); transport_send_check_condition_and_sense( cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); transport_remove_cmd_from_queue(cmd, - SE_DEV(cmd)->dev_queue_obj); + &cmd->se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); transport_cmd_check_stop(cmd, 1, 0); } else { spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_state_lock, flags); transport_remove_cmd_from_queue(cmd, - SE_DEV(cmd)->dev_queue_obj); + &cmd->se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop(cmd, 1, 0)) - transport_generic_remove(cmd, 0, 0); + transport_generic_remove(cmd, 0); } spin_lock_irqsave(&dev->execute_task_lock, flags); continue; } - DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n", + pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n", task, dev); - if (atomic_read(&T_TASK(cmd)->t_fe_count)) { + if (atomic_read(&cmd->t_fe_count)) { spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_state_lock, flags); transport_send_check_condition_and_sense(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); transport_remove_cmd_from_queue(cmd, - SE_DEV(cmd)->dev_queue_obj); + &cmd->se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); transport_cmd_check_stop(cmd, 1, 0); } else { spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_state_lock, flags); transport_remove_cmd_from_queue(cmd, - SE_DEV(cmd)->dev_queue_obj); + &cmd->se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop(cmd, 1, 0)) - transport_generic_remove(cmd, 0, 0); + transport_generic_remove(cmd, 0); } spin_lock_irqsave(&dev->execute_task_lock, flags); @@ -6050,18 +5080,12 @@ static void transport_processing_shutdown(struct se_device *dev) /* * Empty the struct se_device's struct se_cmd list. 
@@ -6050,18 +5080,12 @@ static void transport_processing_shutdown(struct se_device *dev)
 	/*
 	 * Empty the struct se_device's struct se_cmd list.
 	 */
-	spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
-	while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) {
-		spin_unlock_irqrestore(
-				&dev->dev_queue_obj->cmd_queue_lock, flags);
-		cmd = (struct se_cmd *)qr->cmd;
-		state = qr->state;
-		kfree(qr);
-
-		DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
-			cmd, state);
-
-		if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+	while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) {
+
+		pr_debug("From Device Queue: cmd: %p t_state: %d\n",
+			cmd, cmd->t_state);
+
+		if (atomic_read(&cmd->t_fe_count)) {
 			transport_send_check_condition_and_sense(cmd,
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
 
@@ -6070,11 +5094,9 @@ static void transport_processing_shutdown(struct se_device *dev)
 		} else {
 			transport_lun_remove_cmd(cmd);
 			if (transport_cmd_check_stop(cmd, 1, 0))
-				transport_generic_remove(cmd, 0, 0);
+				transport_generic_remove(cmd, 0);
 		}
-		spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
 	}
-	spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
 }
 
 /*	transport_processing_thread():
 *
 *
 */
 static int transport_processing_thread(void *param)
 {
-	int ret, t_state;
+	int ret;
 	struct se_cmd *cmd;
 	struct se_device *dev = (struct se_device *) param;
-	struct se_queue_req *qr;
 
 	set_user_nice(current, -20);
 
 	while (!kthread_should_stop()) {
-		ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq,
-			atomic_read(&dev->dev_queue_obj->queue_cnt) ||
+		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
+			atomic_read(&dev->dev_queue_obj.queue_cnt) ||
 			kthread_should_stop());
 		if (ret < 0)
 			goto out;
@@ -6108,22 +5129,18 @@ static int transport_processing_thread(void *param)
 get_cmd:
 		__transport_execute_tasks(dev);
 
-		qr = transport_get_qr_from_queue(dev->dev_queue_obj);
-		if (!(qr))
+		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
+		if (!cmd)
 			continue;
 
-		cmd = (struct se_cmd *)qr->cmd;
-		t_state = qr->state;
-		kfree(qr);
-
-		switch (t_state) {
+		switch (cmd->t_state) {
 		case TRANSPORT_NEW_CMD_MAP:
-			if (!(CMD_TFO(cmd)->new_cmd_map)) {
-				printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is"
+			if (!cmd->se_tfo->new_cmd_map) {
+				pr_err("cmd->se_tfo->new_cmd_map is"
 					" NULL for TRANSPORT_NEW_CMD_MAP\n");
 				BUG();
 			}
-			ret = CMD_TFO(cmd)->new_cmd_map(cmd);
+			ret = cmd->se_tfo->new_cmd_map(cmd);
 			if (ret < 0) {
 				cmd->transport_error_status = ret;
 				transport_generic_request_failure(cmd, NULL,
@@ -6134,7 +5151,9 @@ get_cmd:
 			/* Fall through */
 		case TRANSPORT_NEW_CMD:
 			ret = transport_generic_new_cmd(cmd);
-			if (ret < 0) {
+			if (ret == -EAGAIN)
+				break;
+			else if (ret < 0) {
 				cmd->transport_error_status = ret;
 				transport_generic_request_failure(cmd, NULL,
 					0, (cmd->data_direction !=
@@ -6149,10 +5168,10 @@ get_cmd:
 			transport_generic_complete_ok(cmd);
 			break;
 		case TRANSPORT_REMOVE:
-			transport_generic_remove(cmd, 1, 0);
+			transport_generic_remove(cmd, 0);
 			break;
 		case TRANSPORT_FREE_CMD_INTR:
-			transport_generic_free_cmd(cmd, 0, 1, 0);
+			transport_generic_free_cmd(cmd, 0, 0);
 			break;
 		case TRANSPORT_PROCESS_TMR:
 			transport_generic_do_tmr(cmd);
 			break;
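
transport_processing_thread() is a standard kthread event loop: sleep on the device waitqueue until work is queued or kthread_should_stop() fires, drain the queue, and dispatch on cmd->t_state, which after this patch lives in the command itself rather than in a separate se_queue_req wrapper. A skeletal version of the loop shape, with placeholder demo_* types and a stubbed dequeue:

	#include <linux/kthread.h>
	#include <linux/wait.h>
	#include <linux/atomic.h>
	#include <linux/bug.h>

	struct demo_queue {
		wait_queue_head_t	thread_wq;
		atomic_t		queue_cnt;
	};

	struct demo_cmd {
		int	t_state;
	};

	struct demo_dev {
		struct demo_queue	queue;
	};

	/* Stand-in for the real dequeue; returns NULL once drained. */
	static struct demo_cmd *demo_get_cmd_from_queue(struct demo_queue *q)
	{
		return NULL;
	}

	static int demo_processing_thread(void *param)
	{
		struct demo_dev *dev = param;
		struct demo_cmd *cmd;
		int ret;

		while (!kthread_should_stop()) {
			ret = wait_event_interruptible(dev->queue.thread_wq,
					atomic_read(&dev->queue.queue_cnt) ||
					kthread_should_stop());
			if (ret < 0)
				break;

			while ((cmd = demo_get_cmd_from_queue(&dev->queue))) {
				switch (cmd->t_state) {
				/* ... one case per deferred state ... */
				default:
					BUG();
				}
			}
		}
		return 0;
	}
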
@@ -6164,13 +5183,16 @@ get_cmd:
 			transport_stop_all_task_timers(cmd);
 			transport_generic_request_timeout(cmd);
 			break;
+		case TRANSPORT_COMPLETE_QF_WP:
+			transport_generic_write_pending(cmd);
+			break;
 		default:
-			printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
+			pr_err("Unknown t_state: %d deferred_t_state:"
 				" %d for ITT: 0x%08x i_state: %d on SE LUN:"
-				" %u\n", t_state, cmd->deferred_t_state,
-				CMD_TFO(cmd)->get_task_tag(cmd),
-				CMD_TFO(cmd)->get_cmd_state(cmd),
-				SE_LUN(cmd)->unpacked_lun);
+				" %u\n", cmd->t_state, cmd->deferred_t_state,
+				cmd->se_tfo->get_task_tag(cmd),
+				cmd->se_tfo->get_cmd_state(cmd),
+				cmd->se_lun->unpacked_lun);
 			BUG();
 		}
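
The new TRANSPORT_COMPLETE_QF_WP case and the -EAGAIN check under TRANSPORT_NEW_CMD are two ends of the queue-full path this patch introduces: when a fabric callback cannot accept work it returns -EAGAIN, the command is left queued, and the thread later retries it (for writes, via transport_generic_write_pending()) instead of failing the request. A reduced sketch of that retry decision, with demo_* placeholders:

	#include <linux/errno.h>

	struct demo_cmd;

	/* Stub standing in for the driver's real submit path. */
	static int demo_generic_new_cmd(struct demo_cmd *cmd)
	{
		return -EAGAIN;		/* pretend the fabric queue is full */
	}

	/* Stub standing in for the driver's real failure path. */
	static void demo_fail_cmd(struct demo_cmd *cmd, int err)
	{
	}

	static void demo_dispatch_new_cmd(struct demo_cmd *cmd)
	{
		int ret = demo_generic_new_cmd(cmd);	/* may return -EAGAIN */

		if (ret == -EAGAIN)
			return;		/* left queued; the thread retries later */
		if (ret < 0)
			demo_fail_cmd(cmd, ret);
	}
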