Diffstat (limited to 'drivers/target/target_core_transport.c')
-rw-r--r-- | drivers/target/target_core_transport.c | 274
1 file changed, 174 insertions, 100 deletions
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7dfefd66df93..434d9d693989 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -457,8 +457,20 @@ static void target_complete_nacl(struct kref *kref)
 {
 	struct se_node_acl *nacl = container_of(kref,
 				struct se_node_acl, acl_kref);
+	struct se_portal_group *se_tpg = nacl->se_tpg;
 
-	complete(&nacl->acl_free_comp);
+	if (!nacl->dynamic_stop) {
+		complete(&nacl->acl_free_comp);
+		return;
+	}
+
+	mutex_lock(&se_tpg->acl_node_mutex);
+	list_del(&nacl->acl_list);
+	mutex_unlock(&se_tpg->acl_node_mutex);
+
+	core_tpg_wait_for_nacl_pr_ref(nacl);
+	core_free_device_list_for_node(nacl, se_tpg);
+	kfree(nacl);
 }
 
 void target_put_nacl(struct se_node_acl *nacl)
@@ -499,12 +511,39 @@ EXPORT_SYMBOL(transport_deregister_session_configfs);
 
 void transport_free_session(struct se_session *se_sess)
 {
 	struct se_node_acl *se_nacl = se_sess->se_node_acl;
+
 	/*
 	 * Drop the se_node_acl->nacl_kref obtained from within
 	 * core_tpg_get_initiator_node_acl().
 	 */
 	if (se_nacl) {
+		struct se_portal_group *se_tpg = se_nacl->se_tpg;
+		const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
+		unsigned long flags;
+
 		se_sess->se_node_acl = NULL;
+
+		/*
+		 * Also determine if we need to drop the extra ->cmd_kref if
+		 * it had been previously dynamically generated, and
+		 * the endpoint is not caching dynamic ACLs.
+		 */
+		mutex_lock(&se_tpg->acl_node_mutex);
+		if (se_nacl->dynamic_node_acl &&
+		    !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
+			spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
+			if (list_empty(&se_nacl->acl_sess_list))
+				se_nacl->dynamic_stop = true;
+			spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
+
+			if (se_nacl->dynamic_stop)
+				list_del(&se_nacl->acl_list);
+		}
+		mutex_unlock(&se_tpg->acl_node_mutex);
+
+		if (se_nacl->dynamic_stop)
+			target_put_nacl(se_nacl);
+
 		target_put_nacl(se_nacl);
 	}
 	if (se_sess->sess_cmd_map) {
@@ -518,16 +557,12 @@ EXPORT_SYMBOL(transport_free_session);
 void transport_deregister_session(struct se_session *se_sess)
 {
 	struct se_portal_group *se_tpg = se_sess->se_tpg;
-	const struct target_core_fabric_ops *se_tfo;
-	struct se_node_acl *se_nacl;
 	unsigned long flags;
-	bool drop_nacl = false;
 
 	if (!se_tpg) {
 		transport_free_session(se_sess);
 		return;
 	}
-	se_tfo = se_tpg->se_tpg_tfo;
 
 	spin_lock_irqsave(&se_tpg->session_lock, flags);
 	list_del(&se_sess->sess_list);
@@ -535,33 +570,15 @@ void transport_deregister_session(struct se_session *se_sess)
 	se_sess->fabric_sess_ptr = NULL;
 	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
 
-	/*
-	 * Determine if we need to do extra work for this initiator node's
-	 * struct se_node_acl if it had been previously dynamically generated.
-	 */
-	se_nacl = se_sess->se_node_acl;
-
-	mutex_lock(&se_tpg->acl_node_mutex);
-	if (se_nacl && se_nacl->dynamic_node_acl) {
-		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
-			list_del(&se_nacl->acl_list);
-			drop_nacl = true;
-		}
-	}
-	mutex_unlock(&se_tpg->acl_node_mutex);
-
-	if (drop_nacl) {
-		core_tpg_wait_for_nacl_pr_ref(se_nacl);
-		core_free_device_list_for_node(se_nacl, se_tpg);
-		se_sess->se_node_acl = NULL;
-		kfree(se_nacl);
-	}
 	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
 		se_tpg->se_tpg_tfo->get_fabric_name());
+
 	/*
 	 * If last kref is dropping now for an explicit NodeACL, awake sleeping
 	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
 	 * removal context from within transport_free_session() code.
+	 *
+	 * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
+	 * to release all remaining generate_node_acl=1 created ACL resources.
 	 */
 
 	transport_free_session(se_sess);
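
The hunks above carry the dynamic ACL double-free fix: transport_deregister_session() no longer frees an uncached dynamic se_node_acl directly. Instead, transport_free_session() marks it with dynamic_stop under acl_node_mutex and drops an extra reference, so the actual release always runs behind the final kref put in target_complete_nacl(). Below is a minimal, self-contained sketch of that kref release pattern; the demo_* names are illustrative stand-ins rather than anything from the patch, while kref_put(), container_of() and the completion API are the real kernel interfaces.

/*
 * Sketch: deciding between "wake a waiter" and "free directly" inside
 * a kref release callback, as target_complete_nacl() now does.
 * Hypothetical demo_* names.
 */
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/completion.h>

struct demo_acl {
	struct kref		refcount;
	bool			dynamic_stop;	/* no sessions left, not cached */
	struct completion	free_comp;	/* configfs removal waits here */
};

static void demo_acl_release(struct kref *kref)
{
	struct demo_acl *acl = container_of(kref, struct demo_acl, refcount);

	if (!acl->dynamic_stop) {
		/* explicit ACL: wake the configfs removal context */
		complete(&acl->free_comp);
		return;
	}
	/* dynamically generated ACL: the last put frees it directly */
	kfree(acl);
}

static void demo_acl_put(struct demo_acl *acl)
{
	kref_put(&acl->refcount, demo_acl_release);
}

A caller only ever invokes demo_acl_put(); whether that wakes a waiter or frees the object is decided exactly once, inside the release callback, which is what makes the pattern safe against concurrent session shutdown.
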
@@ -576,9 +593,6 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
 	if (!dev)
 		return;
 
-	if (cmd->transport_state & CMD_T_BUSY)
-		return;
-
 	spin_lock_irqsave(&dev->execute_task_lock, flags);
 	if (cmd->state_active) {
 		list_del(&cmd->state_list);
@@ -587,24 +601,18 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
 
-static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
-				    bool write_pending)
+static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
 {
 	unsigned long flags;
 
-	if (remove_from_lists) {
-		target_remove_from_state_list(cmd);
+	target_remove_from_state_list(cmd);
 
-		/*
-		 * Clear struct se_cmd->se_lun before the handoff to FE.
-		 */
-		cmd->se_lun = NULL;
-	}
+	/*
+	 * Clear struct se_cmd->se_lun before the handoff to FE.
+	 */
+	cmd->se_lun = NULL;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (write_pending)
-		cmd->t_state = TRANSPORT_WRITE_PENDING;
-
 	/*
 	 * Determine if frontend context caller is requesting the stopping of
 	 * this command for frontend exceptions.
@@ -618,31 +626,18 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
 		complete_all(&cmd->t_transport_stop_comp);
 		return 1;
 	}
-
 	cmd->transport_state &= ~CMD_T_ACTIVE;
-	if (remove_from_lists) {
-		/*
-		 * Some fabric modules like tcm_loop can release
-		 * their internally allocated I/O reference now and
-		 * struct se_cmd now.
-		 *
-		 * Fabric modules are expected to return '1' here if the
-		 * se_cmd being passed is released at this point,
-		 * or zero if not being released.
-		 */
-		if (cmd->se_tfo->check_stop_free != NULL) {
-			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-			return cmd->se_tfo->check_stop_free(cmd);
-		}
-	}
-
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-	return 0;
-}
 
-static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
-{
-	return transport_cmd_check_stop(cmd, true, false);
+	/*
+	 * Some fabric modules like tcm_loop can release their internally
+	 * allocated I/O reference and struct se_cmd now.
+	 *
+	 * Fabric modules are expected to return '1' here if the se_cmd being
+	 * passed is released at this point, or zero if not being released.
+	 */
+	return cmd->se_tfo->check_stop_free ? cmd->se_tfo->check_stop_free(cmd)
+					    : 0;
 }
 
 static void transport_lun_remove_cmd(struct se_cmd *cmd)
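
With transport_cmd_check_stop() folded into its only remaining caller, the CMD_T_STOP handshake now reads straight through: test the stop flag under t_state_lock and, if a frontend waiter set it, drop the lock and wake everyone blocked on t_transport_stop_comp. A reduced, self-contained sketch of that handshake, using hypothetical demo_* names in place of the se_cmd machinery:

#include <linux/spinlock.h>
#include <linux/completion.h>

#define DEMO_T_ACTIVE	(1 << 0)
#define DEMO_T_STOP	(1 << 1)

struct demo_cmd {
	spinlock_t		state_lock;
	unsigned int		state;
	struct completion	stop_comp;
};

/* Returns true when a waiter requested a stop and has been woken. */
static bool demo_check_stop(struct demo_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->state_lock, flags);
	if (cmd->state & DEMO_T_STOP) {
		spin_unlock_irqrestore(&cmd->state_lock, flags);
		complete_all(&cmd->stop_comp);	/* wake all stop waiters */
		return true;
	}
	cmd->state &= ~DEMO_T_ACTIVE;		/* normal completion path */
	spin_unlock_irqrestore(&cmd->state_lock, flags);
	return false;
}
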
@@ -716,7 +711,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	cmd->transport_state &= ~CMD_T_BUSY;
 
 	if (dev && dev->transport->transport_complete) {
 		dev->transport->transport_complete(cmd,
@@ -1229,7 +1223,6 @@ void transport_init_se_cmd(
 	init_completion(&cmd->cmd_wait_comp);
 	spin_lock_init(&cmd->t_state_lock);
 	kref_init(&cmd->cmd_kref);
-	cmd->transport_state = CMD_T_DEV_ACTIVE;
 
 	cmd->se_tfo = tfo;
 	cmd->se_sess = se_sess;
@@ -1654,6 +1647,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 {
 	int ret = 0, post_ret = 0;
 
+	if (transport_check_aborted_status(cmd, 1))
+		return;
+
 	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
 		" CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
 	pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
@@ -1693,6 +1689,10 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
 	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
 	case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
+	case TCM_TOO_MANY_TARGET_DESCS:
+	case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
+	case TCM_TOO_MANY_SEGMENT_DESCS:
+	case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
 		break;
 	case TCM_OUT_OF_RESOURCES:
 		sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -1780,7 +1780,7 @@ void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
 	return;
 
 err:
 	spin_lock_irq(&cmd->t_state_lock);
-	cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+	cmd->transport_state &= ~CMD_T_SENT;
 	spin_unlock_irq(&cmd->t_state_lock);
 
 	transport_generic_request_failure(cmd, ret);
@@ -1808,7 +1808,7 @@ static int target_write_prot_action(struct se_cmd *cmd)
 						   sectors, 0, cmd->t_prot_sg, 0);
 		if (unlikely(cmd->pi_err)) {
 			spin_lock_irq(&cmd->t_state_lock);
-			cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+			cmd->transport_state &= ~CMD_T_SENT;
 			spin_unlock_irq(&cmd->t_state_lock);
 			transport_generic_request_failure(cmd, cmd->pi_err);
 			return -1;
@@ -1897,7 +1897,7 @@ void target_execute_cmd(struct se_cmd *cmd)
 	}
 
 	cmd->t_state = TRANSPORT_PROCESSING;
-	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
+	cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
 	spin_unlock_irq(&cmd->t_state_lock);
 
 	if (target_write_prot_action(cmd))
@@ -1905,7 +1905,7 @@ void target_execute_cmd(struct se_cmd *cmd)
 
 	if (target_handle_task_attr(cmd)) {
 		spin_lock_irq(&cmd->t_state_lock);
-		cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
+		cmd->transport_state &= ~CMD_T_SENT;
 		spin_unlock_irq(&cmd->t_state_lock);
 		return;
 	}
@@ -1958,8 +1958,6 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 	if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
 		atomic_dec_mb(&dev->simple_cmds);
 		dev->dev_cur_ordered_id++;
-		pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n",
-			 dev->dev_cur_ordered_id);
 	} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
 		dev->dev_cur_ordered_id++;
 		pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
@@ -2366,6 +2364,7 @@ EXPORT_SYMBOL(target_alloc_sgl);
 sense_reason_t
 transport_generic_new_cmd(struct se_cmd *cmd)
 {
+	unsigned long flags;
 	int ret = 0;
 	bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
 
@@ -2431,7 +2430,24 @@ transport_generic_new_cmd(struct se_cmd *cmd)
 		target_execute_cmd(cmd);
 		return 0;
 	}
-	transport_cmd_check_stop(cmd, false, true);
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	cmd->t_state = TRANSPORT_WRITE_PENDING;
+	/*
+	 * Determine if frontend context caller is requesting the stopping of
+	 * this command for frontend exceptions.
+	 */
+	if (cmd->transport_state & CMD_T_STOP) {
+		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
+			 __func__, __LINE__, cmd->tag);
+
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+		complete_all(&cmd->t_transport_stop_comp);
+		return 0;
+	}
+	cmd->transport_state &= ~CMD_T_ACTIVE;
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	ret = cmd->se_tfo->write_pending(cmd);
 	if (ret == -EAGAIN || ret == -ENOMEM)
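
One discipline holds across all of the flag hunks above: transport_state is a plain bitmask, so every set or clear of CMD_T_ACTIVE or CMD_T_SENT is a read-modify-write and must stay inside a t_state_lock critical section. Dropping CMD_T_BUSY shrinks the mask, but the locking does not change. In miniature, with hypothetical names:

#include <linux/spinlock.h>

#define DEMO_T_SENT	(1 << 2)

struct demo_cmd_state {
	spinlock_t	lock;
	unsigned int	flags;
};

static void demo_clear_sent(struct demo_cmd_state *s)
{
	unsigned long irqflags;

	/* the RMW on the shared bitmask must not race other flag updates */
	spin_lock_irqsave(&s->lock, irqflags);
	s->flags &= ~DEMO_T_SENT;
	spin_unlock_irqrestore(&s->lock, irqflags);
}
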
@@ -2574,39 +2590,38 @@ static void target_release_cmd_kref(struct kref *kref)
 	unsigned long flags;
 	bool fabric_stop;
 
-	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+	if (se_sess) {
+		spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 
-	spin_lock(&se_cmd->t_state_lock);
-	fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
-		      (se_cmd->transport_state & CMD_T_ABORTED);
-	spin_unlock(&se_cmd->t_state_lock);
+		spin_lock(&se_cmd->t_state_lock);
+		fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
+			      (se_cmd->transport_state & CMD_T_ABORTED);
+		spin_unlock(&se_cmd->t_state_lock);
 
-	if (se_cmd->cmd_wait_set || fabric_stop) {
+		if (se_cmd->cmd_wait_set || fabric_stop) {
+			list_del_init(&se_cmd->se_cmd_list);
+			spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+			target_free_cmd_mem(se_cmd);
+			complete(&se_cmd->cmd_wait_comp);
+			return;
+		}
 		list_del_init(&se_cmd->se_cmd_list);
 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
-		target_free_cmd_mem(se_cmd);
-		complete(&se_cmd->cmd_wait_comp);
-		return;
 	}
-	list_del_init(&se_cmd->se_cmd_list);
-	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
 	target_free_cmd_mem(se_cmd);
 	se_cmd->se_tfo->release_cmd(se_cmd);
 }
 
-/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
- * @se_cmd:	command descriptor to drop
+/**
+ * target_put_sess_cmd - decrease the command reference count
+ * @se_cmd:	command to drop a reference from
+ *
+ * Returns 1 if and only if this target_put_sess_cmd() call caused the
+ * refcount to drop to zero. Returns zero otherwise.
  */
 int target_put_sess_cmd(struct se_cmd *se_cmd)
 {
-	struct se_session *se_sess = se_cmd->se_sess;
-
-	if (!se_sess) {
-		target_free_cmd_mem(se_cmd);
-		se_cmd->se_tfo->release_cmd(se_cmd);
-		return 1;
-	}
 	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
 }
 EXPORT_SYMBOL(target_put_sess_cmd);
@@ -2685,10 +2700,39 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
 }
 EXPORT_SYMBOL(target_wait_for_sess_cmds);
 
+static void target_lun_confirm(struct percpu_ref *ref)
+{
+	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
+
+	complete(&lun->lun_ref_comp);
+}
+
 void transport_clear_lun_ref(struct se_lun *lun)
 {
-	percpu_ref_kill(&lun->lun_ref);
+	/*
+	 * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop
+	 * the initial reference and schedule confirm kill to be
+	 * executed after one full RCU grace period has completed.
+	 */
+	percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm);
+	/*
+	 * The first completion waits for percpu_ref_switch_to_atomic_rcu()
+	 * to call target_lun_confirm after lun->lun_ref has been marked
+	 * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t
+	 * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref
+	 * fails for all new incoming I/O.
+	 */
 	wait_for_completion(&lun->lun_ref_comp);
+	/*
+	 * The second completion waits for percpu_ref_put_many() to
+	 * invoke ->release() after lun->lun_ref has switched to
+	 * atomic_t mode, and lun->lun_ref.count has reached zero.
+	 *
+	 * At this point all target-core lun->lun_ref references have
+	 * been dropped via transport_lun_remove_cmd(), and it's safe
+	 * to proceed with the remaining LUN shutdown.
+	 */
+	wait_for_completion(&lun->lun_shutdown_comp);
 }
 
 static bool
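
The rewritten transport_clear_lun_ref() is a two-stage percpu_ref shutdown: the confirm callback signals that the ref is dead on every CPU, so percpu_ref_tryget_live() fails for any new I/O, and the release callback signals that the count has drained to zero. The sketch below shows the shape of that shutdown under hypothetical demo_* names; percpu_ref_kill_and_confirm(), percpu_ref_init() and the completion API are the real interfaces.

#include <linux/kernel.h>
#include <linux/percpu-refcount.h>
#include <linux/completion.h>

struct demo_lun {
	struct percpu_ref	ref;
	struct completion	ref_comp;	/* ref is dead on all CPUs */
	struct completion	shutdown_comp;	/* last reference dropped */
};

static void demo_lun_confirm(struct percpu_ref *ref)
{
	struct demo_lun *lun = container_of(ref, struct demo_lun, ref);

	complete(&lun->ref_comp);	/* tryget_live() now fails everywhere */
}

/* registered earlier via percpu_ref_init(&lun->ref, demo_lun_release, 0, GFP_KERNEL) */
static void demo_lun_release(struct percpu_ref *ref)
{
	struct demo_lun *lun = container_of(ref, struct demo_lun, ref);

	complete(&lun->shutdown_comp);	/* all in-flight holders are gone */
}

static void demo_clear_lun_ref(struct demo_lun *lun)
{
	percpu_ref_kill_and_confirm(&lun->ref, demo_lun_confirm);
	wait_for_completion(&lun->ref_comp);		/* stage 1: no new I/O */
	wait_for_completion(&lun->shutdown_comp);	/* stage 2: old I/O drained */
}

Waiting on the two completions in order is what guarantees both that no new I/O can take a reference and that all outstanding I/O has dropped its references before LUN teardown continues.
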
@@ -2744,11 +2788,8 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
 }
 
 /**
- * transport_wait_for_tasks - wait for completion to occur
- * @cmd:	command to wait
- *
- * Called from frontend fabric context to wait for storage engine
- * to pause and/or release frontend generated struct se_cmd.
+ * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp
+ * @cmd:	command to wait on
  */
 bool transport_wait_for_tasks(struct se_cmd *cmd)
 {
@@ -2808,6 +2849,26 @@ static const struct sense_info sense_info_table[] = {
 		.key = ILLEGAL_REQUEST,
 		.asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
 	},
+	[TCM_TOO_MANY_TARGET_DESCS] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x26,
+		.ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
+	},
+	[TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x26,
+		.ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
+	},
+	[TCM_TOO_MANY_SEGMENT_DESCS] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x26,
+		.ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
+	},
+	[TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
+		.key = ILLEGAL_REQUEST,
+		.asc = 0x26,
+		.ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
+	},
 	[TCM_PARAMETER_LIST_LENGTH_ERROR] = {
 		.key = ILLEGAL_REQUEST,
 		.asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
@@ -3086,7 +3147,6 @@ static void target_tmr_work(struct work_struct *work)
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		goto check_stop;
 	}
-	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	cmd->se_tfo->queue_tm_rsp(cmd);
@@ -3099,11 +3159,25 @@ int transport_generic_handle_tmr(
 	struct se_cmd *cmd)
 {
 	unsigned long flags;
+	bool aborted = false;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	cmd->transport_state |= CMD_T_ACTIVE;
+	if (cmd->transport_state & CMD_T_ABORTED) {
+		aborted = true;
+	} else {
+		cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+		cmd->transport_state |= CMD_T_ACTIVE;
+	}
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
+	if (aborted) {
+		pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
+			"ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
+			cmd->se_tmr_req->ref_task_tag, cmd->tag);
+		transport_cmd_check_stop_to_fabric(cmd);
+		return 0;
+	}
+
 	INIT_WORK(&cmd->work, target_tmr_work);
 	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
 	return 0;
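
The four sense_info_table[] entries added above give the EXTENDED COPY failure paths their SPC-defined ILLEGAL REQUEST codes, ASC 0x26 with ASCQ 0x06 through 0x09. For orientation, here is a sketch of where such a key/asc/ascq triple lands in a fixed-format sense buffer; the byte offsets follow SPC, but the helper itself is hypothetical and not how target core emits sense data:

#include <linux/types.h>

static void demo_fill_fixed_sense(u8 *buf, u8 key, u8 asc, u8 ascq)
{
	buf[0]  = 0x70;	/* current errors, fixed format */
	buf[2]  = key;	/* e.g. ILLEGAL_REQUEST = 0x05 */
	buf[7]  = 0x0a;	/* additional sense length */
	buf[12] = asc;	/* e.g. 0x26 */
	buf[13] = ascq;	/* e.g. 0x06 = TOO MANY TARGET DESCRIPTORS */
}
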