Target drivers must call target_sess_cmd_list_set_waiting() and
target_wait_for_sess_cmds() before freeing a session. Instead of setting
a flag in each pending command in the former function and waiting on a
per-command completion in the latter, set a single per-session flag in
the former function and wait in the latter on a per-session wait queue
until the session command list has drained.

Note: this patch changes the behavior from aborting outstanding commands
back to waiting for command completion. See also commit 0f4a943168f3
("target: Fix remote-port TMR ABORT + se_cmd fabric stop").

This patch is based on the following two patches:
* Bart Van Assche, target: Simplify session shutdown code, February 19,
  2015 (https://github.com/bvanassche/linux/commit/8df5463d7d7619f2f1b70cfe5172eaef0aa52815).
* Christoph Hellwig, target: Rework session shutdown code, December 7,
  2015 (http://thread.gmane.org/gmane.linux.scsi.target.devel/10695).

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Sagi Grimberg <sagig@mellanox.com>
Cc: Andy Grover <agrover@redhat.com>
---
 drivers/target/target_core_tmr.c       |   4 +-
 drivers/target/target_core_transport.c | 106 ++++++------------------------
 include/target/target_core_base.h      |   4 +-
 3 files changed, 29 insertions(+), 85 deletions(-)

diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -114,7 +114,7 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
spin_unlock(&se_cmd->t_state_lock);
return false;
}
- if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
+ if (sess->sess_tearing_down) {
pr_debug("Attempted to abort io tag: %llu already shutdown,"
" skipping\n", se_cmd->tag);
spin_unlock(&se_cmd->t_state_lock);
@@ -247,7 +247,7 @@ static void core_tmr_drain_tmr_list(
spin_unlock(&sess->sess_cmd_lock);
continue;
}
- if (sess->sess_tearing_down || cmd->cmd_wait_set) {
+ if (sess->sess_tearing_down) {
spin_unlock(&cmd->t_state_lock);
spin_unlock(&sess->sess_cmd_lock);
continue;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -237,8 +237,8 @@ struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
INIT_LIST_HEAD(&se_sess->sess_list);
INIT_LIST_HEAD(&se_sess->sess_acl_list);
INIT_LIST_HEAD(&se_sess->sess_cmd_list);
- INIT_LIST_HEAD(&se_sess->sess_wait_list);
spin_lock_init(&se_sess->sess_cmd_lock);
+ init_waitqueue_head(&se_sess->cmd_list_wq);
se_sess->sup_prot_ops = sup_prot_ops;
return se_sess;
@@ -1223,7 +1223,6 @@ void transport_init_se_cmd(
INIT_LIST_HEAD(&cmd->se_cmd_list);
INIT_LIST_HEAD(&cmd->state_list);
init_completion(&cmd->t_transport_stop_comp);
- init_completion(&cmd->cmd_wait_comp);
init_completion(&cmd->finished);
spin_lock_init(&cmd->t_state_lock);
kref_init(&cmd->cmd_kref);
@@ -2524,16 +2523,6 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
if (cmd->se_lun)
transport_lun_remove_cmd(cmd);
}
- /*
- * If the task has been internally aborted due to TMR ABORT_TASK
- * or LUN_RESET, target_core_tmr.c is responsible for performing
- * the remaining calls to target_put_sess_cmd(), and not the
- * callers of this function.
- */
- if (aborted) {
- pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
- wait_for_completion(&cmd->cmd_wait_comp);
- }
return transport_put_cmd(cmd);
}
EXPORT_SYMBOL(transport_generic_free_cmd);
@@ -2591,25 +2580,15 @@ static void target_release_cmd_kref(struct kref *kref)
struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
struct se_session *se_sess = se_cmd->se_sess;
unsigned long flags;
- bool fabric_stop;
complete_all(&se_cmd->finished);
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
-
- spin_lock(&se_cmd->t_state_lock);
- fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
- (se_cmd->transport_state & CMD_T_ABORTED);
- spin_unlock(&se_cmd->t_state_lock);
-
- if (se_cmd->cmd_wait_set || fabric_stop) {
- list_del_init(&se_cmd->se_cmd_list);
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- target_free_cmd_mem(se_cmd);
- complete(&se_cmd->cmd_wait_comp);
- return;
+ if (likely(!list_empty(&se_cmd->se_cmd_list))) {
+ list_del(&se_cmd->se_cmd_list);
+ if (list_empty(&se_sess->sess_cmd_list))
+ wake_up(&se_sess->cmd_list_wq);
}
- list_del_init(&se_cmd->se_cmd_list);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
target_free_cmd_mem(se_cmd);
@@ -2632,77 +2611,44 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
}
EXPORT_SYMBOL(target_put_sess_cmd);
-/* target_sess_cmd_list_set_waiting - Flag all commands in
- * sess_cmd_list to complete cmd_wait_comp. Set
- * sess_tearing_down so no more commands are queued.
+/**
+ * target_sess_cmd_list_set_waiting - Prevent new commands from being queued
* @se_sess: session to flag
*/
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
- struct se_cmd *se_cmd, *tmp_cmd;
unsigned long flags;
- int rc;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- if (se_sess->sess_tearing_down) {
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- return;
- }
se_sess->sess_tearing_down = 1;
- list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
-
- list_for_each_entry_safe(se_cmd, tmp_cmd,
- &se_sess->sess_wait_list, se_cmd_list) {
- rc = kref_get_unless_zero(&se_cmd->cmd_kref);
- if (rc) {
- se_cmd->cmd_wait_set = 1;
- spin_lock(&se_cmd->t_state_lock);
- se_cmd->transport_state |= CMD_T_FABRIC_STOP;
- spin_unlock(&se_cmd->t_state_lock);
- } else
- list_del_init(&se_cmd->se_cmd_list);
- }
-
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
-/* target_wait_for_sess_cmds - Wait for outstanding descriptors
+/**
+ * target_wait_for_sess_cmds - Wait for outstanding commands
* @se_sess: session to wait for active I/O
*/
void target_wait_for_sess_cmds(struct se_session *se_sess)
{
- struct se_cmd *se_cmd, *tmp_cmd;
- unsigned long flags;
- bool tas;
-
- list_for_each_entry_safe(se_cmd, tmp_cmd,
- &se_sess->sess_wait_list, se_cmd_list) {
- pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
- " %d\n", se_cmd, se_cmd->t_state,
- se_cmd->se_tfo->get_cmd_state(se_cmd));
-
- spin_lock_irqsave(&se_cmd->t_state_lock, flags);
- tas = (se_cmd->transport_state & CMD_T_TAS);
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-
- if (!target_put_sess_cmd(se_cmd)) {
- if (tas)
- target_put_sess_cmd(se_cmd);
- }
-
- wait_for_completion(&se_cmd->cmd_wait_comp);
- pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
- " fabric state: %d\n", se_cmd, se_cmd->t_state,
- se_cmd->se_tfo->get_cmd_state(se_cmd));
-
- target_put_sess_cmd(se_cmd);
- }
-
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- WARN_ON(!list_empty(&se_sess->sess_cmd_list));
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ struct se_cmd *cmd;
+ int ret;
+ WARN_ON_ONCE(!se_sess->sess_tearing_down);
+
+ spin_lock_irq(&se_sess->sess_cmd_lock);
+ do {
+ ret = wait_event_interruptible_lock_irq_timeout(
+ se_sess->cmd_list_wq,
+ list_empty(&se_sess->sess_cmd_list),
+ se_sess->sess_cmd_lock, 180 * HZ);
+ list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
+ pr_debug("%s: ITT %#llx dir %d i_state %d t_state %d len %d\n",
+ __func__, cmd->tag, cmd->data_direction,
+ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
+ cmd->data_length);
+ } while (ret <= 0);
+ spin_unlock_irq(&se_sess->sess_cmd_lock);
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
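The new release/wait pair is an instance of a common kernel pattern: the
teardown path sleeps until a list drains, and whoever removes the last
element issues the wake-up. Below is a minimal, self-contained sketch of
that pattern under stated assumptions: all names (struct owner, put_item(),
drain_items()) are hypothetical, and the plain wait_event_lock_irq() stands
in for the interruptible, timed variant the patch uses.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Hypothetical owner of a set of in-flight items. */
struct owner {
	struct list_head items;		/* in-flight items */
	spinlock_t lock;		/* protects @items */
	wait_queue_head_t drain_wq;	/* woken on the empty transition */
};

/*
 * Release path: remove one item and, like target_release_cmd_kref()
 * above, wake the drainer only when the list becomes empty.
 */
static void put_item(struct owner *o, struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(&o->lock, flags);
	list_del(entry);
	if (list_empty(&o->items))
		wake_up(&o->drain_wq);
	spin_unlock_irqrestore(&o->lock, flags);
}

/*
 * Teardown path: like target_wait_for_sess_cmds() above, sleep until
 * every item has been released. wait_event_lock_irq() drops the lock
 * while sleeping and reacquires it before rechecking the condition.
 */
static void drain_items(struct owner *o)
{
	spin_lock_irq(&o->lock);
	wait_event_lock_irq(o->drain_wq, list_empty(&o->items), o->lock);
	spin_unlock_irq(&o->lock);
}

In the patch itself, the interruptible 180-second timeout only bounds how
often the still-outstanding commands are reported via pr_debug(); the
do/while loop goes back to waiting until the list is actually empty.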
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -440,7 +440,6 @@ struct se_cmd {
u8 scsi_asc;
u8 scsi_ascq;
u16 scsi_sense_length;
- unsigned cmd_wait_set:1;
unsigned unknown_data_length:1;
bool state_active:1;
bool send_abort_response:1;
@@ -473,7 +472,6 @@ struct se_cmd {
struct se_session *se_sess;
struct se_tmr_req *se_tmr_req;
struct list_head se_cmd_list;
- struct completion cmd_wait_comp;
const struct target_core_fabric_ops *se_tfo;
sense_reason_t (*execute_cmd)(struct se_cmd *);
sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
@@ -604,8 +602,8 @@ struct se_session {
struct list_head sess_list;
struct list_head sess_acl_list;
struct list_head sess_cmd_list;
- struct list_head sess_wait_list;
spinlock_t sess_cmd_lock;
+ wait_queue_head_t cmd_list_wq;
void *sess_cmd_map;
struct percpu_ida sess_tag_pool;
};
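As the commit message above notes, target drivers must call
target_sess_cmd_list_set_waiting() and target_wait_for_sess_cmds() before
freeing a session, and that call-site contract is unchanged by this patch.
A minimal sketch of the teardown sequence as it might appear in a fabric
driver follows; my_fabric_close_session() is a hypothetical driver
function, while the three target-core calls are real entry points.

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/* Hypothetical fabric-driver session teardown path. */
static void my_fabric_close_session(struct se_session *se_sess)
{
	/* Set sess_tearing_down so target core queues no new commands. */
	target_sess_cmd_list_set_waiting(se_sess);

	/*
	 * Block until sess_cmd_list is empty; with this patch that is a
	 * single wait on se_sess->cmd_list_wq rather than one
	 * wait_for_completion() per outstanding command.
	 */
	target_wait_for_sess_cmds(se_sess);

	/* Now the session can safely be torn down. */
	transport_deregister_session(se_sess);
}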