@@ -111,6 +111,8 @@ struct tcmu_dev {
struct idr commands;
spinlock_t commands_lock;
+ struct timer_list timeout;
+
char dev_config[TCMU_CONFIG_LEN];
};
@@ -127,6 +129,11 @@ struct tcmu_cmd {
/* Can't use se_cmd when cleaning up expired cmds, because if
cmd has been completed then accessing se_cmd is off limits */
DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);
+
+ unsigned long deadline;
+
+#define TCMU_CMD_BIT_EXPIRED 0
+ unsigned long flags;
};
static struct kmem_cache *tcmu_cmd_cache;
@@ -165,6 +172,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
+ tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
idr_preload(GFP_KERNEL);
spin_lock_irq(&udev->commands_lock);
@@ -518,6 +526,9 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
/* TODO: only if FLUSH and FUA? */
uio_event_notify(&udev->uio_info);
+ mod_timer(&udev->timeout,
+ round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
+
return TCM_NO_SENSE;
}
@@ -551,6 +562,17 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
struct se_cmd *se_cmd = cmd->se_cmd;
struct tcmu_dev *udev = cmd->tcmu_dev;
+ if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ /*
+ * cmd has been completed already from timeout, just reclaim
+ * data area space and free cmd
+ */
+ free_data_area(udev, cmd);
+
+ kmem_cache_free(tcmu_cmd_cache, cmd);
+ return;
+ }
+
if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
free_data_area(udev, cmd);
pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
@@ -638,6 +660,9 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
handled++;
}
+ if (mb->cmd_tail == mb->cmd_head)
+ del_timer(&udev->timeout); /* no more pending cmds */
+
spin_unlock_irqrestore(&udev->cmdr_lock, flags);
wake_up(&udev->wait_cmdr);
@@ -645,6 +670,43 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
return handled;
}
+static int tcmu_check_expired_cmd(int id, void *p, void *data)
+{
+ struct tcmu_cmd *cmd = p;
+
+ if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
+ return 0;
+
+ if (!time_after(jiffies, cmd->deadline))
+ return 0;
+
+ set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
+ target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
+ cmd->se_cmd = NULL;
+
+ return 0;
+}
+
+static void tcmu_device_timedout(unsigned long data)
+{
+ struct tcmu_dev *udev = (struct tcmu_dev *)data;
+ unsigned long flags;
+ int handled;
+
+ handled = tcmu_handle_completions(udev);
+
+ pr_warn("%d completions handled from timeout\n", handled);
+
+ spin_lock_irqsave(&udev->commands_lock, flags);
+ idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
+ spin_unlock_irqrestore(&udev->commands_lock, flags);
+
+ /*
+ * We don't need to wakeup threads on wait_cmdr since they have their
+ * own timeout.
+ */
+}
+
static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
struct tcmu_hba *tcmu_hba;
@@ -687,6 +749,9 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
idr_init(&udev->commands);
spin_lock_init(&udev->commands_lock);
+ setup_timer(&udev->timeout, tcmu_device_timedout,
+ (unsigned long)udev);
+
return &udev->se_dev;
}
@@ -916,6 +981,15 @@ static int tcmu_configure_device(struct se_device *dev)
return ret;
}
+static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
+{
+ if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ kmem_cache_free(tcmu_cmd_cache, cmd);
+ return 0;
+ }
+ return -EINVAL;
+}
+
static void tcmu_dev_call_rcu(struct rcu_head *p)
{
struct se_device *dev = container_of(p, struct se_device, rcu_head);
@@ -927,14 +1001,23 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
static void tcmu_free_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
+ struct tcmu_cmd *cmd;
+ bool all_expired = true;
+ int i;
+
+ del_timer_sync(&udev->timeout);
vfree(udev->mb_addr);
/* Upper layer should drain all requests before calling this */
spin_lock_irq(&udev->commands_lock);
- WARN_ON(!idr_is_empty(&udev->commands));
+ idr_for_each_entry(&udev->commands, cmd, i) {
+ if (tcmu_check_and_free_pending_cmd(cmd) != 0)
+ all_expired = false;
+ }
idr_destroy(&udev->commands);
spin_unlock_irq(&udev->commands_lock);
+ WARN_ON(!all_expired);
/* Device was configured */
if (udev->uio_info.uio_dev) {
This reverts:

    commit bb9ce825327ae426c02d5c330dd17acece3653a9
    Author: Mike Christie <mchristi@redhat.com>
    Date:   Mon Mar 6 11:38:52 2017 -0600

        tcmu: remove cmd timeout code

v2

tcmu-runner does not yet clean up running commands during restart, but
other daemons might, so add this feature back. The next patch will make
it configurable, so daemons or devices that do not support restarts can
disable it.

Signed-off-by: Mike Christie <mchristi@redhat.com>
---
 drivers/target/target_core_user.c | 85 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 84 insertions(+), 1 deletion(-)
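
Note (not part of the patch): the scheme being restored is "one absolute
deadline per queued command, one timer per device that sweeps the
outstanding commands and fails any whose deadline has passed, while the
command entry is kept around, marked expired, so a late completion from
userspace can still reclaim its ring-buffer space". The following is a
minimal userspace sketch of that pattern, for illustration only; every
name in it (fake_dev, fake_cmd, check_expired, FAKE_TIME_OUT_SEC, ...)
is made up and none of it is the kernel API actually used above.

/*
 * Userspace sketch of the per-command deadline + periodic expiry sweep.
 * A fixed array stands in for the IDR, a boolean for TCMU_CMD_BIT_EXPIRED,
 * and time(NULL) for jiffies/time_after().
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define FAKE_TIME_OUT_SEC 30
#define MAX_CMDS 16

struct fake_cmd {
	bool in_use;
	bool expired;        /* plays the role of TCMU_CMD_BIT_EXPIRED */
	time_t deadline;     /* plays the role of cmd->deadline */
	int id;
};

struct fake_dev {
	struct fake_cmd cmds[MAX_CMDS];   /* stand-in for udev->commands */
};

/* Queue a command: record the absolute time by which it must finish. */
static void queue_cmd(struct fake_dev *dev, int slot, int id)
{
	dev->cmds[slot].in_use = true;
	dev->cmds[slot].expired = false;
	dev->cmds[slot].id = id;
	dev->cmds[slot].deadline = time(NULL) + FAKE_TIME_OUT_SEC;
}

/*
 * Timer-callback equivalent: walk the outstanding commands and mark the
 * ones whose deadline has passed, but keep the slot occupied so a late
 * completion can still be matched and its resources reclaimed.
 */
static int check_expired(struct fake_dev *dev)
{
	int expired = 0;

	for (int i = 0; i < MAX_CMDS; i++) {
		struct fake_cmd *cmd = &dev->cmds[i];

		if (!cmd->in_use || cmd->expired)
			continue;
		if (time(NULL) <= cmd->deadline)
			continue;

		cmd->expired = true;  /* here the real code completes the
				       * SCSI command with CHECK CONDITION */
		expired++;
	}
	return expired;
}

int main(void)
{
	struct fake_dev dev = { 0 };

	queue_cmd(&dev, 0, 42);
	/* Force the deadline into the past to simulate a hung command. */
	dev.cmds[0].deadline = time(NULL) - 1;

	printf("expired %d command(s)\n", check_expired(&dev));
	return 0;
}

The "mark expired, free later" split mirrors why the patch touches both
tcmu_handle_completion() (reclaim data area and free the tcmu_cmd when a
completion for an already-expired command finally arrives) and
tcmu_free_device() (free any still-expired commands and warn if a
non-expired one is left behind).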