[5/9] ublk: move device reset into ublk_ch_release()

Message ID 20250414112554.3025113-6-ming.lei@redhat.com (mailing list archive)
State New
Series ublk: simplify & improve IO canceling

Commit Message

Ming Lei April 14, 2025, 11:25 a.m. UTC
ublk_ch_release() is called after the ublk char device is closed, when all
uring_cmds are done, so it is perfectly fine to move the ublk device reset
from ublk_ctrl_start_recovery() to ublk_ch_release().

This avoids holding the exiting daemon's task_struct for too long.

However, resetting the following ublk IO flags has to be deferred until the
ublk io_uring queues are ready:

- ubq->canceling

For requeuing IO in the ublk_nosrv_dev_should_queue_io() case before the
device is recovered

- ubq->fail_io

For failing IO in the UBLK_F_USER_RECOVERY_FAIL_IO case before the device
is recovered

- ublk_io->flags

For preventing use of io->cmd

This simplifies recovery a lot.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 drivers/block/ublk_drv.c | 121 +++++++++++++++++++++++----------------
 1 file changed, 72 insertions(+), 49 deletions(-)
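
To make the new ordering easier to follow, below is a minimal userspace sketch of the idea, not the driver code itself: struct layouts, the flag value and the function names are illustrative only. State that only depends on the old, exiting daemon is dropped when the char device is released, while the flags that gate I/O dispatch (canceling, fail_io, the CANCELED bit) are only cleared once every queue of the new daemon is ready again:

#include <stdbool.h>
#include <stdio.h>

#define NR_QUEUES        2
#define QUEUE_DEPTH      4
#define IO_FLAG_CANCELED (1u << 0)	/* illustrative value, not the kernel's */

struct mock_io { unsigned int flags; };

struct mock_queue {
	bool canceling;
	bool fail_io;
	int nr_io_ready;
	struct mock_io ios[QUEUE_DEPTH];
};

struct mock_dev {
	int nr_queues_ready;
	struct mock_queue queues[NR_QUEUES];
};

/* runs from ->release(): old uring_cmds are done, drop per-daemon state */
static void reset_on_release(struct mock_dev *dev)
{
	for (int i = 0; i < NR_QUEUES; i++) {
		struct mock_queue *q = &dev->queues[i];

		q->nr_io_ready = 0;
		for (int j = 0; j < QUEUE_DEPTH; j++)
			/* keep only CANCELED so nothing touches a stale cmd */
			q->ios[j].flags &= IO_FLAG_CANCELED;
	}
	dev->nr_queues_ready = 0;
}

/* runs once the last queue of the new daemon is ready again */
static void reset_io_flags_when_ready(struct mock_dev *dev)
{
	for (int i = 0; i < NR_QUEUES; i++) {
		struct mock_queue *q = &dev->queues[i];

		for (int j = 0; j < QUEUE_DEPTH; j++)
			q->ios[j].flags &= ~IO_FLAG_CANCELED;
		q->canceling = false;
		q->fail_io = false;
	}
}

int main(void)
{
	struct mock_dev dev = { 0 };

	/* pretend the old daemon died while one IO was canceled */
	dev.queues[0].canceling = true;
	dev.queues[0].ios[0].flags = IO_FLAG_CANCELED;

	reset_on_release(&dev);
	printf("after release: canceling=%d flags=%#x\n",
	       dev.queues[0].canceling, dev.queues[0].ios[0].flags);

	reset_io_flags_when_ready(&dev);
	printf("after ready:   canceling=%d flags=%#x\n",
	       dev.queues[0].canceling, dev.queues[0].ios[0].flags);
	return 0;
}

Built with a stock C compiler, the first printf still shows canceling=1 and the CANCELED bit set, the second shows both cleared, mirroring what ublk_reset_ch_dev() and ublk_reset_io_flags() do in the patch below.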

Comments

Uday Shankar April 14, 2025, 8:29 p.m. UTC | #1
On Mon, Apr 14, 2025 at 07:25:46PM +0800, Ming Lei wrote:
> ublk_ch_release() is called after ublk char device is closed, when all
> uring_cmd are done, so it is perfect fine to move ublk device reset to
> ublk_ch_release() from ublk_ctrl_start_recovery().
> 
> This way can avoid to grab the exiting daemon task_struct too long.

Nice, I had noticed this leak too, where we keep the task struct ref
until the new daemon comes around. Thanks for the fix!

> 
> However, reset of the following ublk IO flags has to be moved until ublk
> io_uring queues are ready:
> 
> - ubq->canceling
> 
> For requeuing IO in case of ublk_nosrv_dev_should_queue_io() before device
> is recovered
> 
> - ubq->fail_io
> 
> For failing IO in case of UBLK_F_USER_RECOVERY_FAIL_IO before device is
> recovered
> 
> - ublk_io->flags
> 
> For preventing using io->cmd
> 
> With this way, recovery is simplified a lot.
> 
> Signed-off-by: Ming Lei <ming.lei@redhat.com>
> ---
>  drivers/block/ublk_drv.c | 121 +++++++++++++++++++++++----------------
>  1 file changed, 72 insertions(+), 49 deletions(-)
> 
> diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
> index e0213222e3cf..b68bd4172fa8 100644
> --- a/drivers/block/ublk_drv.c
> +++ b/drivers/block/ublk_drv.c
> @@ -1074,7 +1074,7 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
>  
>  static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
>  {
> -	return ubq->ubq_daemon->flags & PF_EXITING;
> +	return !ubq->ubq_daemon || ubq->ubq_daemon->flags & PF_EXITING;
>  }
>  
>  /* todo: handle partial completion */
> @@ -1470,6 +1470,37 @@ static const struct blk_mq_ops ublk_mq_ops = {
>  	.timeout	= ublk_timeout,
>  };
>  
> +static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
> +{
> +	int i;
> +
> +	/* All old ioucmds have to be completed */
> +	ubq->nr_io_ready = 0;
> +
> +	/*
> +	 * old daemon is PF_EXITING, put it now
> +	 *
> +	 * It could be NULL in case of closing one quisced device.
> +	 */
> +	if (ubq->ubq_daemon)
> +		put_task_struct(ubq->ubq_daemon);
> +	/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
> +	ubq->ubq_daemon = NULL;
> +	ubq->timeout = false;
> +
> +	for (i = 0; i < ubq->q_depth; i++) {
> +		struct ublk_io *io = &ubq->ios[i];
> +
> +		/*
> +		 * UBLK_IO_FLAG_CANCELED is kept for avoiding to touch
> +		 * io->cmd
> +		 */
> +		io->flags &= UBLK_IO_FLAG_CANCELED;
> +		io->cmd = NULL;
> +		io->addr = 0;
> +	}
> +}
> +
>  static int ublk_ch_open(struct inode *inode, struct file *filp)
>  {
>  	struct ublk_device *ub = container_of(inode->i_cdev,
> @@ -1481,10 +1512,26 @@ static int ublk_ch_open(struct inode *inode, struct file *filp)
>  	return 0;
>  }
>  
> +static void ublk_reset_ch_dev(struct ublk_device *ub)
> +{
> +	int i;
> +
> +	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
> +		ublk_queue_reinit(ub, ublk_get_queue(ub, i));
> +
> +	/* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
> +	ub->mm = NULL;
> +	ub->nr_queues_ready = 0;
> +	ub->nr_privileged_daemon = 0;
> +}
> +
>  static int ublk_ch_release(struct inode *inode, struct file *filp)
>  {
>  	struct ublk_device *ub = filp->private_data;
>  
> +	/* all uring_cmd has been done now, reset device & ubq */
> +	ublk_reset_ch_dev(ub);
> +
>  	clear_bit(UB_STATE_OPEN, &ub->state);
>  	return 0;
>  }
> @@ -1831,6 +1878,24 @@ static void ublk_nosrv_work(struct work_struct *work)
>  	ublk_cancel_dev(ub);
>  }
>  
> +/* reset ublk io_uring queue & io flags */
> +static void ublk_reset_io_flags(struct ublk_device *ub)
> +{
> +	int i, j;
> +
> +	for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
> +		struct ublk_queue *ubq = ublk_get_queue(ub, i);
> +
> +		/* UBLK_IO_FLAG_CANCELED can be cleared now */
> +		spin_lock(&ubq->cancel_lock);

Do we need this? I think at this point there shouldn't be any concurrent
activity we need to protect against.

> +		for (j = 0; j < ubq->q_depth; j++)
> +			ubq->ios[j].flags &= ~UBLK_IO_FLAG_CANCELED;
> +		spin_unlock(&ubq->cancel_lock);
> +		ubq->canceling = false;
> +		ubq->fail_io = false;
> +	}
> +}
> +
>  /* device can only be started after all IOs are ready */
>  static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
>  	__must_hold(&ub->mutex)
> @@ -1844,8 +1909,12 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
>  		if (capable(CAP_SYS_ADMIN))
>  			ub->nr_privileged_daemon++;
>  	}
> -	if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
> +
> +	if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) {
> +		/* now we are ready for handling ublk io request */
> +		ublk_reset_io_flags(ub);
>  		complete_all(&ub->completion);
> +	}
>  }
>  
>  static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
> @@ -2943,41 +3012,14 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
>  	return ret;
>  }
>  
> -static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
> -{
> -	int i;
> -
> -	WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
> -
> -	/* All old ioucmds have to be completed */
> -	ubq->nr_io_ready = 0;
> -	/* old daemon is PF_EXITING, put it now */
> -	put_task_struct(ubq->ubq_daemon);
> -	/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
> -	ubq->ubq_daemon = NULL;
> -	ubq->timeout = false;
> -
> -	for (i = 0; i < ubq->q_depth; i++) {
> -		struct ublk_io *io = &ubq->ios[i];
> -
> -		/* forget everything now and be ready for new FETCH_REQ */
> -		io->flags = 0;
> -		io->cmd = NULL;
> -		io->addr = 0;
> -	}
> -}
> -
>  static int ublk_ctrl_start_recovery(struct ublk_device *ub,
>  		const struct ublksrv_ctrl_cmd *header)
>  {
>  	int ret = -EINVAL;
> -	int i;
>  
>  	mutex_lock(&ub->mutex);
>  	if (ublk_nosrv_should_stop_dev(ub))
>  		goto out_unlock;
> -	if (!ub->nr_queues_ready)
> -		goto out_unlock;
>  	/*
>  	 * START_RECOVERY is only allowd after:
>  	 *
> @@ -3001,12 +3043,6 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
>  		goto out_unlock;
>  	}
>  	pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
> -	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
> -		ublk_queue_reinit(ub, ublk_get_queue(ub, i));
> -	/* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
> -	ub->mm = NULL;
> -	ub->nr_queues_ready = 0;
> -	ub->nr_privileged_daemon = 0;
>  	init_completion(&ub->completion);
>  	ret = 0;
>   out_unlock:
> @@ -3019,7 +3055,6 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
>  {
>  	int ublksrv_pid = (int)header->data[0];
>  	int ret = -EINVAL;
> -	int i;
>  
>  	pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
>  			__func__, ub->dev_info.nr_hw_queues, header->dev_id);
> @@ -3039,22 +3074,10 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
>  		goto out_unlock;
>  	}
>  	ub->dev_info.ublksrv_pid = ublksrv_pid;
> +	ub->dev_info.state = UBLK_S_DEV_LIVE;
>  	pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
>  			__func__, ublksrv_pid, header->dev_id);
> -
> -	blk_mq_quiesce_queue(ub->ub_disk->queue);
> -	ub->dev_info.state = UBLK_S_DEV_LIVE;
> -	for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
> -		struct ublk_queue *ubq = ublk_get_queue(ub, i);
> -
> -		ubq->canceling = false;
> -		ubq->fail_io = false;
> -	}
> -	blk_mq_unquiesce_queue(ub->ub_disk->queue);
> -	pr_devel("%s: queue unquiesced, dev id %d.\n",
> -			__func__, header->dev_id);
>  	blk_mq_kick_requeue_list(ub->ub_disk->queue);
> -
>  	ret = 0;
>   out_unlock:
>  	mutex_unlock(&ub->mutex);
> -- 
> 2.47.0
>
Ming Lei April 15, 2025, 1:50 a.m. UTC | #2
On Mon, Apr 14, 2025 at 02:29:48PM -0600, Uday Shankar wrote:
> On Mon, Apr 14, 2025 at 07:25:46PM +0800, Ming Lei wrote:
> > ublk_ch_release() is called after ublk char device is closed, when all
> > uring_cmd are done, so it is perfect fine to move ublk device reset to
> > ublk_ch_release() from ublk_ctrl_start_recovery().
> > 
> > This way can avoid to grab the exiting daemon task_struct too long.
> 
> Nice, I had noticed this leak too, where we keep the task struct ref
> until the new daemon comes around. Thanks for the fix!
> 
> > 
> > However, reset of the following ublk IO flags has to be moved until ublk
> > io_uring queues are ready:
> > 
> > - ubq->canceling
> > 
> > For requeuing IO in case of ublk_nosrv_dev_should_queue_io() before device
> > is recovered
> > 
> > - ubq->fail_io
> > 
> > For failing IO in case of UBLK_F_USER_RECOVERY_FAIL_IO before device is
> > recovered
> > 
> > - ublk_io->flags
> > 
> > For preventing using io->cmd
> > 
> > With this way, recovery is simplified a lot.
> > 
> > Signed-off-by: Ming Lei <ming.lei@redhat.com>
> > ---
> >  drivers/block/ublk_drv.c | 121 +++++++++++++++++++++++----------------
> >  1 file changed, 72 insertions(+), 49 deletions(-)
> > 
> > diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
> > index e0213222e3cf..b68bd4172fa8 100644
> > --- a/drivers/block/ublk_drv.c
> > +++ b/drivers/block/ublk_drv.c
> > @@ -1074,7 +1074,7 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
> >  
> >  static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
> >  {
> > -	return ubq->ubq_daemon->flags & PF_EXITING;
> > +	return !ubq->ubq_daemon || ubq->ubq_daemon->flags & PF_EXITING;
> >  }
> >  
> >  /* todo: handle partial completion */
> > @@ -1470,6 +1470,37 @@ static const struct blk_mq_ops ublk_mq_ops = {
> >  	.timeout	= ublk_timeout,
> >  };
> >  
> > +static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
> > +{
> > +	int i;
> > +
> > +	/* All old ioucmds have to be completed */
> > +	ubq->nr_io_ready = 0;
> > +
> > +	/*
> > +	 * old daemon is PF_EXITING, put it now
> > +	 *
> > +	 * It could be NULL in case of closing one quisced device.
> > +	 */
> > +	if (ubq->ubq_daemon)
> > +		put_task_struct(ubq->ubq_daemon);
> > +	/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
> > +	ubq->ubq_daemon = NULL;
> > +	ubq->timeout = false;
> > +
> > +	for (i = 0; i < ubq->q_depth; i++) {
> > +		struct ublk_io *io = &ubq->ios[i];
> > +
> > +		/*
> > +		 * UBLK_IO_FLAG_CANCELED is kept for avoiding to touch
> > +		 * io->cmd
> > +		 */
> > +		io->flags &= UBLK_IO_FLAG_CANCELED;
> > +		io->cmd = NULL;
> > +		io->addr = 0;
> > +	}
> > +}
> > +
> >  static int ublk_ch_open(struct inode *inode, struct file *filp)
> >  {
> >  	struct ublk_device *ub = container_of(inode->i_cdev,
> > @@ -1481,10 +1512,26 @@ static int ublk_ch_open(struct inode *inode, struct file *filp)
> >  	return 0;
> >  }
> >  
> > +static void ublk_reset_ch_dev(struct ublk_device *ub)
> > +{
> > +	int i;
> > +
> > +	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
> > +		ublk_queue_reinit(ub, ublk_get_queue(ub, i));
> > +
> > +	/* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
> > +	ub->mm = NULL;
> > +	ub->nr_queues_ready = 0;
> > +	ub->nr_privileged_daemon = 0;
> > +}
> > +
> >  static int ublk_ch_release(struct inode *inode, struct file *filp)
> >  {
> >  	struct ublk_device *ub = filp->private_data;
> >  
> > +	/* all uring_cmd has been done now, reset device & ubq */
> > +	ublk_reset_ch_dev(ub);
> > +
> >  	clear_bit(UB_STATE_OPEN, &ub->state);
> >  	return 0;
> >  }
> > @@ -1831,6 +1878,24 @@ static void ublk_nosrv_work(struct work_struct *work)
> >  	ublk_cancel_dev(ub);
> >  }
> >  
> > +/* reset ublk io_uring queue & io flags */
> > +static void ublk_reset_io_flags(struct ublk_device *ub)
> > +{
> > +	int i, j;
> > +
> > +	for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
> > +		struct ublk_queue *ubq = ublk_get_queue(ub, i);
> > +
> > +		/* UBLK_IO_FLAG_CANCELED can be cleared now */
> > +		spin_lock(&ubq->cancel_lock);
> 
> Do we need this? I think at this point there shouldn't be any concurrent
> activity we need to protect against.

Yeah, the lock isn't necessary, but taking it here actually has a documentation benefit.


Thanks,
Ming
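
To illustrate the "documentation benefit" point in the exchange above, here is a small hedged userspace sketch, with a pthread spinlock standing in for ubq->cancel_lock; the names and flag value are illustrative, not the driver's. The cancel path sets the CANCELED bit under the lock, so clearing it under the same lock makes the pairing obvious to readers even though no cancel can actually race with the ready-time reset:

#include <pthread.h>

#define IO_FLAG_CANCELED (1u << 0)	/* illustrative value, not the kernel's */

struct mock_queue {
	pthread_spinlock_t cancel_lock;	/* stands in for ubq->cancel_lock */
	unsigned int io_flags;
};

/* cancel path: can run while the old daemon is exiting */
static void mark_canceled(struct mock_queue *q)
{
	pthread_spin_lock(&q->cancel_lock);
	q->io_flags |= IO_FLAG_CANCELED;
	pthread_spin_unlock(&q->cancel_lock);
}

/*
 * ready-time reset: nothing can race with it any more, but taking the
 * same lock documents that the cancel path owns this flag
 */
static void clear_canceled(struct mock_queue *q)
{
	pthread_spin_lock(&q->cancel_lock);
	q->io_flags &= ~IO_FLAG_CANCELED;
	pthread_spin_unlock(&q->cancel_lock);
}

int main(void)
{
	struct mock_queue q = { .io_flags = 0 };

	pthread_spin_init(&q.cancel_lock, PTHREAD_PROCESS_PRIVATE);
	mark_canceled(&q);
	clear_canceled(&q);
	pthread_spin_destroy(&q.cancel_lock);
	return 0;
}

(Link with -pthread; the program only exercises the set/clear pairing.)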

Patch

diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index e0213222e3cf..b68bd4172fa8 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1074,7 +1074,7 @@  static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
 
 static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
 {
-	return ubq->ubq_daemon->flags & PF_EXITING;
+	return !ubq->ubq_daemon || ubq->ubq_daemon->flags & PF_EXITING;
 }
 
 /* todo: handle partial completion */
@@ -1470,6 +1470,37 @@  static const struct blk_mq_ops ublk_mq_ops = {
 	.timeout	= ublk_timeout,
 };
 
+static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
+{
+	int i;
+
+	/* All old ioucmds have to be completed */
+	ubq->nr_io_ready = 0;
+
+	/*
+	 * old daemon is PF_EXITING, put it now
+	 *
+	 * It could be NULL in case of closing a quiesced device.
+	 */
+	if (ubq->ubq_daemon)
+		put_task_struct(ubq->ubq_daemon);
+	/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
+	ubq->ubq_daemon = NULL;
+	ubq->timeout = false;
+
+	for (i = 0; i < ubq->q_depth; i++) {
+		struct ublk_io *io = &ubq->ios[i];
+
+		/*
+		 * UBLK_IO_FLAG_CANCELED is kept to avoid touching
+		 * io->cmd
+		 */
+		io->flags &= UBLK_IO_FLAG_CANCELED;
+		io->cmd = NULL;
+		io->addr = 0;
+	}
+}
+
 static int ublk_ch_open(struct inode *inode, struct file *filp)
 {
 	struct ublk_device *ub = container_of(inode->i_cdev,
@@ -1481,10 +1512,26 @@  static int ublk_ch_open(struct inode *inode, struct file *filp)
 	return 0;
 }
 
+static void ublk_reset_ch_dev(struct ublk_device *ub)
+{
+	int i;
+
+	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+		ublk_queue_reinit(ub, ublk_get_queue(ub, i));
+
+	/* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
+	ub->mm = NULL;
+	ub->nr_queues_ready = 0;
+	ub->nr_privileged_daemon = 0;
+}
+
 static int ublk_ch_release(struct inode *inode, struct file *filp)
 {
 	struct ublk_device *ub = filp->private_data;
 
+	/* all uring_cmds have been done now, reset device & ubq */
+	ublk_reset_ch_dev(ub);
+
 	clear_bit(UB_STATE_OPEN, &ub->state);
 	return 0;
 }
@@ -1831,6 +1878,24 @@  static void ublk_nosrv_work(struct work_struct *work)
 	ublk_cancel_dev(ub);
 }
 
+/* reset ublk io_uring queue & io flags */
+static void ublk_reset_io_flags(struct ublk_device *ub)
+{
+	int i, j;
+
+	for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+		struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+		/* UBLK_IO_FLAG_CANCELED can be cleared now */
+		spin_lock(&ubq->cancel_lock);
+		for (j = 0; j < ubq->q_depth; j++)
+			ubq->ios[j].flags &= ~UBLK_IO_FLAG_CANCELED;
+		spin_unlock(&ubq->cancel_lock);
+		ubq->canceling = false;
+		ubq->fail_io = false;
+	}
+}
+
 /* device can only be started after all IOs are ready */
 static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
 	__must_hold(&ub->mutex)
@@ -1844,8 +1909,12 @@  static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
 		if (capable(CAP_SYS_ADMIN))
 			ub->nr_privileged_daemon++;
 	}
-	if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
+
+	if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) {
+		/* now we are ready to handle ublk io requests */
+		ublk_reset_io_flags(ub);
 		complete_all(&ub->completion);
+	}
 }
 
 static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
@@ -2943,41 +3012,14 @@  static int ublk_ctrl_set_params(struct ublk_device *ub,
 	return ret;
 }
 
-static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
-{
-	int i;
-
-	WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
-
-	/* All old ioucmds have to be completed */
-	ubq->nr_io_ready = 0;
-	/* old daemon is PF_EXITING, put it now */
-	put_task_struct(ubq->ubq_daemon);
-	/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
-	ubq->ubq_daemon = NULL;
-	ubq->timeout = false;
-
-	for (i = 0; i < ubq->q_depth; i++) {
-		struct ublk_io *io = &ubq->ios[i];
-
-		/* forget everything now and be ready for new FETCH_REQ */
-		io->flags = 0;
-		io->cmd = NULL;
-		io->addr = 0;
-	}
-}
-
 static int ublk_ctrl_start_recovery(struct ublk_device *ub,
 		const struct ublksrv_ctrl_cmd *header)
 {
 	int ret = -EINVAL;
-	int i;
 
 	mutex_lock(&ub->mutex);
 	if (ublk_nosrv_should_stop_dev(ub))
 		goto out_unlock;
-	if (!ub->nr_queues_ready)
-		goto out_unlock;
 	/*
 	 * START_RECOVERY is only allowd after:
 	 *
@@ -3001,12 +3043,6 @@  static int ublk_ctrl_start_recovery(struct ublk_device *ub,
 		goto out_unlock;
 	}
 	pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
-	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
-		ublk_queue_reinit(ub, ublk_get_queue(ub, i));
-	/* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
-	ub->mm = NULL;
-	ub->nr_queues_ready = 0;
-	ub->nr_privileged_daemon = 0;
 	init_completion(&ub->completion);
 	ret = 0;
  out_unlock:
@@ -3019,7 +3055,6 @@  static int ublk_ctrl_end_recovery(struct ublk_device *ub,
 {
 	int ublksrv_pid = (int)header->data[0];
 	int ret = -EINVAL;
-	int i;
 
 	pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
 			__func__, ub->dev_info.nr_hw_queues, header->dev_id);
@@ -3039,22 +3074,10 @@  static int ublk_ctrl_end_recovery(struct ublk_device *ub,
 		goto out_unlock;
 	}
 	ub->dev_info.ublksrv_pid = ublksrv_pid;
+	ub->dev_info.state = UBLK_S_DEV_LIVE;
 	pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
 			__func__, ublksrv_pid, header->dev_id);
-
-	blk_mq_quiesce_queue(ub->ub_disk->queue);
-	ub->dev_info.state = UBLK_S_DEV_LIVE;
-	for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
-		struct ublk_queue *ubq = ublk_get_queue(ub, i);
-
-		ubq->canceling = false;
-		ubq->fail_io = false;
-	}
-	blk_mq_unquiesce_queue(ub->ub_disk->queue);
-	pr_devel("%s: queue unquiesced, dev id %d.\n",
-			__func__, header->dev_id);
 	blk_mq_kick_requeue_list(ub->ub_disk->queue);
-
 	ret = 0;
  out_unlock:
 	mutex_unlock(&ub->mutex);