--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -166,7 +166,7 @@ void blk_mq_quiesce_queue(struct request_queue *q)
unsigned int i;
bool rcu = false;
- blk_mq_stop_hw_queues(q);
+ blk_mq_stop_hw_queues(q, true);
queue_for_each_hw_ctx(q, hctx, i) {
if (hctx->flags & BLK_MQ_F_BLOCKING)
@@ -1218,20 +1218,39 @@ bool blk_mq_queue_stopped(struct request_queue *q)
}
EXPORT_SYMBOL(blk_mq_queue_stopped);
-void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
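+/*
+ * Cancel a pending run of @hctx and mark the queue stopped. A sync cancel
+ * also waits for a concurrently executing run_work to finish; it may
+ * sleep and must not be issued from the run_work itself.
+ */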
+static void __blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx, bool sync)
{
- cancel_delayed_work_sync(&hctx->run_work);
+ if (sync)
+ cancel_delayed_work_sync(&hctx->run_work);
+ else
+ cancel_delayed_work(&hctx->run_work);
+
set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
+
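+/* Asynchronous variant: does not wait for run_work, safe from ->queue_rq(). */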
+void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
+{
+ __blk_mq_stop_hw_queue(hctx, false);
+}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);
-void blk_mq_stop_hw_queues(struct request_queue *q)
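+/*
+ * Stop all hardware queues of @q. A sync cancel waits for every queue's
+ * run_work and may therefore sleep.
+ */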
+void blk_mq_stop_hw_queues(struct request_queue *q, bool sync)
{
struct blk_mq_hw_ctx *hctx;
int i;
queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_stop_hw_queue(hctx);
+ __blk_mq_stop_hw_queue(hctx, sync);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -950,7 +950,7 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
unsigned long to;
bool active = true;
- blk_mq_stop_hw_queues(port->dd->queue);
+ blk_mq_stop_hw_queues(port->dd->queue, true);
to = jiffies + msecs_to_jiffies(timeout);
do {
@@ -4009,7 +4009,7 @@ static int mtip_block_remove(struct driver_data *dd)
dd->disk->disk_name);
blk_freeze_queue_start(dd->queue);
- blk_mq_stop_hw_queues(dd->queue);
+ blk_mq_stop_hw_queues(dd->queue, true);
blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd);
/*
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -656,7 +656,7 @@ static void nbd_clear_req(struct request *req, void *data, bool reserved)
static void nbd_clear_que(struct nbd_device *nbd)
{
- blk_mq_stop_hw_queues(nbd->disk->queue);
+ blk_mq_stop_hw_queues(nbd->disk->queue, true);
blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
blk_mq_start_hw_queues(nbd->disk->queue);
dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -844,7 +844,7 @@ static int virtblk_freeze(struct virtio_device *vdev)
/* Make sure no work handler is accessing the device. */
flush_work(&vblk->config_work);
- blk_mq_stop_hw_queues(vblk->disk->queue);
+ blk_mq_stop_hw_queues(vblk->disk->queue, true);
vdev->config->del_vqs(vdev);
return 0;
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1969,7 +1969,8 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
return BLK_MQ_RQ_QUEUE_ERROR;
if (op->rq) {
- blk_mq_stop_hw_queues(op->rq->q);
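+ /* No sync cancel here: ->queue_rq() can be invoked from run_work itself. */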
+ blk_mq_stop_hw_queues(op->rq->q, false);
blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
}
return BLK_MQ_RQ_QUEUE_BUSY;
@@ -2478,7 +2478,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
* use blk_mq_tagset_busy_iter() and the transport routine to
* terminate the exchanges.
*/
- blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+ blk_mq_stop_hw_queues(ctrl->ctrl.admin_q, true);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1084,7 +1084,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
spin_unlock_irq(&nvmeq->q_lock);
if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
- blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q);
+ blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q, true);
free_irq(vector, nvmeq);
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -792,7 +792,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
return;
stop_admin_q:
- blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+ blk_mq_stop_hw_queues(ctrl->ctrl.admin_q, true);
requeue:
dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
ctrl->ctrl.opts->nr_reconnects);
@@ -814,7 +814,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
if (ctrl->queue_count > 1)
nvme_stop_queues(&ctrl->ctrl);
- blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+ blk_mq_stop_hw_queues(ctrl->ctrl.admin_q, true);
/* We must take care of fastfail/requeue all our inflight requests */
if (ctrl->queue_count > 1)
@@ -1651,7 +1651,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
if (test_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[0].flags))
nvme_shutdown_ctrl(&ctrl->ctrl);
- blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+ blk_mq_stop_hw_queues(ctrl->ctrl.admin_q, true);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_cancel_request, &ctrl->ctrl);
nvme_rdma_destroy_admin_queue(ctrl);
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -446,7 +446,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
nvme_shutdown_ctrl(&ctrl->ctrl);
- blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+ blk_mq_stop_hw_queues(ctrl->ctrl.admin_q, true);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_cancel_request, &ctrl->ctrl);
nvme_loop_destroy_admin_queue(ctrl);
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2976,7 +2976,8 @@ scsi_internal_device_block(struct scsi_device *sdev, bool wait)
if (wait)
blk_mq_quiesce_queue(q);
else
- blk_mq_stop_hw_queues(q);
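+ /* The caller asked not to wait, and this path may not be able to sleep. */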
+ blk_mq_stop_hw_queues(q, false);
} else {
spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -239,7 +239,8 @@ void blk_mq_complete_request(struct request *rq);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
-void blk_mq_stop_hw_queues(struct request_queue *q);
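+/* @sync: wait for a concurrently executing queue run; a sync stop may sleep. */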
+void blk_mq_stop_hw_queues(struct request_queue *q, bool sync);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);