@@ -575,6 +575,9 @@ void blk_cleanup_queue(struct request_queue *q)
if (!q->mq_ops)
__blk_drain_queue(q, true);
queue_flag_set(QUEUE_FLAG_DEAD, q);
+
+	/* wait for any resets that might have started as a result of the drain */
+ wait_event_lock_irq(q->reset_wq, !blk_queue_resetting(q), *lock);
spin_unlock_irq(lock);
/* for synchronous bio-based driver finish in-flight integrity i/o */
@@ -728,6 +731,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
kobject_init(&q->kobj, &blk_queue_ktype);
+ init_waitqueue_head(&q->reset_wq);
mutex_init(&q->sysfs_lock);
spin_lock_init(&q->__queue_lock);
@@ -850,6 +854,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
INIT_WORK(&q->timeout_work, blk_timeout_work);
q->request_fn = rfn;
+	q->reset_fn = NULL;
q->prep_rq_fn = NULL;
q->unprep_rq_fn = NULL;
q->queue_flags |= QUEUE_FLAG_DEFAULT;
@@ -2619,6 +2624,9 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
case -ENODATA:
error_type = "critical medium";
break;
+ case -EINTR:
+ error_type = "critical command";
+ break;
case -EIO:
default:
error_type = "I/O";
@@ -71,6 +71,12 @@ void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
+void blk_queue_reset(struct request_queue *q, reset_fn *fn)
+{
+ q->reset_fn = fn;
+}
+EXPORT_SYMBOL_GPL(blk_queue_reset);
+
void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
q->lld_busy_fn = fn;
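
On the legacy request_fn path a driver registers its handler once at queue setup, alongside blk_queue_rq_timed_out(). A minimal sketch under that assumption follows; the mydrv_* names (mydrv_ctrl, mydrv_hw_reset(), mydrv_request_fn(), mydrv_timed_out()) are invented for illustration and are not part of this patch:

	static enum blk_eh_timer_return mydrv_reset(struct request_queue *q)
	{
		struct mydrv_ctrl *ctrl = q->queuedata;

		/* hardware-specific: reset the controller so nothing is still
		 * executing, then complete every outstanding request, 0 on
		 * success or -EINTR for requests that had to be aborted */
		if (mydrv_hw_reset(ctrl))
			return BLK_EH_NOT_HANDLED;
		return BLK_EH_HANDLED;
	}

	static int mydrv_setup_queue(struct mydrv_ctrl *ctrl)
	{
		struct request_queue *q;

		q = blk_init_queue(mydrv_request_fn, &ctrl->lock);
		if (!q)
			return -ENOMEM;
		q->queuedata = ctrl;
		blk_queue_rq_timed_out(q, mydrv_timed_out);
		blk_queue_reset(q, mydrv_reset);	/* new callout added above */
		ctrl->queue = q;
		return 0;
	}
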
@@ -5,6 +5,7 @@
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fault-inject.h>
+#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
@@ -172,6 +173,73 @@ void blk_abort_request(struct request *req)
}
EXPORT_SYMBOL_GPL(blk_abort_request);
+/**
+ * blk_reset_queue - force completion of requests executing in queue
+ * @q: request queue to reset
+ *
+ * On success the driver returns BLK_EH_HANDLED from the callout and has
+ * completed every executing request, either successfully (status 0) or,
+ * where a request was aborted, with the error code -EINTR.
+ *
+ * On failure the driver returns BLK_EH_NOT_HANDLED, and requests may still
+ * be executing.
+ */
+int blk_reset_queue(struct request_queue *q)
+{
+ enum blk_eh_timer_return eh_rc;
+ int rc;
+
+ spin_lock_irq(q->queue_lock);
+ wait_event_lock_irq(q->reset_wq,
+ !queue_flag_test_and_set(QUEUE_FLAG_RESETTING, q),
+ *q->queue_lock);
+ if (blk_queue_dead(q)) {
+ rc = -ENODEV;
+ spin_unlock_irq(q->queue_lock);
+ goto done;
+ }
+ spin_unlock_irq(q->queue_lock);
+
+ if (q->mq_ops) {
+ blk_mq_stop_hw_queues(q);
+ blk_mq_freeze_queue(q);
+
+ eh_rc = q->mq_ops->reset(q);
+
+ blk_mq_unfreeze_queue(q);
+ blk_mq_start_stopped_hw_queues(q, true);
+ } else if (q->reset_fn) {
+ spin_lock_irq(q->queue_lock);
+ blk_stop_queue(q);
+ spin_unlock_irq(q->queue_lock);
+
+ while (q->request_fn_active)
+ msleep(10);
+
+ eh_rc = q->reset_fn(q);
+
+ spin_lock_irq(q->queue_lock);
+ blk_start_queue(q);
+ spin_unlock_irq(q->queue_lock);
+ } else {
+ rc = -EOPNOTSUPP;
+ goto done;
+ }
+
+ if (eh_rc == BLK_EH_HANDLED)
+ rc = 0;
+ else
+ rc = -EIO;
+
+done:
+ spin_lock_irq(q->queue_lock);
+ queue_flag_clear(QUEUE_FLAG_RESETTING, q);
+ spin_unlock_irq(q->queue_lock);
+ wake_up_all(&q->reset_wq);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(blk_reset_queue);
+
unsigned long blk_rq_timeout(unsigned long timeout)
{
unsigned long maxt;
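
Seen from the caller, the return codes map one-to-one onto the paths above: 0 when the callout returned BLK_EH_HANDLED, -EIO when it returned BLK_EH_NOT_HANDLED, -ENODEV when the queue went dead while waiting on reset_wq, and -EOPNOTSUPP when no reset callout is registered. A hedged sketch of a recovery-path caller (mydrv_recover() is invented for illustration):

	static int mydrv_recover(struct request_queue *q)
	{
		int ret = blk_reset_queue(q);

		switch (ret) {
		case 0:
			/* callout returned BLK_EH_HANDLED: every request that
			 * was executing has been completed or failed (-EINTR) */
			return 0;
		case -EOPNOTSUPP:	/* no reset callout registered */
		case -ENODEV:		/* queue died while waiting to reset */
		case -EIO:		/* callout returned BLK_EH_NOT_HANDLED */
		default:
			/* requests may still be executing; escalate recovery */
			return ret;
		}
	}
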
@@ -120,6 +120,11 @@ struct blk_mq_ops {
timeout_fn *timeout;
/*
+ * Force executing IO to complete or fail.
+ */
+ reset_fn *reset;
+
+ /*
* Called to poll for completion of a specific tag.
*/
poll_fn *poll;
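
A blk-mq driver opts in by filling the new member in its blk_mq_ops next to its existing callouts; there is no separate registration call on the mq path, and blk_reset_queue() stops and freezes the hardware queues around the call. A minimal sketch, with the mydrv_* handlers invented for illustration:

	static struct blk_mq_ops mydrv_mq_ops = {
		.queue_rq	= mydrv_queue_rq,
		.complete	= mydrv_complete_rq,
		.timeout	= mydrv_timeout,
		.reset		= mydrv_mq_reset,	/* same reset_fn signature as the legacy path */
	};
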
@@ -227,6 +227,7 @@ enum blk_eh_timer_return {
};
typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
+typedef enum blk_eh_timer_return (reset_fn)(struct request_queue *);
enum blk_queue_state {
Queue_down,
@@ -304,6 +305,7 @@ struct request_queue {
unprep_rq_fn *unprep_rq_fn;
softirq_done_fn *softirq_done_fn;
rq_timed_out_fn *rq_timed_out_fn;
+ reset_fn *reset_fn;
dma_drain_needed_fn *dma_drain_needed;
lld_busy_fn *lld_busy_fn;
@@ -464,6 +466,8 @@ struct request_queue {
struct bio_set *bio_split;
bool mq_sysfs_init_done;
+
+ wait_queue_head_t reset_wq;
};
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
@@ -492,6 +496,7 @@ struct request_queue {
#define QUEUE_FLAG_WC 23 /* Write back caching */
#define QUEUE_FLAG_FUA 24 /* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */
+#define QUEUE_FLAG_RESETTING 26 /* reset callback is executing */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -564,6 +569,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
__clear_bit(flag, &q->queue_flags);
}
+#define blk_queue_resetting(q) test_bit(QUEUE_FLAG_RESETTING, &(q)->queue_flags)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
@@ -955,6 +961,7 @@ extern bool __blk_end_request_err(struct request *rq, int error);
extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
+extern int blk_reset_queue(struct request_queue *);
extern void blk_unprep_request(struct request *);
/*
@@ -1008,6 +1015,7 @@ extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
+extern void blk_queue_reset(struct request_queue *, reset_fn *);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);