@@ -346,6 +346,17 @@ void blk_sync_queue(struct request_queue *q)
}
EXPORT_SYMBOL(blk_sync_queue);
+void blk_set_preempt_only(struct request_queue *q, bool preempt_only)
+{
+ blk_mq_freeze_queue(q);
+ if (preempt_only)
+ queue_flag_set_unlocked(QUEUE_FLAG_PREEMPT_ONLY, q);
+ else
+ queue_flag_clear_unlocked(QUEUE_FLAG_PREEMPT_ONLY, q);
+ blk_mq_unfreeze_queue(q);
+}
+EXPORT_SYMBOL(blk_set_preempt_only);
+
/**
* __blk_run_queue_uncond - run a queue whether or not it has been stopped
* @q: The queue to run
@@ -771,9 +782,18 @@ int blk_queue_enter(struct request_queue *q, unsigned flags)
while (true) {
int ret;
+ /*
+ * The preempt_only flag can only be changed while the queue is
+ * frozen, so it can safely be checked here without taking a lock.
+ */
+ if (blk_queue_preempt_only(q)) {
+ if (!(flags & BLK_REQ_PREEMPT))
+ goto slow_path;
+ }
+
if (percpu_ref_tryget_live(&q->q_usage_counter))
return 0;
-
+ slow_path:
if (flags & BLK_REQ_NOWAIT)
return -EBUSY;
@@ -787,7 +807,8 @@ int blk_queue_enter(struct request_queue *q, unsigned flags)
smp_rmb();
ret = wait_event_interruptible(q->mq_freeze_wq,
- !atomic_read(&q->mq_freeze_depth) ||
+ (!atomic_read(&q->mq_freeze_depth) &&
+ !blk_queue_preempt_only(q)) ||
blk_queue_dying(q));
if (blk_queue_dying(q))
return -ENODEV;
@@ -630,6 +630,7 @@ struct request_queue {
#define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */
#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */
+#define QUEUE_FLAG_PREEMPT_ONLY 29 /* only process REQ_PREEMPT requests */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -734,6 +735,10 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
+#define blk_queue_preempt_only(q) \
+ test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
+
+extern void blk_set_preempt_only(struct request_queue *q, bool preempt_only);
static inline bool blk_account_rq(struct request *rq)
{
When a queue is in PREEMPT_ONLY mode, only RQF_PREEMPT requests can be
allocated and dispatched; other requests are not allowed to enter the
I/O path. This is useful for supporting safe SCSI quiesce.

Part of this patch is from Bart's '[PATCH v4 4/7] block: Add the
QUEUE_FLAG_PREEMPT_ONLY request queue flag'.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-core.c       | 25 +++++++++++++++++++++++--
 include/linux/blkdev.h |  5 +++++
 2 files changed, 28 insertions(+), 2 deletions(-)
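
For reference, below is a minimal sketch (not part of this patch) of how a
caller such as the SCSI quiesce path in a later patch of the series might use
the new interface. The helpers example_quiesce(), example_resume() and
example_send_preempt_request() are made up for illustration; only
blk_set_preempt_only(), blk_queue_enter() with BLK_REQ_PREEMPT, and the
pre-existing blk_queue_exit() counterpart are real. blk_queue_enter() is
called directly here just to show the flag's effect; real callers reach it
through the request allocation and make_request paths.

/*
 * Illustrative sketch only, not part of this patch.
 */
#include <linux/blkdev.h>

/* Switch the queue to preempt-only mode, e.g. before SCSI quiesce. */
static void example_quiesce(struct request_queue *q)
{
	/*
	 * The flag is flipped inside a freeze/unfreeze cycle, so after
	 * this returns only callers passing BLK_REQ_PREEMPT can enter
	 * the queue; everyone else sleeps in blk_queue_enter().
	 */
	blk_set_preempt_only(q, true);
}

/* Leave preempt-only mode; the unfreeze wakes blocked submitters. */
static void example_resume(struct request_queue *q)
{
	blk_set_preempt_only(q, false);
}

/* Issue a request that must still get through while quiesced. */
static int example_send_preempt_request(struct request_queue *q)
{
	int ret;

	ret = blk_queue_enter(q, BLK_REQ_PREEMPT);
	if (ret)
		return ret;

	/* ... allocate and dispatch an RQF_PREEMPT request here ... */

	blk_queue_exit(q);
	return 0;
}

The freeze/unfreeze cycle inside blk_set_preempt_only() is what makes the
lockless check in blk_queue_enter() safe: the flag never changes while a
normal submitter can be in the middle of entering the queue.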