diff --git a/block/blk-core.c b/block/blk-core.c
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -72,6 +72,78 @@ struct kmem_cache *blk_requestq_cachep;
*/
static struct workqueue_struct *kblockd_workqueue;
+/**
+ * blk_queue_flag_set - atomically set a queue flag
+ * @flag: flag to be set
+ * @q: request queue
+ */
+void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ queue_flag_set(flag, q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(blk_queue_flag_set);
+
+/**
+ * blk_queue_flag_clear - atomically clear a queue flag
+ * @flag: flag to be cleared
+ * @q: request queue
+ */
+void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ queue_flag_clear(flag, q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(blk_queue_flag_clear);
+
+/**
+ * blk_queue_flag_test_and_set - atomically test and set a queue flag
+ * @flag: flag to be set
+ * @q: request queue
+ *
+ * Returns the previous value of @flag: 0 if the flag was not set and 1 if
+ * the flag was already set.
+ */
+bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
+{
+ unsigned long flags;
+ bool res;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ res = queue_flag_test_and_set(flag, q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
+
+/**
+ * blk_queue_flag_test_and_clear - atomically test and clear a queue flag
+ * @flag: flag to be cleared
+ * @q: request queue
+ *
+ * Returns the previous value of @flag: 0 if the flag was not set and 1 if
+ * the flag was set.
+ */
+bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q)
+{
+ unsigned long flags;
+ bool res;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ res = queue_flag_test_and_clear(flag, q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_clear);
+
static void blk_clear_congested(struct request_list *rl, int sync)
{
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -362,25 +434,14 @@ EXPORT_SYMBOL(blk_sync_queue);
*/
int blk_set_preempt_only(struct request_queue *q)
{
- unsigned long flags;
- int res;
-
- spin_lock_irqsave(q->queue_lock, flags);
- res = queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- return res;
+ return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
}
EXPORT_SYMBOL_GPL(blk_set_preempt_only);
void blk_clear_preempt_only(struct request_queue *q)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
+ blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
wake_up_all(&q->mq_freeze_wq);
- spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
@@ -630,9 +691,7 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
void blk_set_queue_dying(struct request_queue *q)
{
- spin_lock_irq(q->queue_lock);
- queue_flag_set(QUEUE_FLAG_DYING, q);
- spin_unlock_irq(q->queue_lock);
+ blk_queue_flag_set(QUEUE_FLAG_DYING, q);
/*
* When queue DYING flag is set, we need to block new req
diff --git a/block/blk-mq.c b/block/blk-mq.c
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -198,11 +198,7 @@ EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
*/
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- queue_flag_set(QUEUE_FLAG_QUIESCED, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
@@ -243,11 +239,7 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
*/
void blk_mq_unquiesce_queue(struct request_queue *q)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
/* dispatch requests which are inserted during quiescing */
blk_mq_run_hw_queues(q, true);
diff --git a/block/blk-settings.c b/block/blk-settings.c
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -859,12 +859,10 @@ EXPORT_SYMBOL(blk_queue_update_dma_alignment);
void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
{
- spin_lock_irq(q->queue_lock);
if (queueable)
- queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q);
+ blk_queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q);
else
- queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q);
- spin_unlock_irq(q->queue_lock);
+ blk_queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q);
}
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -276,12 +276,10 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \
if (neg) \
val = !val; \
\
- spin_lock_irq(q->queue_lock); \
if (val) \
- queue_flag_set(QUEUE_FLAG_##flag, q); \
+ blk_queue_flag_set(QUEUE_FLAG_##flag, q); \
else \
- queue_flag_clear(QUEUE_FLAG_##flag, q); \
- spin_unlock_irq(q->queue_lock); \
+ blk_queue_flag_clear(QUEUE_FLAG_##flag, q); \
return ret; \
}
@@ -414,12 +412,10 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
if (ret < 0)
return ret;
- spin_lock_irq(q->queue_lock);
if (poll_on)
- queue_flag_set(QUEUE_FLAG_POLL, q);
+ blk_queue_flag_set(QUEUE_FLAG_POLL, q);
else
- queue_flag_clear(QUEUE_FLAG_POLL, q);
- spin_unlock_irq(q->queue_lock);
+ blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
return ret;
}
@@ -487,12 +483,10 @@ static ssize_t queue_wc_store(struct request_queue *q, const char *page,
if (set == -1)
return -EINVAL;
- spin_lock_irq(q->queue_lock);
if (set)
- queue_flag_set(QUEUE_FLAG_WC, q);
+ blk_queue_flag_set(QUEUE_FLAG_WC, q);
else
- queue_flag_clear(QUEUE_FLAG_WC, q);
- spin_unlock_irq(q->queue_lock);
+ blk_queue_flag_clear(QUEUE_FLAG_WC, q);
return count;
}
@@ -948,9 +942,7 @@ void blk_unregister_queue(struct gendisk *disk)
*/
mutex_lock(&q->sysfs_lock);
- spin_lock_irq(q->queue_lock);
- queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
- spin_unlock_irq(q->queue_lock);
+ blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
/*
* Remove the sysfs attributes before unregistering the queue data
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -57,12 +57,10 @@ ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
char *p = (char *) buf;
val = simple_strtoul(p, &p, 10);
- spin_lock_irq(q->queue_lock);
if (val)
- queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
+ blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
else
- queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
- spin_unlock_irq(q->queue_lock);
+ blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
}
return count;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -705,6 +705,11 @@ struct request_queue {
(1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_POLL))
+void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
+void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
+bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
+bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
+
/*
* @q->queue_lock is set while a queue is being initialized. Since we know
* that no other threads access the queue object before @q->queue_lock has
Introduce functions that modify the queue flags and that protect these
modifications with the request queue lock. Except for moving one
wake_up_all() call from inside to outside a critical section, this patch
does not change any functionality.

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Ming Lei <ming.lei@redhat.com>
---
 block/blk-core.c       | 91 +++++++++++++++++++++++++++++++++++++++++---------
 block/blk-mq.c         | 12 ++-----
 block/blk-settings.c   |  6 ++--
 block/blk-sysfs.c      | 22 ++++--------
 block/blk-timeout.c    |  6 ++--
 include/linux/blkdev.h |  5 +++
 6 files changed, 93 insertions(+), 49 deletions(-)
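
A usage sketch for reviewers (not part of the patch; example_set_nomerges()
and the choice of QUEUE_FLAG_NOMERGES below are hypothetical, picked only to
illustrate the calling convention):

#include <linux/blkdev.h>
#include <linux/printk.h>

/* Illustrative only; shows how call sites migrate to the new helpers. */
static void example_set_nomerges(struct request_queue *q)
{
	/*
	 * Old pattern, open-coded at every call site:
	 *
	 *	spin_lock_irq(q->queue_lock);
	 *	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	 *	spin_unlock_irq(q->queue_lock);
	 */

	/* New pattern: the helper takes and drops q->queue_lock itself. */
	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);

	/* The test-and-set variant also reports the previous flag state. */
	if (blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q))
		pr_info("PREEMPT_ONLY was already set\n");
}

Note that the helpers use spin_lock_irqsave()/spin_unlock_irqrestore() rather
than spin_lock_irq(), so they are also safe to call with interrupts already
disabled; call sites that previously used spin_lock_irq() can therefore
switch without any loss of correctness.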