--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1082,6 +1082,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
if (blkcg_init_queue(q))
goto fail_ref;
+ /* Explicitly default the queue to zero read-redundant mirrors. */
+ blk_queue_set_mirrors(q, 0);
+
return q;
fail_ref:
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -904,6 +904,24 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
+/*
+ * Get the number of read-redundant mirrors advertised by the queue.
+ */
+unsigned short blk_queue_get_mirrors(struct request_queue *q)
+{
+ return q->nr_mirrors;
+}
+EXPORT_SYMBOL(blk_queue_get_mirrors);
+
+/*
+ * Set the number of read-redundant mirrors for the queue.
+ */
+void blk_queue_set_mirrors(struct request_queue *q, unsigned short mirrors)
+{
+ q->nr_mirrors = mirrors;
+}
+EXPORT_SYMBOL(blk_queue_set_mirrors);
+
static int __init blk_settings_init(void)
{
blk_max_low_pfn = max_low_pfn - 1;
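
For illustration, here is a minimal sketch of how a mirroring driver might advertise its redundancy with the new setter when it creates its queue. The my_mirror_dev structure, its nr_copies field, and my_mirror_init_queue() are hypothetical stand-ins for driver-specific code, not part of this patch:

#include <linux/blkdev.h>

/* Hypothetical driver state; only nr_copies matters for this sketch. */
struct my_mirror_dev {
	struct request_queue	*queue;
	unsigned short		nr_copies;	/* total copies of the data */
};

static int my_mirror_init_queue(struct my_mirror_dev *dev)
{
	dev->queue = blk_alloc_queue(GFP_KERNEL);
	if (!dev->queue)
		return -ENOMEM;

	/* N copies of the data leave N - 1 redundant mirrors for reads. */
	blk_queue_set_mirrors(dev->queue, dev->nr_copies - 1);
	return 0;
}
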
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -680,6 +680,7 @@ struct request_queue {
#define BLK_MAX_WRITE_HINTS 5
u64 write_hints[BLK_MAX_WRITE_HINTS];
+ unsigned short nr_mirrors; /* read-redundant mirrors; defaults to zero */
};
#define QUEUE_FLAG_QUEUED 0 /* uses generic tag queueing */
@@ -1267,6 +1268,8 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
+extern unsigned short blk_queue_get_mirrors(struct request_queue *q);
+extern void blk_queue_set_mirrors(struct request_queue *q, unsigned short mirrors);
/*
* Number of physical segments as sent to the device.
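
On the consumer side, a read-error path could use the getter to bound how many alternate copies it tries before failing the bio. This sketch reuses the hypothetical my_mirror_dev from above; resubmit_read_to_mirror() is an assumed driver-internal helper, not an API added by this patch:

#include <linux/bio.h>

static void my_mirror_retry_read(struct my_mirror_dev *dev, struct bio *bio)
{
	unsigned short mirror;

	/* Try each redundant copy once before giving up. */
	for (mirror = 0; mirror < blk_queue_get_mirrors(dev->queue); mirror++) {
		if (resubmit_read_to_mirror(dev, bio, mirror) == 0)
			return;	/* a mirror returned good data */
	}
	bio_io_error(bio);	/* every copy failed */
}

Because blk_alloc_queue_node() now initializes nr_mirrors to zero, the loop above falls through immediately on a non-redundant queue and the bio is failed without spurious retries.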