diff --git a/block/blk-mq.c b/block/blk-mq.c
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2397,7 +2397,7 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
goto free_cpumask;
if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
- gfp, node))
+ gfp, node, false))
goto free_ctxs;
hctx->nr_ctx = 0;
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -479,7 +479,8 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
- ilog2(8), GFP_KERNEL, hctx->numa_node)) {
+ ilog2(8), GFP_KERNEL, hctx->numa_node,
+ false)) {
while (--i >= 0)
sbitmap_free(&khd->kcq_map[i]);
goto err_kcqs;
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -181,7 +181,7 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
goto fail_alloc_descs;
rc = sbitmap_init_node(&wq->sbmap, num_descs, -1, GFP_KERNEL,
- dev_to_node(dev));
+ dev_to_node(dev), false);
if (rc < 0)
goto fail_sbitmap_init;
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -45,7 +45,7 @@ struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
percpu_up_read(&wq->submit_lock);
}
- idx = sbitmap_get(&wq->sbmap, 0, false);
+ idx = sbitmap_get(&wq->sbmap, 0);
if (idx < 0) {
atomic_dec(&wq->dq_count);
return ERR_PTR(-EAGAIN);
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -61,6 +61,11 @@ struct sbitmap {
*/
unsigned int map_nr;
+ /**
+ * @round_robin: Allocate bits in strict round-robin order.
+ */
+ bool round_robin;
+
/**
* @map: Allocated bitmap.
*/
@@ -129,11 +134,6 @@ struct sbitmap_queue {
*/
atomic_t ws_active;
- /**
- * @round_robin: Allocate bits in strict round-robin order.
- */
- bool round_robin;
-
/**
* @min_shallow_depth: The minimum shallow depth which may be passed to
* sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
@@ -149,11 +149,14 @@ struct sbitmap_queue {
* given, a good default is chosen.
* @flags: Allocation flags.
* @node: Memory node to allocate on.
+ * @round_robin: If true, be stricter about allocation order; always allocate
+ * starting from the last allocated bit. This is less efficient
+ * than the default behavior (false).
*
* Return: Zero on success or negative errno on failure.
*/
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
- gfp_t flags, int node);
+ gfp_t flags, int node, bool round_robin);
/**
* sbitmap_free() - Free memory used by a &struct sbitmap.
@@ -179,15 +182,12 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth);
* sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
* @sb: Bitmap to allocate from.
* @alloc_hint: Hint for where to start searching for a free bit.
- * @round_robin: If true, be stricter about allocation order; always allocate
- * starting from the last allocated bit. This is less efficient
- * than the default behavior (false).
*
* This operation provides acquire barrier semantics if it succeeds.
*
* Return: Non-negative allocated bit number if successful, -1 otherwise.
*/
-int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);
+int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint);
/**
* sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -42,7 +42,7 @@ static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
}
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
- gfp_t flags, int node)
+ gfp_t flags, int node, bool round_robin)
{
unsigned int bits_per_word;
unsigned int i;
@@ -67,6 +67,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
sb->shift = shift;
sb->depth = depth;
sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
+ sb->round_robin = round_robin;
if (depth == 0) {
sb->map = NULL;
@@ -137,14 +138,14 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
}
static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
- unsigned int alloc_hint, bool round_robin)
+ unsigned int alloc_hint)
{
int nr;
do {
nr = __sbitmap_get_word(&sb->map[index].word,
sb->map[index].depth, alloc_hint,
- !round_robin);
+ !sb->round_robin);
if (nr != -1)
break;
if (!sbitmap_deferred_clear(sb, index))
@@ -154,7 +155,7 @@ static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
return nr;
}
-int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
+int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
unsigned int i, index;
int nr = -1;
@@ -166,14 +167,13 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
* alloc_hint to find the right word index. No point in looping
* twice in find_next_zero_bit() for that case.
*/
- if (round_robin)
+ if (sb->round_robin)
alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
else
alloc_hint = 0;
for (i = 0; i < sb->map_nr; i++) {
- nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
- round_robin);
+ nr = sbitmap_find_bit_in_index(sb, index, alloc_hint);
if (nr != -1) {
nr += index << sb->shift;
break;
@@ -355,7 +355,8 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
int ret;
int i;
- ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
+ ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
+ round_robin);
if (ret)
return ret;
@@ -387,7 +388,6 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
}
- sbq->round_robin = round_robin;
return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
@@ -429,12 +429,12 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq)
hint = depth ? prandom_u32() % depth : 0;
this_cpu_write(*sbq->alloc_hint, hint);
}
- nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);
+ nr = sbitmap_get(&sbq->sb, hint);
if (nr == -1) {
/* If the map is full, a hint won't do us much good. */
this_cpu_write(*sbq->alloc_hint, 0);
- } else if (nr == hint || unlikely(sbq->round_robin)) {
+ } else if (nr == hint || unlikely(sbq->sb.round_robin)) {
/* Only update the hint if we used it. */
hint = nr + 1;
if (hint >= depth - 1)
@@ -465,7 +465,7 @@ int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
if (nr == -1) {
/* If the map is full, a hint won't do us much good. */
this_cpu_write(*sbq->alloc_hint, 0);
- } else if (nr == hint || unlikely(sbq->round_robin)) {
+ } else if (nr == hint || unlikely(sbq->sb.round_robin)) {
/* Only update the hint if we used it. */
hint = nr + 1;
if (hint >= depth - 1)
@@ -581,7 +581,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
smp_mb__after_atomic();
sbitmap_queue_wake_up(sbq);
- if (likely(!sbq->round_robin && nr < sbq->sb.depth))
+ if (likely(!sbq->sb.round_robin && nr < sbq->sb.depth))
*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
@@ -638,7 +638,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
}
seq_puts(m, "}\n");
- seq_printf(m, "round_robin=%d\n", sbq->round_robin);
+ seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);
Currently the allocation round_robin info is maintained by
sbitmap_queue, but bit allocation really belongs to sbitmap. Also,
the following patch will move alloc_hint to sbitmap for users with
high depth.

So move round_robin to sbitmap.

Cc: Omar Sandoval <osandov@fb.com>
Cc: Sathya Prakash <sathya.prakash@broadcom.com>
Cc: Chaitra P B <chaitra.basappa@broadcom.com>
Cc: Suganath Prabu Subramani <suganath-prabu.subramani@broadcom.com>
Cc: Kashyap Desai <kashyap.desai@broadcom.com>
Cc: Sumit Saxena <sumit.saxena@broadcom.com>
Cc: Shivasharan S <shivasharan.srikanteshwara@broadcom.com>
Cc: Ewan D. Milne <emilne@redhat.com>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-mq.c            |  2 +-
 block/kyber-iosched.c     |  3 ++-
 drivers/dma/idxd/device.c |  2 +-
 drivers/dma/idxd/submit.c |  2 +-
 include/linux/sbitmap.h   | 20 ++++++++++----------
 lib/sbitmap.c             | 28 ++++++++++++++--------------
 6 files changed, 29 insertions(+), 28 deletions(-)
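
For reviewers, a minimal caller sketch against the post-patch API. The
user below (example_tags and the depth/shift values) is illustrative
and not part of this patch; the point is that round_robin is now fixed
at init time instead of being passed to every sbitmap_get() call:

	#include <linux/sbitmap.h>

	static struct sbitmap example_tags;

	static int example_init(int numa_node)
	{
		/* depth 128; shift of -1 lets sbitmap pick bits_per_word */
		return sbitmap_init_node(&example_tags, 128, -1, GFP_KERNEL,
					 numa_node, false);
	}

	static int example_get_tag(void)
	{
		/* hint 0 scans from the first word; returns -1 when full */
		return sbitmap_get(&example_tags, 0);
	}

	static void example_put_tag(int tag)
	{
		sbitmap_clear_bit(&example_tags, tag);
	}

Passing round_robin=false keeps the default behavior, where the search
may wrap within a word; round_robin=true enforces the stricter (and
less efficient) allocate-after-the-last-allocated-bit order documented
in the sbitmap_init_node() kernel-doc above.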