--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -32,11 +32,6 @@ struct sbitmap_word {
* @cleared: word holding cleared bits
*/
unsigned long cleared ____cacheline_aligned_in_smp;
-
- /**
- * @swap_lock: Held while swapping word <-> cleared
- */
- spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;
/**
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -16,13 +16,9 @@ static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
{
struct sbitmap_word *map = &sb->map[index];
unsigned long mask, val;
- bool ret = false;
- unsigned long flags;
- spin_lock_irqsave(&map->swap_lock, flags);
-
- if (!map->cleared)
- goto out_unlock;
+ if (!READ_ONCE(map->cleared))
+ return false;
/*
* First get a stable cleared mask, setting the old mask to 0.
@@ -36,10 +32,7 @@ static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
val = map->word;
} while (cmpxchg(&map->word, val, val & ~mask) != val);
- ret = true;
-out_unlock:
- spin_unlock_irqrestore(&map->swap_lock, flags);
- return ret;
+ return true;
}
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
@@ -81,7 +74,6 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
for (i = 0; i < sb->map_nr; i++) {
sb->map[i].depth = min(depth, bits_per_word);
depth -= sb->map[i].depth;
- spin_lock_init(&sb->map[i].swap_lock);
}
return 0;
}
map->swap_lock serialises concurrent calls to sbitmap_deferred_clear(),
but that function already works atomically and guarantees not to lose
bits while applying the map->cleared mask. Remove the spinlocking from
sbitmap_deferred_clear().

For a single-threaded, tag-allocation-heavy test on top of null_blk
this yields a ~1.0-1.5% throughput increase, and according to perf a
3% -> 1.5% reduction of the cycles spent in sbitmap_get().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 include/linux/sbitmap.h |  5 -----
 lib/sbitmap.c           | 14 +++-----------
 2 files changed, 3 insertions(+), 16 deletions(-)
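
For reference, below is a minimal user-space sketch of the lock-free
scheme the patch relies on: xchg() drains ->cleared atomically (a bit
freed after the swap simply lands in ->cleared for a later call), and
the cmpxchg() loop retries until the drained mask is applied to ->word
without clobbering concurrent updates. This is an illustration only,
with GCC/Clang __atomic builtins standing in for the kernel primitives
and made-up names (toy_word, deferred_clear); it is not the kernel code:

/* Toy model of one sbitmap word plus its deferred-clear mask. */
#include <stdbool.h>
#include <stdio.h>

struct toy_word {
	unsigned long word;	/* allocated bits */
	unsigned long cleared;	/* freed bits not yet folded back */
};

static bool deferred_clear(struct toy_word *map)
{
	unsigned long mask, val;

	if (!__atomic_load_n(&map->cleared, __ATOMIC_RELAXED))
		return false;

	/*
	 * Atomically drain ->cleared; anything freed after this swap
	 * stays in ->cleared for a later call, so no bits are lost.
	 */
	mask = __atomic_exchange_n(&map->cleared, 0, __ATOMIC_SEQ_CST);

	/*
	 * Fold the drained mask into ->word; a concurrent update to
	 * ->word only makes the loop retry with the fresh value.
	 */
	val = __atomic_load_n(&map->word, __ATOMIC_RELAXED);
	while (!__atomic_compare_exchange_n(&map->word, &val,
					    val & ~mask, false,
					    __ATOMIC_SEQ_CST,
					    __ATOMIC_RELAXED))
		;	/* on failure, val is reloaded automatically */
	return true;
}

int main(void)
{
	struct toy_word map = { .word = 0xffUL, .cleared = 0x0fUL };

	deferred_clear(&map);
	printf("word after clear: %#lx\n", map.word);	/* prints 0xf0 */
	return 0;
}

The same reasoning holds under contention: two racing deferred_clear()
calls cannot both observe the same non-zero mask, because only one
xchg() can win the swap to zero.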