@@ -14,30 +14,31 @@
  */
 static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
 {
+	struct sbitmap_word *map = &sb->map[index];
 	unsigned long mask, val;
 	bool ret = false;
 	unsigned long flags;
 
-	spin_lock_irqsave(&sb->map[index].swap_lock, flags);
+	spin_lock_irqsave(&map->swap_lock, flags);
 
-	if (!sb->map[index].cleared)
+	if (!map->cleared)
 		goto out_unlock;
 
 	/*
 	 * First get a stable cleared mask, setting the old mask to 0.
 	 */
-	mask = xchg(&sb->map[index].cleared, 0);
+	mask = xchg(&map->cleared, 0);
 
 	/*
 	 * Now clear the masked bits in our free word
 	 */
 	do {
-		val = sb->map[index].word;
-	} while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);
+		val = map->word;
+	} while (cmpxchg(&map->word, val, val & ~mask) != val);
 
 	ret = true;
 out_unlock:
-	spin_unlock_irqrestore(&sb->map[index].swap_lock, flags);
+	spin_unlock_irqrestore(&map->swap_lock, flags);
 	return ret;
 }
 
Because of the spinlock and the atomics, sbitmap_deferred_clear() has to
reload &sb->map[index] on each access even though the map address never
changes. Hint the compiler by explicitly caching the address in a local
variable.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 lib/sbitmap.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
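
For readers who want to see the idea outside the kernel tree, here is a
minimal userspace sketch, not the kernel code itself: the struct and
function names are made up, and the GCC/Clang __atomic builtins stand in
for the kernel's cmpxchg(). Because each atomic read-modify-write acts as
a compiler barrier, spelling out maps[index].word inside the retry loop
would make the compiler recompute the address around every atomic;
caching &maps[index] in a local pointer lets the address stay in a
register for the whole loop.

#include <stdbool.h>
#include <stdio.h>

struct word_map {
	unsigned long word;	/* allocated-bit word */
	unsigned long cleared;	/* deferred-cleared bits */
};

/* Clear the bits in 'mask' from maps[index].word with a cmpxchg-style retry loop. */
static bool clear_masked_bits(struct word_map *maps, int index, unsigned long mask)
{
	/* Cache the element address once instead of re-deriving it per access. */
	struct word_map *map = &maps[index];
	unsigned long val;

	do {
		val = __atomic_load_n(&map->word, __ATOMIC_RELAXED);
		/* Retry if another thread changed map->word since the load above. */
	} while (!__atomic_compare_exchange_n(&map->word, &val, val & ~mask,
					      false, __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));
	return true;
}

int main(void)
{
	struct word_map maps[2] = { { .word = 0xffUL }, { .word = 0xf0UL } };

	clear_masked_bits(maps, 1, 0x30UL);
	printf("maps[1].word = %#lx\n", maps[1].word);	/* prints 0xc0 */
	return 0;
}

Built with something like "gcc -O2 sketch.c", the cached-pointer version
keeps the element address in a register across the loop; the patch above
applies the same hint to &sb->map[index].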