[v5] sbitmap: fix io hung due to race on sbitmap_word::cleared

Message ID: 20240703022807.642115-1-yang.yang@vivo.com
State: New, archived
Series: [v5] sbitmap: fix io hung due to race on sbitmap_word::cleared

Commit Message

YangYang July 3, 2024, 2:28 a.m. UTC
Configuration for sbq:
  depth=64, wake_batch=6, shift=6, map_nr=1
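For concreteness, a queue of this shape could be set up roughly as
follows (a sketch, not part of the patch; note that wake_batch is not a
parameter of the init call but is derived internally from the depth):

	/*
	 * Hypothetical setup matching the configuration above: depth=64
	 * with shift=6 gives 64 bits per word, hence map_nr=1.
	 * wake_batch is computed internally from the depth.
	 */
	struct sbitmap_queue sbq;
	int ret;

	ret = sbitmap_queue_init_node(&sbq, 64, 6, false, GFP_KERNEL,
				      NUMA_NO_NODE);
	if (ret)
		return ret;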

1. There are 64 requests in progress:
  map->word = 0xFFFFFFFFFFFFFFFF
2. After all 64 requests complete and no more requests come:
  map->word = 0xFFFFFFFFFFFFFFFF, map->cleared = 0xFFFFFFFFFFFFFFFF
3. Now two tasks try to allocate requests:
  T1:                                       T2:
  __blk_mq_get_tag                          .
  __sbitmap_queue_get                       .
  sbitmap_get                               .
  sbitmap_find_bit                          .
  sbitmap_find_bit_in_word                  .
  __sbitmap_get_word  -> nr=-1              __blk_mq_get_tag
  sbitmap_deferred_clear                    __sbitmap_queue_get
  /* map->cleared=0xFFFFFFFFFFFFFFFF */     sbitmap_find_bit
    if (!READ_ONCE(map->cleared))           sbitmap_find_bit_in_word
      return false;                         __sbitmap_get_word -> nr=-1
    mask = xchg(&map->cleared, 0)           sbitmap_deferred_clear
    atomic_long_andnot()                    /* map->cleared=0 */
                                              if (!(map->cleared))
                                                return false;
                                     /*
                                      * map->cleared is cleared by T1
                                      * T2 fails to acquire the tag
                                      */

4. T2 is the sole tag waiter. When T1 puts the tag, T2 cannot be woken
up because wake_batch is set to 6. If no more requests come, T2 will
wait indefinitely.
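
The lost update in step 3 is easy to reproduce in userspace. The sketch
below mimics the pre-patch check-then-swap logic (illustrative only;
the names mirror the kernel's, but none of this is kernel code).
Depending on the interleaving, the second thread either retries (both
passed the ->cleared check) or goes to sleep because it read ->cleared
after the winner's xchg(); the latter is the hang scenario.

	/* build: cc -pthread -o race race.c */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_ulong word    = 0xFFFFFFFFFFFFFFFFUL;
	static atomic_ulong cleared = 0xFFFFFFFFFFFFFFFFUL;

	/* Pre-patch logic: check ->cleared, then swap it into ->word. */
	static bool racy_deferred_clear(void)
	{
		if (!atomic_load(&cleared))
			return false;	/* T2 lands here in the hang scenario */

		/* First get a stable cleared mask, setting the old mask to 0. */
		unsigned long mask = atomic_exchange(&cleared, 0UL);

		/* Now clear the masked bits in the word: word &= ~mask. */
		atomic_fetch_and(&word, ~mask);
		return true;
	}

	static void *worker(void *id)
	{
		printf("T%ld: %s\n", (long)id,
		       racy_deferred_clear() ? "retry allocation"
					     : "no cleared bits, go to sleep");
		return NULL;
	}

	int main(void)
	{
		pthread_t t1, t2;

		pthread_create(&t1, NULL, worker, (void *)1L);
		pthread_create(&t2, NULL, worker, (void *)2L);
		pthread_join(t1, NULL);
		pthread_join(t2, NULL);

		/*
		 * In the hang interleaving, one thread reports "go to
		 * sleep" although every bit of word is now free.
		 */
		printf("word = 0x%lx\n", atomic_load(&word));
		return 0;
	}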

This patch achieves two purposes:
1. The check of ->cleared and the updates of both ->cleared and ->word
need to be done atomically, and using a spinlock is the simplest
solution. So revert commit 661d4f55a794 ("sbitmap: remove swap_lock"),
which made this race possible.

2. Add an extra check in sbitmap_deferred_clear() to identify whether
->word still has free bits.
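
The extra check boils down to the following helper (a restatement of
the logic added by the patch below, shown in isolation; the function
name is ours, not the kernel's, and depth is assumed to be in
(0, BITS_PER_LONG]):

	static bool word_has_free_bits(unsigned long word, unsigned int depth,
				       unsigned int alloc_hint, bool wrap)
	{
		/* Mask covering the depth low bits of the word. */
		unsigned long word_mask = (~0UL) >> (BITS_PER_LONG - depth);

		/*
		 * Without wrap, this allocation pass never looks below
		 * alloc_hint, so bits below it must not count as free.
		 */
		if (!wrap && alloc_hint)
			word_mask &= ~((1UL << alloc_hint) - 1);

		/* A set bit means "allocated"; free bits are zero bits. */
		return (word & word_mask) != word_mask;
	}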

Fixes: 661d4f55a794 ("sbitmap: remove swap_lock")
Signed-off-by: Yang Yang <yang.yang@vivo.com>

---
Changes from v4:
  - Add some comments according to suggestion
Changes from v3:
  - Add more arguments to sbitmap_deferred_clear(); callers that don't
    care about the return value just pass 0
  - Consider the situation when using sbitmap_get_shallow()
  - Consider the situation when ->round_robin is true
  - Modify commit message
Changes from v2:
  - Modify commit message by suggestion
  - Add extra check in sbitmap_deferred_clear() by suggestion
Changes from v1:
  - simply revert commit 661d4f55a794 ("sbitmap: remove swap_lock")
---
 include/linux/sbitmap.h |  5 +++++
 lib/sbitmap.c           | 46 ++++++++++++++++++++++++++++++++++-------
 2 files changed, 43 insertions(+), 8 deletions(-)

Comments

Ming Lei July 3, 2024, 3:30 a.m. UTC | #1
On Wed, Jul 3, 2024 at 10:28 AM Yang Yang <yang.yang@vivo.com> wrote:
> [...]
>
> Fixes: 661d4f55a794 ("sbitmap: remove swap_lock")
> Signed-off-by: Yang Yang <yang.yang@vivo.com>

Reviewed-by: Ming Lei <ming.lei@redhat.com>
Pavel Begunkov July 3, 2024, 11:55 a.m. UTC | #2
On 7/3/24 03:28, Yang Yang wrote:
> [...]
>
> This patch achieves two purposes:
> 1. The check of ->cleared and the updates of both ->cleared and ->word
> need to be done atomically, and using a spinlock is the simplest
> solution. So revert commit 661d4f55a794 ("sbitmap: remove swap_lock"),
> which made this race possible.
>
> 2. Add an extra check in sbitmap_deferred_clear() to identify whether
> ->word still has free bits.
> 
> Fixes: 661d4f55a794 ("sbitmap: remove swap_lock")

Is the blame right? Considering that the revert alone doesn't fix
the problem, it sounds like the 2nd step might need to be ported
even to kernels without the blamed commit.
YangYang July 3, 2024, 12:28 p.m. UTC | #3
On 2024/7/3 19:55, Pavel Begunkov wrote:
> On 7/3/24 03:28, Yang Yang wrote:
>> [...]
>>
>> Fixes: 661d4f55a794 ("sbitmap: remove swap_lock")
> 
> Is the blame right? Considering that the revert alone doesn't fix
> the problem, it sounds like the 2nd step might need to be ported
> even to kernels without the blamed commit.
> 

Got it. How would you feel about removing
"Fixes: 661d4f55a794 ("sbitmap: remove swap_lock")" and modifying
the commit message as follows?

This patch achieves two purposes:
1. The check of ->cleared and the updates of both ->cleared and ->word
need to be done atomically, and using a spinlock is the simplest
solution.
2. Add an extra check in sbitmap_deferred_clear() to identify whether
->word still has free bits.

Thanks.
Pavel Begunkov July 5, 2024, 10:58 a.m. UTC | #4
On 7/3/24 13:28, YangYang wrote:
> On 2024/7/3 19:55, Pavel Begunkov wrote:
>> On 7/3/24 03:28, Yang Yang wrote:
>>> [...]
>>
>> Is the blame right? Considering that the revert alone doesn't fix
>> the problem, it sounds like the 2nd step might need to be ported
>> even to kernels without the blamed commit.
>>
> 
> Got it. How would you feel about removing
> "Fixes: 661d4f55a794 ("sbitmap: remove swap_lock")" and modifying
> the commit message as follows?

We need a fixes tag; which commit caused it? Can it happen prior
to ea86ea2cdced ("sbitmap: ammortize cost of clearing bits")?


YangYang July 8, 2024, 7:54 a.m. UTC | #5
On 2024/7/5 18:58, Pavel Begunkov wrote:
> On 7/3/24 13:28, YangYang wrote:
>> [...]
>
> We need a fixes tag; which commit caused it? Can it happen prior
> to ea86ea2cdced ("sbitmap: ammortize cost of clearing bits")?

The initial introduction of sbitmap_deferred_clear() also introduced
this race condition, so the fixes tag should be:
Fixes: ea86ea2cdced ("sbitmap: ammortize cost of clearing bits")

Thanks.


Patch

diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index d662cf136021..ec0b0e73c906 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -36,6 +36,11 @@  struct sbitmap_word {
 	 * @cleared: word holding cleared bits
 	 */
 	unsigned long cleared ____cacheline_aligned_in_smp;
+
+	/**
+	 * @swap_lock: Held while swapping word <-> cleared
+	 */
+	spinlock_t swap_lock;
 } ____cacheline_aligned_in_smp;
 
 /**
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 1e453f825c05..22d6e86ba87f 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -60,12 +60,35 @@  static inline void update_alloc_hint_after_get(struct sbitmap *sb,
 /*
  * See if we have deferred clears that we can batch move
  */
-static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
+static inline bool sbitmap_deferred_clear(struct sbitmap_word *map,
+		unsigned int depth, unsigned int alloc_hint, bool wrap)
 {
-	unsigned long mask;
+	unsigned long mask, flags, word_mask;
+	bool ret = false;
 
-	if (!READ_ONCE(map->cleared))
-		return false;
+	spin_lock_irqsave(&map->swap_lock, flags);
+
+	if (!map->cleared) {
+		if (depth > 0) {
+			word_mask = (~0UL) >> (BITS_PER_LONG - depth);
+			/*
+			 * The current behavior is to always retry after moving
+			 * ->cleared to word, and we change it to retry in case
+			 * of any free bits. To avoid dead loop, we need to take
+			 * wrap & alloc_hint into account. Without this, a soft
+			 * lockup was detected in our test environment.
+			 */
+			if (!wrap && alloc_hint)
+				word_mask &= ~((1UL << alloc_hint) - 1);
+
+			if ((READ_ONCE(map->word) & word_mask) == word_mask)
+				ret = false;
+			else
+				ret = true;
+		}
+
+		goto out_unlock;
+	}
 
 	/*
 	 * First get a stable cleared mask, setting the old mask to 0.
@@ -77,7 +100,10 @@  static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
 	 */
 	atomic_long_andnot(mask, (atomic_long_t *)&map->word);
 	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
-	return true;
+	ret = true;
+out_unlock:
+	spin_unlock_irqrestore(&map->swap_lock, flags);
+	return ret;
 }
 
 int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
@@ -85,6 +111,7 @@  int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
 		      bool alloc_hint)
 {
 	unsigned int bits_per_word;
+	int i;
 
 	if (shift < 0)
 		shift = sbitmap_calculate_shift(depth);
@@ -116,6 +143,9 @@  int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
 		return -ENOMEM;
 	}
 
+	for (i = 0; i < sb->map_nr; i++)
+		spin_lock_init(&sb->map[i].swap_lock);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(sbitmap_init_node);
@@ -126,7 +156,7 @@  void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
 	unsigned int i;
 
 	for (i = 0; i < sb->map_nr; i++)
-		sbitmap_deferred_clear(&sb->map[i]);
+		sbitmap_deferred_clear(&sb->map[i], 0, 0, 0);
 
 	sb->depth = depth;
 	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
@@ -179,7 +209,7 @@  static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
 					alloc_hint, wrap);
 		if (nr != -1)
 			break;
-		if (!sbitmap_deferred_clear(map))
+		if (!sbitmap_deferred_clear(map, depth, alloc_hint, wrap))
 			break;
 	} while (1);
 
@@ -496,7 +526,7 @@  unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
 		unsigned int map_depth = __map_depth(sb, index);
 		unsigned long val;
 
-		sbitmap_deferred_clear(map);
+		sbitmap_deferred_clear(map, 0, 0, 0);
 		val = READ_ONCE(map->word);
 		if (val == (1UL << (map_depth - 1)) - 1)
 			goto next;