
[v2,2/8] mm: swap: remove unneeded VM_BUG_ON(*map != SWAP_HAS_CACHE) in swap_entry_range_free()

Message ID: 20250318150614.6415-3-shikemeng@huaweicloud.com (mailing list archive)
State: New
Series: Minor cleanups and improvements to swap freeing code

Commit Message

Kemeng Shi March 18, 2025, 3:06 p.m. UTC
As all callers of swap_entry_range_free() have already ensured that the
slots to be freed are marked as SWAP_HAS_CACHE while holding the cluster
lock, the VM_BUG_ON check can be safely removed. After this,
swap_entry_range_free() can drop any kind of last flag, so rename it to
swap_entries_free() and update its comment accordingly.

This is a preparation for using swap_entries_free() to drop the last ref
count and the SWAP_MAP_SHMEM flag.

Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
---
 mm/swapfile.c | 27 +++++++++++++--------------
 1 file changed, 13 insertions(+), 14 deletions(-)
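
For reference, a minimal sketch of swap_entries_free() as it stands after
this patch, assembled from the hunks below (parts the diff does not show
are elided; the map_end declaration is inferred from its use in the loop):

static void swap_entries_free(struct swap_info_struct *si,
			      struct swap_cluster_info *ci,
			      swp_entry_t entry, unsigned int nr_pages)
{
	unsigned long offset = swp_offset(entry);
	unsigned char *map = si->swap_map + offset;
	unsigned char *map_end = map + nr_pages;	/* inferred, not shown in the diff */

	/* ... accounting elided (not shown in the diff) ... */

	ci->count -= nr_pages;
	do {
		*map = 0;	/* VM_BUG_ON(*map != SWAP_HAS_CACHE) removed by this patch */
	} while (++map < map_end);

	/* ... remainder elided ... */
}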

Comments

Kairui Song March 19, 2025, 5:18 a.m. UTC | #1
On Tue, Mar 18, 2025 at 2:10 PM Kemeng Shi <shikemeng@huaweicloud.com> wrote:
>
> As all callers of swap_entry_range_free() have already ensured that the
> slots to be freed are marked as SWAP_HAS_CACHE while holding the cluster
> lock, the VM_BUG_ON check can be safely removed. After this,
> swap_entry_range_free() can drop any kind of last flag, so rename it to
> swap_entries_free() and update its comment accordingly.
>
> This is a preparation for using swap_entries_free() to drop the last ref
> count and the SWAP_MAP_SHMEM flag.
>
> Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
> Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
> ---
>  mm/swapfile.c | 27 +++++++++++++--------------
>  1 file changed, 13 insertions(+), 14 deletions(-)
>
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index 5a775456e26c..0aa7ce82c013 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -52,9 +52,9 @@
>  static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
>                                  unsigned char);
>  static void free_swap_count_continuations(struct swap_info_struct *);
> -static void swap_entry_range_free(struct swap_info_struct *si,
> -                                 struct swap_cluster_info *ci,
> -                                 swp_entry_t entry, unsigned int nr_pages);
> +static void swap_entries_free(struct swap_info_struct *si,
> +                             struct swap_cluster_info *ci,
> +                             swp_entry_t entry, unsigned int nr_pages);
>  static void swap_range_alloc(struct swap_info_struct *si,
>                              unsigned int nr_entries);
>  static bool folio_swapcache_freeable(struct folio *folio);
> @@ -1463,7 +1463,7 @@ static unsigned char swap_entry_put(struct swap_info_struct *si,
>         ci = lock_cluster(si, offset);
>         usage = swap_entry_put_locked(si, offset, 1);
>         if (!usage)
> -               swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
> +               swap_entries_free(si, ci, swp_entry(si->type, offset), 1);
>         unlock_cluster(ci);
>
>         return usage;
> @@ -1493,7 +1493,7 @@ static bool swap_entries_put_nr(struct swap_info_struct *si,
>         for (i = 0; i < nr; i++)
>                 WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE);
>         if (!has_cache)
> -               swap_entry_range_free(si, ci, entry, nr);
> +               swap_entries_free(si, ci, entry, nr);
>         unlock_cluster(ci);
>
>         return has_cache;
> @@ -1512,12 +1512,12 @@ static bool swap_entries_put_nr(struct swap_info_struct *si,
>  }
>
>  /*
> - * Drop the last HAS_CACHE flag of swap entries, caller have to
> - * ensure all entries belong to the same cgroup.
> + * Drop the last flag (1, SWAP_HAS_CACHE or SWAP_MAP_SHMEM) of swap entries;
> + * the caller has to ensure all entries belong to the same cgroup and cluster.
>   */
> -static void swap_entry_range_free(struct swap_info_struct *si,
> -                                 struct swap_cluster_info *ci,
> -                                 swp_entry_t entry, unsigned int nr_pages)
> +static void swap_entries_free(struct swap_info_struct *si,
> +                             struct swap_cluster_info *ci,
> +                             swp_entry_t entry, unsigned int nr_pages)
>  {
>         unsigned long offset = swp_offset(entry);
>         unsigned char *map = si->swap_map + offset;
> @@ -1530,7 +1530,6 @@ static void swap_entry_range_free(struct swap_info_struct *si,
>
>         ci->count -= nr_pages;
>         do {
> -               VM_BUG_ON(*map != SWAP_HAS_CACHE);

Hi Kemeng

Instead of just dropping this check, maybe it's better to change it to
something like VM_BUG_ON(!*map); to catch potential swap double frees?
I've found this check very helpful for debugging, especially for
catching concurrency problems.

Or, more strictly: VM_BUG_ON(*map != SWAP_HAS_CACHE && *map != 1 &&
*map != SWAP_MAP_SHMEM); you may introduce a helper to check whether an
entry is the "last map" like this and use it somewhere else too.
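
For example, a minimal sketch of such a helper (the name
swap_is_last_map() is hypothetical; the flag values are those used in
mm/swapfile.c):

static inline bool swap_is_last_map(unsigned char count)
{
	/* true if dropping this reference should free the entry */
	return count == 1 || count == SWAP_HAS_CACHE ||
	       count == SWAP_MAP_SHMEM;
}

The check in swap_entries_free() would then read
VM_BUG_ON(!swap_is_last_map(*map));.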


>                 *map = 0;
>         } while (++map < map_end);
>
> @@ -1553,7 +1552,7 @@ static void cluster_swap_free_nr(struct swap_info_struct *si,
>         ci = lock_cluster(si, offset);
>         do {
>                 if (!swap_entry_put_locked(si, offset, usage))
> -                       swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
> +                       swap_entries_free(si, ci, swp_entry(si->type, offset), 1);
>         } while (++offset < end);
>         unlock_cluster(ci);
>  }
> @@ -1596,11 +1595,11 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry)
>
>         ci = lock_cluster(si, offset);
>         if (swap_only_has_cache(si, offset, size))
> -               swap_entry_range_free(si, ci, entry, size);
> +               swap_entries_free(si, ci, entry, size);
>         else {
>                 for (int i = 0; i < size; i++, entry.val++) {
>                         if (!swap_entry_put_locked(si, offset + i, SWAP_HAS_CACHE))
> -                               swap_entry_range_free(si, ci, entry, 1);
> +                               swap_entries_free(si, ci, entry, 1);
>                 }
>         }
>         unlock_cluster(ci);
> --
> 2.30.0
>
Kemeng Shi March 20, 2025, 1:10 a.m. UTC | #2
On 3/19/2025 1:18 PM, Kairui Song wrote:
> On Tue, Mar 18, 2025 at 2:10 PM Kemeng Shi <shikemeng@huaweicloud.com> wrote:
>>
>> As all callers of swap_entry_range_free() have already ensured that the
>> slots to be freed are marked as SWAP_HAS_CACHE while holding the cluster
>> lock, the VM_BUG_ON check can be safely removed. After this,
>> swap_entry_range_free() can drop any kind of last flag, so rename it to
>> swap_entries_free() and update its comment accordingly.
>>
>> This is a preparation for using swap_entries_free() to drop the last ref
>> count and the SWAP_MAP_SHMEM flag.
>>
>> Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
>> Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
>> ---
>>  mm/swapfile.c | 27 +++++++++++++--------------
>>  1 file changed, 13 insertions(+), 14 deletions(-)
>>
>> diff --git a/mm/swapfile.c b/mm/swapfile.c
>> index 5a775456e26c..0aa7ce82c013 100644
>> --- a/mm/swapfile.c
>> +++ b/mm/swapfile.c
>> @@ -52,9 +52,9 @@
>>  static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
>>                                  unsigned char);
>>  static void free_swap_count_continuations(struct swap_info_struct *);
>> -static void swap_entry_range_free(struct swap_info_struct *si,
>> -                                 struct swap_cluster_info *ci,
>> -                                 swp_entry_t entry, unsigned int nr_pages);
>> +static void swap_entries_free(struct swap_info_struct *si,
>> +                             struct swap_cluster_info *ci,
>> +                             swp_entry_t entry, unsigned int nr_pages);
>>  static void swap_range_alloc(struct swap_info_struct *si,
>>                              unsigned int nr_entries);
>>  static bool folio_swapcache_freeable(struct folio *folio);
>> @@ -1463,7 +1463,7 @@ static unsigned char swap_entry_put(struct swap_info_struct *si,
>>         ci = lock_cluster(si, offset);
>>         usage = swap_entry_put_locked(si, offset, 1);
>>         if (!usage)
>> -               swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
>> +               swap_entries_free(si, ci, swp_entry(si->type, offset), 1);
>>         unlock_cluster(ci);
>>
>>         return usage;
>> @@ -1493,7 +1493,7 @@ static bool swap_entries_put_nr(struct swap_info_struct *si,
>>         for (i = 0; i < nr; i++)
>>                 WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE);
>>         if (!has_cache)
>> -               swap_entry_range_free(si, ci, entry, nr);
>> +               swap_entries_free(si, ci, entry, nr);
>>         unlock_cluster(ci);
>>
>>         return has_cache;
>> @@ -1512,12 +1512,12 @@ static bool swap_entries_put_nr(struct swap_info_struct *si,
>>  }
>>
>>  /*
>> - * Drop the last HAS_CACHE flag of swap entries, caller have to
>> - * ensure all entries belong to the same cgroup.
>> + * Drop the last flag (1, SWAP_HAS_CACHE or SWAP_MAP_SHMEM) of swap entries;
>> + * the caller has to ensure all entries belong to the same cgroup and cluster.
>>   */
>> -static void swap_entry_range_free(struct swap_info_struct *si,
>> -                                 struct swap_cluster_info *ci,
>> -                                 swp_entry_t entry, unsigned int nr_pages)
>> +static void swap_entries_free(struct swap_info_struct *si,
>> +                             struct swap_cluster_info *ci,
>> +                             swp_entry_t entry, unsigned int nr_pages)
>>  {
>>         unsigned long offset = swp_offset(entry);
>>         unsigned char *map = si->swap_map + offset;
>> @@ -1530,7 +1530,6 @@ static void swap_entry_range_free(struct swap_info_struct *si,
>>
>>         ci->count -= nr_pages;
>>         do {
>> -               VM_BUG_ON(*map != SWAP_HAS_CACHE);
> 
> Hi Kemeng
> 
> Instead of just dropping this check, maybe it's better to change it to
> something like VM_BUG_ON(!*map); to catch potential swap double frees?
> I've found this check very helpful for debugging, especially for
> catching concurrency problems.
Sure, I will keep the VM_BUG_ON as it's useful.
> 
> Or, more strictly: VM_BUG_ON(*map != SWAP_HAS_CACHE && *map != 1 &&
> *map != SWAP_MAP_SHMEM); you may introduce a helper to check whether an
> entry is the "last map" like this and use it somewhere else too.
I'll add the VM_BUG_ON in this way to catch unexpected freeing.
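
For illustration, the freeing loop with the stricter check suggested
above would look something like this (a sketch combining the quoted
suggestion with the loop from this patch, not the posted v3):

	ci->count -= nr_pages;
	do {
		/* catch double frees and unexpected non-last map values */
		VM_BUG_ON(*map != SWAP_HAS_CACHE && *map != 1 &&
			  *map != SWAP_MAP_SHMEM);
		*map = 0;
	} while (++map < map_end);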

Thanks,
Kemeng
> 
> 
>>                 *map = 0;
>>         } while (++map < map_end);
>>
>> @@ -1553,7 +1552,7 @@ static void cluster_swap_free_nr(struct swap_info_struct *si,
>>         ci = lock_cluster(si, offset);
>>         do {
>>                 if (!swap_entry_put_locked(si, offset, usage))
>> -                       swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
>> +                       swap_entries_free(si, ci, swp_entry(si->type, offset), 1);
>>         } while (++offset < end);
>>         unlock_cluster(ci);
>>  }
>> @@ -1596,11 +1595,11 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry)
>>
>>         ci = lock_cluster(si, offset);
>>         if (swap_only_has_cache(si, offset, size))
>> -               swap_entry_range_free(si, ci, entry, size);
>> +               swap_entries_free(si, ci, entry, size);
>>         else {
>>                 for (int i = 0; i < size; i++, entry.val++) {
>>                         if (!swap_entry_put_locked(si, offset + i, SWAP_HAS_CACHE))
>> -                               swap_entry_range_free(si, ci, entry, 1);
>> +                               swap_entries_free(si, ci, entry, 1);
>>                 }
>>         }
>>         unlock_cluster(ci);
>> --
>> 2.30.0
>>
>

Patch

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 5a775456e26c..0aa7ce82c013 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -52,9 +52,9 @@ 
 static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
 				 unsigned char);
 static void free_swap_count_continuations(struct swap_info_struct *);
-static void swap_entry_range_free(struct swap_info_struct *si,
-				  struct swap_cluster_info *ci,
-				  swp_entry_t entry, unsigned int nr_pages);
+static void swap_entries_free(struct swap_info_struct *si,
+			      struct swap_cluster_info *ci,
+			      swp_entry_t entry, unsigned int nr_pages);
 static void swap_range_alloc(struct swap_info_struct *si,
 			     unsigned int nr_entries);
 static bool folio_swapcache_freeable(struct folio *folio);
@@ -1463,7 +1463,7 @@  static unsigned char swap_entry_put(struct swap_info_struct *si,
 	ci = lock_cluster(si, offset);
 	usage = swap_entry_put_locked(si, offset, 1);
 	if (!usage)
-		swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
+		swap_entries_free(si, ci, swp_entry(si->type, offset), 1);
 	unlock_cluster(ci);
 
 	return usage;
@@ -1493,7 +1493,7 @@  static bool swap_entries_put_nr(struct swap_info_struct *si,
 	for (i = 0; i < nr; i++)
 		WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE);
 	if (!has_cache)
-		swap_entry_range_free(si, ci, entry, nr);
+		swap_entries_free(si, ci, entry, nr);
 	unlock_cluster(ci);
 
 	return has_cache;
@@ -1512,12 +1512,12 @@  static bool swap_entries_put_nr(struct swap_info_struct *si,
 }
 
 /*
- * Drop the last HAS_CACHE flag of swap entries, caller have to
- * ensure all entries belong to the same cgroup.
+ * Drop the last flag (1, SWAP_HAS_CACHE or SWAP_MAP_SHMEM) of swap entries;
+ * the caller has to ensure all entries belong to the same cgroup and cluster.
  */
-static void swap_entry_range_free(struct swap_info_struct *si,
-				  struct swap_cluster_info *ci,
-				  swp_entry_t entry, unsigned int nr_pages)
+static void swap_entries_free(struct swap_info_struct *si,
+			      struct swap_cluster_info *ci,
+			      swp_entry_t entry, unsigned int nr_pages)
 {
 	unsigned long offset = swp_offset(entry);
 	unsigned char *map = si->swap_map + offset;
@@ -1530,7 +1530,6 @@  static void swap_entry_range_free(struct swap_info_struct *si,
 
 	ci->count -= nr_pages;
 	do {
-		VM_BUG_ON(*map != SWAP_HAS_CACHE);
 		*map = 0;
 	} while (++map < map_end);
 
@@ -1553,7 +1552,7 @@  static void cluster_swap_free_nr(struct swap_info_struct *si,
 	ci = lock_cluster(si, offset);
 	do {
 		if (!swap_entry_put_locked(si, offset, usage))
-			swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
+			swap_entries_free(si, ci, swp_entry(si->type, offset), 1);
 	} while (++offset < end);
 	unlock_cluster(ci);
 }
@@ -1596,11 +1595,11 @@  void put_swap_folio(struct folio *folio, swp_entry_t entry)
 
 	ci = lock_cluster(si, offset);
 	if (swap_only_has_cache(si, offset, size))
-		swap_entry_range_free(si, ci, entry, size);
+		swap_entries_free(si, ci, entry, size);
 	else {
 		for (int i = 0; i < size; i++, entry.val++) {
 			if (!swap_entry_put_locked(si, offset + i, SWAP_HAS_CACHE))
-				swap_entry_range_free(si, ci, entry, 1);
+				swap_entries_free(si, ci, entry, 1);
 		}
 	}
 	unlock_cluster(ci);