@@ -39,8 +39,7 @@ int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
void __delete_from_swap_cache(struct folio *folio,
swp_entry_t entry, void *shadow);
void delete_from_swap_cache(struct folio *folio);
-void clear_shadow_from_swap_cache(int type, unsigned long begin,
- unsigned long end);
+void clear_shadow_from_swap_cache(swp_entry_t entry);
struct folio *swap_cache_get_folio(swp_entry_t entry,
struct vm_area_struct *vma, unsigned long addr);
struct folio *filemap_get_incore_folio(struct address_space *mapping,
@@ -148,8 +147,7 @@ static inline void delete_from_swap_cache(struct folio *folio)
{
}
-static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
- unsigned long end)
+static inline void clear_shadow_from_swap_cache(swp_entry_t entry)
{
}
@@ -245,34 +245,17 @@ void delete_from_swap_cache(struct folio *folio)
folio_ref_sub(folio, folio_nr_pages(folio));
}
-void clear_shadow_from_swap_cache(int type, unsigned long begin,
- unsigned long end)
+void clear_shadow_from_swap_cache(swp_entry_t entry)
{
- unsigned long curr = begin;
- void *old;
-
- for (;;) {
- swp_entry_t entry = swp_entry(type, curr);
- struct address_space *address_space = swap_address_space(entry);
- XA_STATE(xas, &address_space->i_pages, curr);
-
- xas_set_update(&xas, workingset_update_node);
+ struct address_space *address_space = swap_address_space(entry);
+ XA_STATE(xas, &address_space->i_pages, swp_offset(entry));
- xa_lock_irq(&address_space->i_pages);
- xas_for_each(&xas, old, end) {
- if (!xa_is_value(old))
- continue;
- xas_store(&xas, NULL);
- }
- xa_unlock_irq(&address_space->i_pages);
+ xas_set_update(&xas, workingset_update_node);
- /* search the next swapcache until we meet end */
- curr >>= SWAP_ADDRESS_SPACE_SHIFT;
- curr++;
- curr <<= SWAP_ADDRESS_SPACE_SHIFT;
- if (curr > end)
- break;
- }
+ xa_lock_irq(&address_space->i_pages);
+ if (xa_is_value(xas_load(&xas)))
+ xas_store(&xas, NULL);
+ xa_unlock_irq(&address_space->i_pages);
}
/*
@@ -724,7 +724,6 @@ static void add_to_avail_list(struct swap_info_struct *p)
static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
unsigned int nr_entries)
{
- unsigned long begin = offset;
unsigned long end = offset + nr_entries - 1;
void (*swap_slot_free_notify)(struct block_device *, unsigned long);
@@ -748,7 +747,6 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
swap_slot_free_notify(si->bdev, offset);
offset++;
}
- clear_shadow_from_swap_cache(si->type, begin, end);
/*
* Make sure that try_to_unuse() observes si->inuse_pages reaching 0
@@ -1605,6 +1603,8 @@ bool folio_free_swap(struct folio *folio)
/*
* Free the swap entry like above, but also try to
* free the page cache entry if it is the last user.
+ * Useful when clearing the swap map and swap cache
+ * without reading swap content (e.g. unmap, MADV_FREE)
*/
int free_swap_and_cache(swp_entry_t entry)
{
@@ -1626,6 +1626,8 @@ int free_swap_and_cache(swp_entry_t entry)
!swap_page_trans_huge_swapped(p, entry))
__try_to_reclaim_swap(p, swp_offset(entry),
TTRS_UNMAPPED | TTRS_FULL);
+ if (!count)
+ clear_shadow_from_swap_cache(entry);
put_swap_device(p);
}
return p != NULL;