@@ -48,7 +48,7 @@ struct folio *filemap_get_incore_folio(struct address_space *mapping,
struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr,
struct swap_iocb **plug);
-struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
+struct folio *swap_cache_alloc_or_get(swp_entry_t entry, gfp_t gfp_flags,
struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
bool skip_if_exists);
struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
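
The new name states the contract directly: the caller always receives a folio that was either freshly allocated (*new_page_allocated set, so the caller owes the actual read) or found already sitting in the swap cache (*new_page_allocated clear). A rough, compilable userspace model of that alloc-or-get split, with every name here illustrative rather than kernel API:

#include <stdbool.h>
#include <stdlib.h>

/* Toy "swap cache": a fixed table of 4 KiB buffers keyed by offset. */
#define NSLOTS 64
static void *toy_cache[NSLOTS];

/*
 * Return the buffer for `key`, allocating one if absent. The out-
 * parameter plays the role of new_page_allocated: when it comes back
 * true, the caller is the one responsible for filling the buffer,
 * i.e. for issuing the read.
 */
static void *toy_alloc_or_get(unsigned int key, bool *newly_allocated)
{
	void **slot = &toy_cache[key % NSLOTS];

	if (*slot) {
		*newly_allocated = false;
		return *slot;
	}
	*slot = calloc(1, 4096);
	*newly_allocated = (*slot != NULL);
	return *slot;
}

Every call site below follows the same shape: check for NULL, then start I/O (or call zswap_folio_swapin()) only when the allocated flag came back true.
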
@@ -385,7 +385,7 @@ struct folio *filemap_get_incore_folio(struct address_space *mapping,
return folio;
}
-struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+struct folio *swap_cache_alloc_or_get(swp_entry_t entry, gfp_t gfp_mask,
struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
bool skip_if_exists)
{
@@ -443,12 +443,12 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
goto fail_put_swap;
/*
- * Protect against a recursive call to __read_swap_cache_async()
+ * Protect against a recursive call to swap_cache_alloc_or_get()
* on the same entry waiting forever here because SWAP_HAS_CACHE
* is set but the folio is not in the swap cache yet. This can
* happen today if mem_cgroup_swapin_charge_folio() below
* triggers reclaim through zswap, which may call
- * __read_swap_cache_async() in the writeback path.
+ * swap_cache_alloc_or_get() in the writeback path.
*/
if (skip_if_exists)
goto fail_put_swap;
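
skip_if_exists is the subtle part of this interface, and the comment above is worth restating: reclaim can be entered from inside this function (via the memcg charge), and zswap writeback running under that reclaim calls back in. The re-entrant call must not wait for a SWAP_HAS_CACHE holder that sits further up its own call stack. A small compilable sketch of the escape hatch, with invented state names:

#include <stdbool.h>
#include <stddef.h>

enum slot_state { SLOT_EMPTY, SLOT_CLAIMED, SLOT_CACHED };

/*
 * SLOT_CLAIMED models "SWAP_HAS_CACHE set, folio not yet inserted".
 * A normal caller waits for the claimant to finish; a caller that
 * may itself be the claimant (re-entered via reclaim) has to bail
 * out instead, or it would wait on itself forever.
 */
static void *slot_lookup(enum slot_state state, void *cached,
			 bool skip_if_exists)
{
	switch (state) {
	case SLOT_CACHED:
		return cached;
	case SLOT_CLAIMED:
		if (skip_if_exists)
			return NULL;	/* give up rather than deadlock */
		/* fall through: a real caller would sleep and retry */
	default:
		return NULL;
	}
}
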
@@ -457,7 +457,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* We might race against __delete_from_swap_cache(), and
* stumble across a swap_map entry whose SWAP_HAS_CACHE
* has not yet been cleared. Or race against another
- * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
+ * swap_cache_alloc_or_get(), which has set SWAP_HAS_CACHE
* in swap_map, but not yet added its folio to the swap cache.
*/
schedule_timeout_uninterruptible(1);
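
The same one-tick sleep resolves both races named in this comment, because each of them is a short transient: SWAP_HAS_CACHE observed set while the owner is still between claiming the entry and inserting the folio, or between deleting the folio and clearing the flag. A userspace model that compresses the function's outer retry loop into one helper, with nanosleep() standing in for schedule_timeout_uninterruptible(1):

#include <stdbool.h>
#include <time.h>

/*
 * `claimed` models SWAP_HAS_CACHE, `cached` models "the folio is in
 * the swap cache". The claimed-but-not-cached window is short, so a
 * brief sleep-and-retry is preferred over busy-waiting.
 */
static bool wait_for_claimant(volatile bool *claimed, volatile bool *cached)
{
	struct timespec tick = { 0, 1000000 };	/* ~1 ms, one "tick" */

	while (*claimed && !*cached)
		nanosleep(&tick, NULL);
	return *cached;
}
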
@@ -505,7 +505,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* the swap entry is no longer in use.
*
* get/put_swap_device() aren't needed to call this function, because
- * __read_swap_cache_async() call them and swap_read_folio() holds the
+ * swap_cache_alloc_or_get() calls them and swap_read_folio() holds the
* swap cache folio lock.
*/
struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
@@ -518,7 +518,7 @@ struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
struct folio *folio;
mpol = get_vma_policy(vma, addr, 0, &ilx);
- folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
+ folio = swap_cache_alloc_or_get(entry, gfp_mask, mpol, ilx,
&page_allocated, false);
mpol_cond_put(mpol);
@@ -634,7 +634,7 @@ struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
blk_start_plug(&plug);
for (offset = start_offset; offset <= end_offset ; offset++) {
/* Ok, do the async read-ahead now */
- folio = __read_swap_cache_async(
+ folio = swap_cache_alloc_or_get(
swp_entry(swp_type(entry), offset),
gfp_mask, mpol, ilx, &page_allocated, false);
if (!folio)
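
The readahead loop is where the boolean out-parameter pays off: the window from start_offset to end_offset is walked once, and I/O is started only for entries this call actually allocated, since anything already cached has a reader elsewhere. A compilable model of the window walk (toy_alloc_or_get() is the sketch from the first hunk; start_io() is a made-up stand-in for the async read):

#include <stdbool.h>

void *toy_alloc_or_get(unsigned int key, bool *newly_allocated);
void start_io(unsigned int key, void *buf);	/* stand-in for async read */

/* Walk the readahead window, reading only the slots we allocated. */
static void read_window(unsigned int start, unsigned int end)
{
	for (unsigned int off = start; off <= end; off++) {
		bool newly_allocated;
		void *buf = toy_alloc_or_get(off, &newly_allocated);

		if (!buf)
			continue;	/* slot unusable, skip it */
		if (newly_allocated)
			start_io(off, buf);
	}
}
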
@@ -653,7 +653,7 @@ struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
lru_add_drain(); /* Push any new pages onto the LRU now */
skip:
/* The page was likely read above, so no need for plugging here */
- folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
+ folio = swap_cache_alloc_or_get(entry, gfp_mask, mpol, ilx,
&page_allocated, false);
if (unlikely(page_allocated)) {
zswap_folio_swapin(folio);
@@ -809,7 +809,7 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
continue;
pte_unmap(pte);
pte = NULL;
- folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
+ folio = swap_cache_alloc_or_get(entry, gfp_mask, mpol, ilx,
&page_allocated, false);
if (!folio)
continue;
@@ -829,7 +829,7 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
lru_add_drain();
skip:
/* The folio was likely read above, so no need for plugging here */
- folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
+ folio = swap_cache_alloc_or_get(targ_entry, gfp_mask, mpol, targ_ilx,
&page_allocated, false);
if (unlikely(page_allocated)) {
zswap_folio_swapin(folio);
@@ -855,7 +855,7 @@ struct folio *swapin_direct(swp_entry_t entry, gfp_t gfp_mask,
pgoff_t ilx;
mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
- folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
+ folio = swap_cache_alloc_or_get(entry, gfp_mask, mpol, ilx,
&page_allocated, false);
mpol_cond_put(mpol);
@@ -1249,7 +1249,7 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
* CPU1 CPU2
* do_swap_page()
* ... swapoff+swapon
- * __read_swap_cache_async()
+ * swap_cache_alloc_or_get()
* swapcache_prepare()
* __swap_duplicate()
* // check swap_map
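
This diagram is also the argument for keeping get/put_swap_device() inside swap_cache_alloc_or_get(), as the -505 hunk's comment says: without pinning the device across the check, a concurrent swapoff+swapon can recycle the entry, and a later pte_same() check passes by accident. A loose userspace analogy, using a generation counter where the kernel really uses a percpu device reference, so treat this as illustration only:

#include <stdbool.h>

/* A swap "device" whose generation bumps on every swapoff+swapon. */
struct toy_swap_dev {
	unsigned long generation;
	unsigned char map[1024];	/* models swap_map */
};

/*
 * Checking map[offset] alone reproduces the CPU1 column above: the
 * entry may belong to a recycled device. Comparing against the
 * generation seen at lookup time catches a swapoff+swapon that
 * happened in between.
 */
static bool entry_still_valid(const struct toy_swap_dev *dev,
			      unsigned int offset,
			      unsigned long seen_generation)
{
	return dev->generation == seen_generation && dev->map[offset] != 0;
}
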
@@ -1126,7 +1126,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
/* try to allocate swap cache folio */
mpol = get_task_policy(current);
- folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
+ folio = swap_cache_alloc_or_get(swpentry, GFP_KERNEL, mpol,
NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
if (!folio)
return -ENOMEM;
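
zswap is exactly the re-entrant caller the -443 hunk's comment warns about, hence skip_if_exists == true here. The hunk ends just before the companion check: when the folio was not freshly allocated, writeback must back off, because an existing swap cache folio means swapin won the race and the entry is hot again. Roughly, as a sketch rather than a verbatim quote of mm/zswap.c:

	if (!folio_was_allocated) {
		folio_put(folio);	/* raced with swapin: skip writeback */
		return -EEXIST;
	}
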