Message ID | 20240102175338.62012-8-ryncsn@gmail.com (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | swapin refactor for optimization and unified readahead | expand |
Hi Kairui,
kernel test robot noticed the following build warnings:
[auto build test WARNING on akpm-mm/mm-everything]
[also build test WARNING on next-20240103]
[cannot apply to linus/master v6.7-rc8]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patches, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Kairui-Song/mm-swapfile-c-add-back-some-comment/20240103-015650
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20240102175338.62012-8-ryncsn%40gmail.com
patch subject: [PATCH v2 7/9] mm/swap: avoid a duplicated swap cache lookup for SWP_SYNCHRONOUS_IO
config: arc-vdk_hs38_smp_defconfig (https://download.01.org/0day-ci/archive/20240103/202401032010.yrIDf885-lkp@intel.com/config)
compiler: arc-elf-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240103/202401032010.yrIDf885-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202401032010.yrIDf885-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> mm/swap_state.c:884: warning: Function parameter or member 'shadow' not described in 'swapin_direct'
vim +884 mm/swap_state.c
d9bfcfdc41e8e5 Huang Ying 2017-09-06 872
b16a5db0ccd159 Kairui Song 2024-01-03 873 /**
b16a5db0ccd159 Kairui Song 2024-01-03 874 * swapin_direct - swap in folios skipping swap cache and readahead
b16a5db0ccd159 Kairui Song 2024-01-03 875 * @entry: swap entry of this memory
b16a5db0ccd159 Kairui Song 2024-01-03 876 * @gfp_mask: memory allocation flags
b16a5db0ccd159 Kairui Song 2024-01-03 877 * @vmf: fault information
b16a5db0ccd159 Kairui Song 2024-01-03 878 *
b16a5db0ccd159 Kairui Song 2024-01-03 879 * Returns the struct folio for entry and addr after the swap entry is read
b16a5db0ccd159 Kairui Song 2024-01-03 880 * in.
b16a5db0ccd159 Kairui Song 2024-01-03 881 */
983c0b807f7eda Kairui Song 2024-01-03 882 static struct folio *swapin_direct(swp_entry_t entry, gfp_t gfp_mask,
cd81b9fd3de376 Kairui Song 2024-01-03 883 struct vm_fault *vmf, void *shadow)
b16a5db0ccd159 Kairui Song 2024-01-03 @884 {
b16a5db0ccd159 Kairui Song 2024-01-03 885 struct vm_area_struct *vma = vmf->vma;
b16a5db0ccd159 Kairui Song 2024-01-03 886 struct folio *folio;
b16a5db0ccd159 Kairui Song 2024-01-03 887
b16a5db0ccd159 Kairui Song 2024-01-03 888 /* skip swapcache */
b16a5db0ccd159 Kairui Song 2024-01-03 889 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
b16a5db0ccd159 Kairui Song 2024-01-03 890 vma, vmf->address, false);
b16a5db0ccd159 Kairui Song 2024-01-03 891 if (folio) {
9e22e4254bdb8c Kairui Song 2024-01-03 892 if (mem_cgroup_swapin_charge_folio(folio, NULL,
64ae20cbed3891 Kairui Song 2024-01-03 893 GFP_KERNEL, entry)) {
b16a5db0ccd159 Kairui Song 2024-01-03 894 folio_put(folio);
b16a5db0ccd159 Kairui Song 2024-01-03 895 return NULL;
b16a5db0ccd159 Kairui Song 2024-01-03 896 }
64ae20cbed3891 Kairui Song 2024-01-03 897
64ae20cbed3891 Kairui Song 2024-01-03 898 __folio_set_locked(folio);
64ae20cbed3891 Kairui Song 2024-01-03 899 __folio_set_swapbacked(folio);
64ae20cbed3891 Kairui Song 2024-01-03 900
b16a5db0ccd159 Kairui Song 2024-01-03 901 mem_cgroup_swapin_uncharge_swap(entry);
b16a5db0ccd159 Kairui Song 2024-01-03 902
b16a5db0ccd159 Kairui Song 2024-01-03 903 if (shadow)
b16a5db0ccd159 Kairui Song 2024-01-03 904 workingset_refault(folio, shadow);
b16a5db0ccd159 Kairui Song 2024-01-03 905
b16a5db0ccd159 Kairui Song 2024-01-03 906 folio_add_lru(folio);
b16a5db0ccd159 Kairui Song 2024-01-03 907
b16a5db0ccd159 Kairui Song 2024-01-03 908 /* To provide entry to swap_read_folio() */
b16a5db0ccd159 Kairui Song 2024-01-03 909 folio->swap = entry;
b16a5db0ccd159 Kairui Song 2024-01-03 910 swap_read_folio(folio, true, NULL);
b16a5db0ccd159 Kairui Song 2024-01-03 911 folio->private = NULL;
b16a5db0ccd159 Kairui Song 2024-01-03 912 }
b16a5db0ccd159 Kairui Song 2024-01-03 913
b16a5db0ccd159 Kairui Song 2024-01-03 914 return folio;
b16a5db0ccd159 Kairui Song 2024-01-03 915 }
b16a5db0ccd159 Kairui Song 2024-01-03 916
diff --git a/mm/shmem.c b/mm/shmem.c
index 928aa2304932..9da9f7a0e620 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1872,7 +1872,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 	}
 
 	/* Look it up and read it in.. */
-	folio = swap_cache_get_folio(swap, NULL, 0);
+	folio = swap_cache_get_folio(swap, NULL, 0, NULL);
 	if (!folio) {
 		/* Or update major stats only when swapin succeeds?? */
 		if (fault_type) {
diff --git a/mm/swap.h b/mm/swap.h
index 1f4cdb324bf0..9180411afcfe 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -58,7 +58,8 @@ void delete_from_swap_cache(struct folio *folio);
 void clear_shadow_from_swap_cache(int type, unsigned long begin,
 				  unsigned long end);
 struct folio *swap_cache_get_folio(swp_entry_t entry,
-		struct vm_area_struct *vma, unsigned long addr);
+		struct vm_area_struct *vma, unsigned long addr,
+		void **shadowp);
 struct folio *filemap_get_incore_folio(struct address_space *mapping,
 		pgoff_t index);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index f6f1e6f5d782..21badd4f0fc7 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -335,12 +335,18 @@ static inline bool swap_use_vma_readahead(void)
  * Caller must lock the swap device or hold a reference to keep it valid.
  */
 struct folio *swap_cache_get_folio(swp_entry_t entry,
-		struct vm_area_struct *vma, unsigned long addr)
+		struct vm_area_struct *vma, unsigned long addr, void **shadowp)
 {
 	struct folio *folio;
 
-	folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
-	if (!IS_ERR(folio)) {
+	folio = filemap_get_entry(swap_address_space(entry), swp_offset(entry));
+	if (xa_is_value(folio)) {
+		if (shadowp)
+			*shadowp = folio;
+		return NULL;
+	}
+
+	if (folio) {
 		bool vma_ra = swap_use_vma_readahead();
 		bool readahead;
 
@@ -370,8 +376,6 @@ struct folio *swap_cache_get_folio(swp_entry_t entry,
 		if (!vma || !vma_ra)
 			atomic_inc(&swapin_readahead_hits);
 	}
-	} else {
-		folio = NULL;
 	}
 
 	return folio;
@@ -876,11 +880,10 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
  * in.
  */
 static struct folio *swapin_direct(swp_entry_t entry, gfp_t gfp_mask,
-			struct vm_fault *vmf)
+			struct vm_fault *vmf, void *shadow)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct folio *folio;
-	void *shadow = NULL;
 
 	/* skip swapcache */
 	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
@@ -897,7 +900,6 @@ static struct folio *swapin_direct(swp_entry_t entry, gfp_t gfp_mask,
 
 	mem_cgroup_swapin_uncharge_swap(entry);
 
-	shadow = get_shadow_from_swap_cache(entry);
 	if (shadow)
 		workingset_refault(folio, shadow);
 
@@ -931,17 +933,18 @@ struct folio *swapin_entry(swp_entry_t entry, gfp_t gfp_mask,
 {
 	enum swap_cache_result cache_result;
 	struct mempolicy *mpol;
+	void *shadow = NULL;
 	struct folio *folio;
 	pgoff_t ilx;
 
-	folio = swap_cache_get_folio(entry, vmf->vma, vmf->address);
+	folio = swap_cache_get_folio(entry, vmf->vma, vmf->address, &shadow);
 	if (folio) {
 		cache_result = SWAP_CACHE_HIT;
 		goto done;
 	}
 
 	if (swap_use_no_readahead(swp_swap_info(entry), entry)) {
-		folio = swapin_direct(entry, gfp_mask, vmf);
+		folio = swapin_direct(entry, gfp_mask, vmf, shadow);
 		cache_result = SWAP_CACHE_BYPASS;
 	} else {
 		mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
@@ -952,7 +955,6 @@ struct folio *swapin_entry(swp_entry_t entry, gfp_t gfp_mask,
 		mpol_cond_put(mpol);
 		cache_result = SWAP_CACHE_MISS;
 	}
-
 done:
 	if (result)
 		*result = cache_result;