
[v2,1/6] swap: remove remnants of polling from read_swap_cache_async

Message ID 20230609005158.2421285-2-surenb@google.com (mailing list archive)
State Handled Elsewhere
Series Per-vma lock support for swap and userfaults

Commit Message

Suren Baghdasaryan June 9, 2023, 12:51 a.m. UTC
Commit [1] introduced IO polling support during swapin to reduce
swap read latency for block devices that can be polled. However, a
later commit [2] removed polling support. Therefore it seems safe to
remove the do_poll parameter from read_swap_cache_async() and to
always call swap_readpage() with synchronous=false, waiting for IO
completion in folio_lock_or_retry().

[1] commit 23955622ff8d ("swap: add block io poll in swapin path")
[2] commit 9650b453a3d4 ("block: ignore RWF_HIPRI hint for sync dio")

Suggested-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 mm/madvise.c    |  4 ++--
 mm/swap.h       |  1 -
 mm/swap_state.c | 12 +++++-------
 3 files changed, 7 insertions(+), 10 deletions(-)
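
For readers skimming the archive: a minimal sketch of the calling
pattern this change settles on (illustrative kernel-style C, not
verbatim mm code; swapin_and_wait_sketch() is a hypothetical helper
invented for this note). The read is submitted asynchronously, and a
caller that needs the data waits on the page lock, which the bio
completion handler releases:

static struct page *swapin_and_wait_sketch(swp_entry_t entry,
					   struct vm_area_struct *vma,
					   unsigned long addr)
{
	struct page *page;

	/* Submit the swap read; returns without waiting for the IO. */
	page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
				     vma, addr, NULL);
	if (!page)
		return NULL;

	/*
	 * Sleep until the swap_readpage() completion unlocks the page.
	 * The real fault path waits in folio_lock_or_retry() instead,
	 * which may also drop mmap_lock and retry the fault.
	 */
	lock_page(page);
	if (!PageUptodate(page)) {	/* the read failed */
		unlock_page(page);
		put_page(page);
		return NULL;
	}
	unlock_page(page);
	return page;
}

Note that read_swap_cache_async() can also return a page that was
already in the swap cache; lock_page() then simply waits out any
in-flight read, so the pattern holds either way.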

Comments

Huang, Ying June 9, 2023, 1:57 a.m. UTC | #1
+ Ming Lei for confirmation.

Suren Baghdasaryan <surenb@google.com> writes:

> Commit [1] introduced IO polling support during swapin to reduce
> swap read latency for block devices that can be polled. However, a
> later commit [2] removed polling support. Therefore it seems safe to
> remove the do_poll parameter from read_swap_cache_async() and to
> always call swap_readpage() with synchronous=false, waiting for IO
> completion in folio_lock_or_retry().
>
> [1] commit 23955622ff8d ("swap: add block io poll in swapin path")
> [2] commit 9650b453a3d4 ("block: ignore RWF_HIPRI hint for sync dio")
>
> Suggested-by: Huang Ying <ying.huang@intel.com>
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>

Looks good to me!  Thanks!

Reviewed-by: "Huang, Ying" <ying.huang@intel.com>

Ming Lei June 9, 2023, 3:13 a.m. UTC | #2
On Fri, Jun 9, 2023 at 9:58 AM Huang, Ying <ying.huang@intel.com> wrote:
>
> + Ming Lei for confirmation.

Good catch, it isn't necessary to pass the polling parameter now.

Thanks,
Suren Baghdasaryan June 9, 2023, 6:50 p.m. UTC | #3
On Thu, Jun 8, 2023 at 8:14 PM Ming Lei <ming.lei@redhat.com> wrote:
>
> On Fri, Jun 9, 2023 at 9:58 AM Huang, Ying <ying.huang@intel.com> wrote:
> >
> > + Ming Lei for confirmation.
>
> Good catch, it isn't necessary to pass the polling parameter now.

Thanks folks for reviewing and confirming!

Christoph Hellwig June 12, 2023, 4:53 a.m. UTC | #4
Looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>

Patch

diff --git a/mm/madvise.c b/mm/madvise.c
index b5ffbaf616f5..b1e8adf1234e 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -215,7 +215,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
 			continue;
 
 		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
-					     vma, index, false, &splug);
+					     vma, index, &splug);
 		if (page)
 			put_page(page);
 	}
@@ -252,7 +252,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
 		rcu_read_unlock();
 
 		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
-					     NULL, 0, false, &splug);
+					     NULL, 0, &splug);
 		if (page)
 			put_page(page);
 
diff --git a/mm/swap.h b/mm/swap.h
index 7c033d793f15..8a3c7a0ace4f 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -46,7 +46,6 @@ struct folio *filemap_get_incore_folio(struct address_space *mapping,
 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 				   struct vm_area_struct *vma,
 				   unsigned long addr,
-				   bool do_poll,
 				   struct swap_iocb **plug);
 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 				     struct vm_area_struct *vma,
diff --git a/mm/swap_state.c b/mm/swap_state.c
index b76a65ac28b3..a3839de71f3f 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -517,15 +517,14 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
  */
 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 				   struct vm_area_struct *vma,
-				   unsigned long addr, bool do_poll,
-				   struct swap_iocb **plug)
+				   unsigned long addr, struct swap_iocb **plug)
 {
 	bool page_was_allocated;
 	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
 			vma, addr, &page_was_allocated);
 
 	if (page_was_allocated)
-		swap_readpage(retpage, do_poll, plug);
+		swap_readpage(retpage, false, plug);
 
 	return retpage;
 }
@@ -620,7 +619,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	struct swap_info_struct *si = swp_swap_info(entry);
 	struct blk_plug plug;
 	struct swap_iocb *splug = NULL;
-	bool do_poll = true, page_allocated;
+	bool page_allocated;
 	struct vm_area_struct *vma = vmf->vma;
 	unsigned long addr = vmf->address;
 
@@ -628,7 +627,6 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	if (!mask)
 		goto skip;
 
-	do_poll = false;
 	/* Read a page_cluster sized and aligned cluster around offset. */
 	start_offset = offset & ~mask;
 	end_offset = offset | mask;
@@ -660,7 +658,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	lru_add_drain();	/* Push any new pages onto the LRU now */
 skip:
 	/* The page was likely read above, so no need for plugging here */
-	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL);
+	return read_swap_cache_async(entry, gfp_mask, vma, addr, NULL);
 }
 
 int init_swap_address_space(unsigned int type, unsigned long nr_pages)
@@ -825,7 +823,7 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 skip:
 	/* The page was likely read above, so no need for plugging here */
 	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
-				     ra_info.win == 1, NULL);
+				     NULL);
 }
 
 /**