
[2/9] mm/hugetlb: convert hugetlbfs_pagecache_present() to folios

Message ID 20230119211446.54165-3-sidhartha.kumar@oracle.com (mailing list archive)
State New
Series convert hugetlb fault functions to folios

Commit Message

Sidhartha Kumar Jan. 19, 2023, 9:14 p.m. UTC
Convert hugetlbfs_pagecache_present() to use folios internally by
replacing a call to find_get_page() with filemap_get_folio().

Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
---
 mm/hugetlb.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

Comments

Matthew Wilcox Jan. 20, 2023, 5:43 a.m. UTC | #1
On Thu, Jan 19, 2023 at 01:14:39PM -0800, Sidhartha Kumar wrote:
> +++ b/mm/hugetlb.c
> @@ -5653,15 +5653,15 @@ static bool hugetlbfs_pagecache_present(struct hstate *h,
>  {
>  	struct address_space *mapping;
>  	pgoff_t idx;
> -	struct page *page;
> +	struct folio *folio;
>  
>  	mapping = vma->vm_file->f_mapping;
>  	idx = vma_hugecache_offset(h, vma, address);
>  
> -	page = find_get_page(mapping, idx);
> -	if (page)
> -		put_page(page);
> -	return page != NULL;
> +	folio = filemap_get_folio(mapping, idx);
> +	if (folio)
> +		folio_put(folio);
> +	return folio != NULL;
>  }

Seems to me this function could be ...

	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t index = vma_hugecache_offset(h, vma, address);
	bool present;

	rcu_read_lock();
	present = page_cache_next_miss(mapping, index, 1) != index;
	rcu_read_unlock();

	return present;

No need to get/drop a refcount on the folio.  It's a bit similar to
filemap_range_has_page(), but the API is wrong.  Maybe there's room
for a little refactoring here.
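
Assembled, that suggestion would presumably turn the whole helper into
something like the sketch below (untested; it relies on
page_cache_next_miss() returning index itself when nothing is cached at
index, so any other return value means a folio is present):

	static bool hugetlbfs_pagecache_present(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
	{
		struct address_space *mapping = vma->vm_file->f_mapping;
		pgoff_t index = vma_hugecache_offset(h, vma, address);
		bool present;

		/* Only probe the cache; no folio reference is taken or dropped. */
		rcu_read_lock();
		present = page_cache_next_miss(mapping, index, 1) != index;
		rcu_read_unlock();

		return present;
	}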
Mike Kravetz Jan. 20, 2023, 11:40 p.m. UTC | #2
On 01/20/23 05:43, Matthew Wilcox wrote:
> On Thu, Jan 19, 2023 at 01:14:39PM -0800, Sidhartha Kumar wrote:
> > +++ b/mm/hugetlb.c
> > @@ -5653,15 +5653,15 @@ static bool hugetlbfs_pagecache_present(struct hstate *h,
> >  {
> >  	struct address_space *mapping;
> >  	pgoff_t idx;
> > -	struct page *page;
> > +	struct folio *folio;
> >  
> >  	mapping = vma->vm_file->f_mapping;
> >  	idx = vma_hugecache_offset(h, vma, address);
> >  
> > -	page = find_get_page(mapping, idx);
> > -	if (page)
> > -		put_page(page);
> > -	return page != NULL;
> > +	folio = filemap_get_folio(mapping, idx);
> > +	if (folio)
> > +		folio_put(folio);
> > +	return folio != NULL;
> >  }
> 
> Seems to me this function could be ...
> 
> 	struct address_space *mapping = vma->vm_file->f_mapping;
> 	pgoff_t index = vma_hugecache_offset(h, vma, address);
> 	bool present;
> 
> 	rcu_read_lock();
> 	present = page_cache_next_miss(mapping, index, 1) != index;
> 	rcu_read_unlock();
> 
> 	return present;
> 
> No need to get/drop a refcount on the folio.  It's a bit similar to
> filemap_range_has_page(), but the API is wrong.  Maybe there's room
> for a little refactoring here.

Thanks Matthew, I did not know those APIs were available.  Perhaps just
use page_cache_next_miss as suggested above for now.

FYI - There is the same pattern in hugetlbfs_fallocate()

		/* See if already present in mapping to avoid alloc/free */
		folio = filemap_get_folio(mapping, index);
		if (folio) {
			folio_put(folio);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}
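
If that spot were converted the same way, it could presumably end up
looking something like the sketch below (just the two snippets above
combined, using a new local bool present in hugetlbfs_fallocate(); not a
tested change):

		/* See if already present in mapping to avoid alloc/free */
		rcu_read_lock();
		present = page_cache_next_miss(mapping, index, 1) != index;
		rcu_read_unlock();
		if (present) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}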

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 849206e94742..04cbdf5025a5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5653,15 +5653,15 @@  static bool hugetlbfs_pagecache_present(struct hstate *h,
 {
 	struct address_space *mapping;
 	pgoff_t idx;
-	struct page *page;
+	struct folio *folio;
 
 	mapping = vma->vm_file->f_mapping;
 	idx = vma_hugecache_offset(h, vma, address);
 
-	page = find_get_page(mapping, idx);
-	if (page)
-		put_page(page);
-	return page != NULL;
+	folio = filemap_get_folio(mapping, idx);
+	if (folio)
+		folio_put(folio);
+	return folio != NULL;
 }
 
 int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,