
[7/8] hugetlb: create hugetlb_unmap_file_folio to unmap single file folio

Message ID 20220824175757.20590-8-mike.kravetz@oracle.com (mailing list archive)
State New
Series hugetlb: Use new vma mutex for huge pmd sharing synchronization

Commit Message

Mike Kravetz Aug. 24, 2022, 5:57 p.m. UTC
Create the new routine hugetlb_unmap_file_folio that will unmap a single
file folio.  This is refactored code from hugetlb_vmdelete_list.  It is
modified to do locking within the routine itself and check whether the
page is mapped within a specific vma before unmapping.

This refactoring will be put to use and expanded upon in a subsequent
patch adding vma specific locking.

Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
---
 fs/hugetlbfs/inode.c | 123 +++++++++++++++++++++++++++++++++----------
 1 file changed, 94 insertions(+), 29 deletions(-)

Comments

Miaohe Lin Aug. 29, 2022, 2:44 a.m. UTC | #1
On 2022/8/25 1:57, Mike Kravetz wrote:
> Create the new routine hugetlb_unmap_file_folio that will unmap a single
> file folio.  This is refactored code from hugetlb_vmdelete_list.  It is
> modified to do locking within the routine itself and check whether the
> page is mapped within a specific vma before unmapping.
> 
> This refactoring will be put to use and expanded upon in a subsequent
> patch adding vma specific locking.
> 
> Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
> ---
>  fs/hugetlbfs/inode.c | 123 +++++++++++++++++++++++++++++++++----------
>  1 file changed, 94 insertions(+), 29 deletions(-)
> 
> diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
> index e83fd31671b3..b93d131b0cb5 100644
> --- a/fs/hugetlbfs/inode.c
> +++ b/fs/hugetlbfs/inode.c
> @@ -371,6 +371,94 @@ static void hugetlb_delete_from_page_cache(struct page *page)
>  	delete_from_page_cache(page);
>  }
>  
> +/*
> + * Called with i_mmap_rwsem held for inode based vma maps.  This makes
> + * sure vma (and vm_mm) will not go away.  We also hold the hugetlb fault
> + * mutex for the page in the mapping.  So, we can not race with page being
> + * faulted into the vma.
> + */
> +static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
> +				unsigned long addr, struct page *page)
> +{
> +	pte_t *ptep, pte;
> +
> +	ptep = huge_pte_offset(vma->vm_mm, addr,
> +			huge_page_size(hstate_vma(vma)));
> +
> +	if (!ptep)
> +		return false;
> +
> +	pte = huge_ptep_get(ptep);
> +	if (huge_pte_none(pte) || !pte_present(pte))
> +		return false;
> +
> +	if (pte_page(pte) == page)
> +		return true;

I'm wondering whether the pte entry could change after we check it, since huge_pte_lock is not held here.
But I think holding i_mmap_rwsem in write mode should give us such a guarantee, e.g. a migration
entry is only changed back to a huge pte entry while holding i_mmap_rwsem in read mode.
Or am I missing something?
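
(Illustration only, not part of the patch: a minimal sketch of a variant of
hugetlb_vma_maps_page() that checks the entry under the page table lock, if
the lockless check were not considered sufficient.  The _locked name is
hypothetical.)

	static bool hugetlb_vma_maps_page_locked(struct vm_area_struct *vma,
						 unsigned long addr, struct page *page)
	{
		struct hstate *h = hstate_vma(vma);
		struct mm_struct *mm = vma->vm_mm;
		spinlock_t *ptl;
		pte_t *ptep, pte;
		bool ret = false;

		ptep = huge_pte_offset(mm, addr, huge_page_size(h));
		if (!ptep)
			return false;

		/* hold the huge page table lock so the entry cannot change under us */
		ptl = huge_pte_lock(h, mm, ptep);
		pte = huge_ptep_get(ptep);
		if (!huge_pte_none(pte) && pte_present(pte) && pte_page(pte) == page)
			ret = true;
		spin_unlock(ptl);

		return ret;
	}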

> +
> +	return false;
> +}
> +
> +/*
> + * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
> + * No, because the interval tree returns us only those vmas
> + * which overlap the truncated area starting at pgoff,
> + * and no vma on a 32-bit arch can span beyond the 4GB.
> + */
> +static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
> +{
> +	if (vma->vm_pgoff < start)
> +		return (start - vma->vm_pgoff) << PAGE_SHIFT;
> +	else
> +		return 0;
> +}
> +
> +static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
> +{
> +	unsigned long t_end;
> +
> +	if (!end)
> +		return vma->vm_end;
> +
> +	t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
> +	if (t_end > vma->vm_end)
> +		t_end = vma->vm_end;
> +	return t_end;
> +}
> +
> +/*
> + * Called with hugetlb fault mutex held.  Therefore, no more mappings to
> + * this folio can be created while executing the routine.
> + */
> +static void hugetlb_unmap_file_folio(struct hstate *h,
> +					struct address_space *mapping,
> +					struct folio *folio, pgoff_t index)
> +{
> +	struct rb_root_cached *root = &mapping->i_mmap;
> +	struct page *page = &folio->page;
> +	struct vm_area_struct *vma;
> +	unsigned long v_start;
> +	unsigned long v_end;
> +	pgoff_t start, end;
> +
> +	start = index * pages_per_huge_page(h);
> +	end = ((index + 1) * pages_per_huge_page(h));

It seems the outer parentheses are unneeded?
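
I.e. the assignment could simply read:

	end = (index + 1) * pages_per_huge_page(h);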

Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>

Thanks,
Miaohe Lin
Mike Kravetz Aug. 29, 2022, 10:37 p.m. UTC | #2
On 08/29/22 10:44, Miaohe Lin wrote:
> On 2022/8/25 1:57, Mike Kravetz wrote:
> > Create the new routine hugetlb_unmap_file_folio that will unmap a single
> > file folio.  This is refactored code from hugetlb_vmdelete_list.  It is
> > modified to do locking within the routine itself and check whether the
> > page is mapped within a specific vma before unmapping.
> > 
> > This refactoring will be put to use and expanded upon in a subsequent
> > patch adding vma specific locking.
> > 
> > Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
> > ---
> >  fs/hugetlbfs/inode.c | 123 +++++++++++++++++++++++++++++++++----------
> >  1 file changed, 94 insertions(+), 29 deletions(-)
> > 
> > diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
> > index e83fd31671b3..b93d131b0cb5 100644
> > --- a/fs/hugetlbfs/inode.c
> > +++ b/fs/hugetlbfs/inode.c
> > @@ -371,6 +371,94 @@ static void hugetlb_delete_from_page_cache(struct page *page)
> >  	delete_from_page_cache(page);
> >  }
> >  
> > +/*
> > + * Called with i_mmap_rwsem held for inode based vma maps.  This makes
> > + * sure vma (and vm_mm) will not go away.  We also hold the hugetlb fault
> > + * mutex for the page in the mapping.  So, we can not race with page being
> > + * faulted into the vma.
> > + */
> > +static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
> > +				unsigned long addr, struct page *page)
> > +{
> > +	pte_t *ptep, pte;
> > +
> > +	ptep = huge_pte_offset(vma->vm_mm, addr,
> > +			huge_page_size(hstate_vma(vma)));
> > +
> > +	if (!ptep)
> > +		return false;
> > +
> > +	pte = huge_ptep_get(ptep);
> > +	if (huge_pte_none(pte) || !pte_present(pte))
> > +		return false;
> > +
> > +	if (pte_page(pte) == page)
> > +		return true;
> 
> I'm wondering whether the pte entry could change after we check it, since huge_pte_lock is not held here.
> But I think holding i_mmap_rwsem in write mode should give us such a guarantee, e.g. a migration
> entry is only changed back to a huge pte entry while holding i_mmap_rwsem in read mode.
> Or am I missing something?

Let me think about this.  I do not think it is possible, but you ask good
questions.

Do note that this is the same locking sequence used at the beginning of the
page fault code where the decision to call hugetlb_no_page() is made.
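
For reference, a condensed and paraphrased sketch of that part of
hugetlb_fault() as of the base of this series (not a verbatim quote of
the source):

	mapping = vma->vm_file->f_mapping;
	i_mmap_lock_read(mapping);
	ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
	...
	idx = vma_hugecache_offset(h, vma, haddr);
	hash = hugetlb_fault_mutex_hash(mapping, idx);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);

	entry = huge_ptep_get(ptep);	/* also read without holding huge_pte_lock */
	if (huge_pte_none_mostly(entry))
		ret = hugetlb_no_page(mm, vma, mapping, idx, address,
				      ptep, entry, flags);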

> 
> > +
> > +	return false;
> > +}
> > +
> > +/*
> > + * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
> > + * No, because the interval tree returns us only those vmas
> > + * which overlap the truncated area starting at pgoff,
> > + * and no vma on a 32-bit arch can span beyond the 4GB.
> > + */
> > +static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
> > +{
> > +	if (vma->vm_pgoff < start)
> > +		return (start - vma->vm_pgoff) << PAGE_SHIFT;
> > +	else
> > +		return 0;
> > +}
> > +
> > +static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
> > +{
> > +	unsigned long t_end;
> > +
> > +	if (!end)
> > +		return vma->vm_end;
> > +
> > +	t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
> > +	if (t_end > vma->vm_end)
> > +		t_end = vma->vm_end;
> > +	return t_end;
> > +}
> > +
> > +/*
> > + * Called with hugetlb fault mutex held.  Therefore, no more mappings to
> > + * this folio can be created while executing the routine.
> > + */
> > +static void hugetlb_unmap_file_folio(struct hstate *h,
> > +					struct address_space *mapping,
> > +					struct folio *folio, pgoff_t index)
> > +{
> > +	struct rb_root_cached *root = &mapping->i_mmap;
> > +	struct page *page = &folio->page;
> > +	struct vm_area_struct *vma;
> > +	unsigned long v_start;
> > +	unsigned long v_end;
> > +	pgoff_t start, end;
> > +
> > +	start = index * pages_per_huge_page(h);
> > +	end = ((index + 1) * pages_per_huge_page(h));
> 
> It seems the outer parentheses are unneeded?

Correct.  Thanks.
Miaohe Lin Aug. 30, 2022, 2:46 a.m. UTC | #3
On 2022/8/30 6:37, Mike Kravetz wrote:
> On 08/29/22 10:44, Miaohe Lin wrote:
>> On 2022/8/25 1:57, Mike Kravetz wrote:
>>> Create the new routine hugetlb_unmap_file_folio that will unmap a single
>>> file folio.  This is refactored code from hugetlb_vmdelete_list.  It is
>>> modified to do locking within the routine itself and check whether the
>>> page is mapped within a specific vma before unmapping.
>>>
>>> This refactoring will be put to use and expanded upon in a subsequent
>>> patch adding vma specific locking.
>>>
>>> Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
>>> ---
>>>  fs/hugetlbfs/inode.c | 123 +++++++++++++++++++++++++++++++++----------
>>>  1 file changed, 94 insertions(+), 29 deletions(-)
>>>
>>> diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
>>> index e83fd31671b3..b93d131b0cb5 100644
>>> --- a/fs/hugetlbfs/inode.c
>>> +++ b/fs/hugetlbfs/inode.c
>>> @@ -371,6 +371,94 @@ static void hugetlb_delete_from_page_cache(struct page *page)
>>>  	delete_from_page_cache(page);
>>>  }
>>>  
>>> +/*
>>> + * Called with i_mmap_rwsem held for inode based vma maps.  This makes
>>> + * sure vma (and vm_mm) will not go away.  We also hold the hugetlb fault
>>> + * mutex for the page in the mapping.  So, we can not race with page being
>>> + * faulted into the vma.
>>> + */
>>> +static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
>>> +				unsigned long addr, struct page *page)
>>> +{
>>> +	pte_t *ptep, pte;
>>> +
>>> +	ptep = huge_pte_offset(vma->vm_mm, addr,
>>> +			huge_page_size(hstate_vma(vma)));
>>> +
>>> +	if (!ptep)
>>> +		return false;
>>> +
>>> +	pte = huge_ptep_get(ptep);
>>> +	if (huge_pte_none(pte) || !pte_present(pte))
>>> +		return false;
>>> +
>>> +	if (pte_page(pte) == page)
>>> +		return true;
>>
>> I'm wondering whether the pte entry could change after we check it, since huge_pte_lock is not held here.
>> But I think holding i_mmap_rwsem in write mode should give us such a guarantee, e.g. a migration
>> entry is only changed back to a huge pte entry while holding i_mmap_rwsem in read mode.
>> Or am I missing something?
> 
> Let me think about this.  I do not think it is possible, but you ask good
> questions.
> 
> Do note that this is the same locking sequence used at the beginning of the
> page fault code where the decision to call hugetlb_no_page() is made.

Yes, hugetlb_fault() can tolerate a stale pte entry because the pte entry will be re-checked later under the page table lock.
However, if we see a stale pte entry here, the page might be left over after truncation and thus break truncation? But I'm not
sure whether this will occur. Maybe the i_mmap_rwsem write lock and hugetlb_fault_mutex can prevent this issue.

Thanks,
Miaohe Lin


> 
>>
>>> +
>>> +	return false;
>>> +}
>>> +
>>> +/*
>>> + * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
>>> + * No, because the interval tree returns us only those vmas
>>> + * which overlap the truncated area starting at pgoff,
>>> + * and no vma on a 32-bit arch can span beyond the 4GB.
>>> + */
>>> +static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
>>> +{
>>> +	if (vma->vm_pgoff < start)
>>> +		return (start - vma->vm_pgoff) << PAGE_SHIFT;
>>> +	else
>>> +		return 0;
>>> +}
>>> +
>>> +static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
>>> +{
>>> +	unsigned long t_end;
>>> +
>>> +	if (!end)
>>> +		return vma->vm_end;
>>> +
>>> +	t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
>>> +	if (t_end > vma->vm_end)
>>> +		t_end = vma->vm_end;
>>> +	return t_end;
>>> +}
>>> +
>>> +/*
>>> + * Called with hugetlb fault mutex held.  Therefore, no more mappings to
>>> + * this folio can be created while executing the routine.
>>> + */
>>> +static void hugetlb_unmap_file_folio(struct hstate *h,
>>> +					struct address_space *mapping,
>>> +					struct folio *folio, pgoff_t index)
>>> +{
>>> +	struct rb_root_cached *root = &mapping->i_mmap;
>>> +	struct page *page = &folio->page;
>>> +	struct vm_area_struct *vma;
>>> +	unsigned long v_start;
>>> +	unsigned long v_end;
>>> +	pgoff_t start, end;
>>> +
>>> +	start = index * pages_per_huge_page(h);
>>> +	end = ((index + 1) * pages_per_huge_page(h));
>>
>> It seems the outer parentheses are unneeded?
> 
> Correct.  Thanks.
>
Mike Kravetz Sept. 2, 2022, 9:35 p.m. UTC | #4
On 08/30/22 10:46, Miaohe Lin wrote:
> On 2022/8/30 6:37, Mike Kravetz wrote:
> > On 08/29/22 10:44, Miaohe Lin wrote:
> >> On 2022/8/25 1:57, Mike Kravetz wrote:
> >>> Create the new routine hugetlb_unmap_file_folio that will unmap a single
> >>> file folio.  This is refactored code from hugetlb_vmdelete_list.  It is
> >>> modified to do locking within the routine itself and check whether the
> >>> page is mapped within a specific vma before unmapping.
> >>>
> >>> This refactoring will be put to use and expanded upon in a subsequent
> >>> patch adding vma specific locking.
> >>>
> >>> Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
> >>> ---
> >>>  fs/hugetlbfs/inode.c | 123 +++++++++++++++++++++++++++++++++----------
> >>>  1 file changed, 94 insertions(+), 29 deletions(-)
> >>>
> >>> diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
> >>> index e83fd31671b3..b93d131b0cb5 100644
> >>> --- a/fs/hugetlbfs/inode.c
> >>> +++ b/fs/hugetlbfs/inode.c
> >>> @@ -371,6 +371,94 @@ static void hugetlb_delete_from_page_cache(struct page *page)
> >>>  	delete_from_page_cache(page);
> >>>  }
> >>>  
> >>> +/*
> >>> + * Called with i_mmap_rwsem held for inode based vma maps.  This makes
> >>> + * sure vma (and vm_mm) will not go away.  We also hold the hugetlb fault
> >>> + * mutex for the page in the mapping.  So, we can not race with page being
> >>> + * faulted into the vma.
> >>> + */
> >>> +static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
> >>> +				unsigned long addr, struct page *page)
> >>> +{
> >>> +	pte_t *ptep, pte;
> >>> +
> >>> +	ptep = huge_pte_offset(vma->vm_mm, addr,
> >>> +			huge_page_size(hstate_vma(vma)));
> >>> +
> >>> +	if (!ptep)
> >>> +		return false;
> >>> +
> >>> +	pte = huge_ptep_get(ptep);
> >>> +	if (huge_pte_none(pte) || !pte_present(pte))
> >>> +		return false;
> >>> +
> >>> +	if (pte_page(pte) == page)
> >>> +		return true;
> >>
> >> I'm wondering whether the pte entry could change after we check it, since huge_pte_lock is not held here.
> >> But I think holding i_mmap_rwsem in write mode should give us such a guarantee, e.g. a migration
> >> entry is only changed back to a huge pte entry while holding i_mmap_rwsem in read mode.
> >> Or am I missing something?
> > 
> > Let me think about this.  I do not think it is possible, but you ask good
> > questions.
> > 
> > Do note that this is the same locking sequence used at the beginning of the
> > page fault code where the decision to call hugetlb_no_page() is made.
> 
> Yes, hugetlb_fault() can tolerate a stale pte entry because the pte entry will be re-checked later under the page table lock.
> However, if we see a stale pte entry here, the page might be left over after truncation and thus break truncation? But I'm not
> sure whether this will occur. Maybe the i_mmap_rwsem write lock and hugetlb_fault_mutex can prevent this issue.
> 

I looked at this some more.  Just to be clear, we only need to worry
about modifications of pte_page().  Racing with other pte modifications
such as accessed, or protection changes is acceptable.

Of course, the fault mutex prevents faults from happening.  i_mmap_rwsem
protects against unmap and truncation operations as well as migration as
you noted above.  I believe the only other place where we update pte_page()
is when copying page table such as during fork.  However, with commit
bcd51a3c679d "Lazy page table copies in fork()" we are going to skip
copying for files and rely on page faults to populate the tables.

I believe we are safe from races with just the fault mutex and i_mmap_rwsem.
Miaohe Lin Sept. 5, 2022, 2:32 a.m. UTC | #5
On 2022/9/3 5:35, Mike Kravetz wrote:
> On 08/30/22 10:46, Miaohe Lin wrote:
>> On 2022/8/30 6:37, Mike Kravetz wrote:
>>> On 08/29/22 10:44, Miaohe Lin wrote:
>>>> On 2022/8/25 1:57, Mike Kravetz wrote:
>>>>> Create the new routine hugetlb_unmap_file_folio that will unmap a single
>>>>> file folio.  This is refactored code from hugetlb_vmdelete_list.  It is
>>>>> modified to do locking within the routine itself and check whether the
>>>>> page is mapped within a specific vma before unmapping.
>>>>>
>>>>> This refactoring will be put to use and expanded upon in a subsequent
>>>>> patch adding vma specific locking.
>>>>>
>>>>> Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
>>>>> ---
>>>>>  fs/hugetlbfs/inode.c | 123 +++++++++++++++++++++++++++++++++----------
>>>>>  1 file changed, 94 insertions(+), 29 deletions(-)
>>>>>
>>>>> diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
>>>>> index e83fd31671b3..b93d131b0cb5 100644
>>>>> --- a/fs/hugetlbfs/inode.c
>>>>> +++ b/fs/hugetlbfs/inode.c
>>>>> @@ -371,6 +371,94 @@ static void hugetlb_delete_from_page_cache(struct page *page)
>>>>>  	delete_from_page_cache(page);
>>>>>  }
>>>>>  
>>>>> +/*
>>>>> + * Called with i_mmap_rwsem held for inode based vma maps.  This makes
>>>>> + * sure vma (and vm_mm) will not go away.  We also hold the hugetlb fault
>>>>> + * mutex for the page in the mapping.  So, we can not race with page being
>>>>> + * faulted into the vma.
>>>>> + */
>>>>> +static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
>>>>> +				unsigned long addr, struct page *page)
>>>>> +{
>>>>> +	pte_t *ptep, pte;
>>>>> +
>>>>> +	ptep = huge_pte_offset(vma->vm_mm, addr,
>>>>> +			huge_page_size(hstate_vma(vma)));
>>>>> +
>>>>> +	if (!ptep)
>>>>> +		return false;
>>>>> +
>>>>> +	pte = huge_ptep_get(ptep);
>>>>> +	if (huge_pte_none(pte) || !pte_present(pte))
>>>>> +		return false;
>>>>> +
>>>>> +	if (pte_page(pte) == page)
>>>>> +		return true;
>>>>
>>>> I'm wondering whether the pte entry could change after we check it, since huge_pte_lock is not held here.
>>>> But I think holding i_mmap_rwsem in write mode should give us such a guarantee, e.g. a migration
>>>> entry is only changed back to a huge pte entry while holding i_mmap_rwsem in read mode.
>>>> Or am I missing something?
>>>
>>> Let me think about this.  I do not think it is possible, but you ask good
>>> questions.
>>>
>>> Do note that this is the same locking sequence used at the beginning of the
>>> page fault code where the decision to call hugetlb_no_page() is made.
>>
>> Yes, hugetlb_fault() can tolerate a stale pte entry because the pte entry will be re-checked later under the page table lock.
>> However, if we see a stale pte entry here, the page might be left over after truncation and thus break truncation? But I'm not
>> sure whether this will occur. Maybe the i_mmap_rwsem write lock and hugetlb_fault_mutex can prevent this issue.
>>
> 
> I looked at this some more.  Just to be clear, we only need to worry
> about modifications of pte_page().  Racing with other pte modifications
> such as accessed, or protection changes is acceptable.
> 
> Of course, the fault mutex prevents faults from happening.  i_mmap_rwsem
> protects against unmap and truncation operations as well as migration as
> you noted above.  I believe the only other place where we update pte_page()
> is when copying page table such as during fork.  However, with commit
> bcd51a3c679d "Lazy page table copies in fork()" we are going to skip
> copying for files and rely on page faults to populate the tables.
> 
> I believe we are safe from races with just the fault mutex and i_mmap_rwsem.

I believe your analysis is right. Thanks for clarifying.

Thanks,
Miaohe Lin

Patch

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index e83fd31671b3..b93d131b0cb5 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -371,6 +371,94 @@  static void hugetlb_delete_from_page_cache(struct page *page)
 	delete_from_page_cache(page);
 }
 
+/*
+ * Called with i_mmap_rwsem held for inode based vma maps.  This makes
+ * sure vma (and vm_mm) will not go away.  We also hold the hugetlb fault
+ * mutex for the page in the mapping.  So, we can not race with page being
+ * faulted into the vma.
+ */
+static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
+				unsigned long addr, struct page *page)
+{
+	pte_t *ptep, pte;
+
+	ptep = huge_pte_offset(vma->vm_mm, addr,
+			huge_page_size(hstate_vma(vma)));
+
+	if (!ptep)
+		return false;
+
+	pte = huge_ptep_get(ptep);
+	if (huge_pte_none(pte) || !pte_present(pte))
+		return false;
+
+	if (pte_page(pte) == page)
+		return true;
+
+	return false;
+}
+
+/*
+ * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
+ * No, because the interval tree returns us only those vmas
+ * which overlap the truncated area starting at pgoff,
+ * and no vma on a 32-bit arch can span beyond the 4GB.
+ */
+static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
+{
+	if (vma->vm_pgoff < start)
+		return (start - vma->vm_pgoff) << PAGE_SHIFT;
+	else
+		return 0;
+}
+
+static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
+{
+	unsigned long t_end;
+
+	if (!end)
+		return vma->vm_end;
+
+	t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
+	if (t_end > vma->vm_end)
+		t_end = vma->vm_end;
+	return t_end;
+}
+
+/*
+ * Called with hugetlb fault mutex held.  Therefore, no more mappings to
+ * this folio can be created while executing the routine.
+ */
+static void hugetlb_unmap_file_folio(struct hstate *h,
+					struct address_space *mapping,
+					struct folio *folio, pgoff_t index)
+{
+	struct rb_root_cached *root = &mapping->i_mmap;
+	struct page *page = &folio->page;
+	struct vm_area_struct *vma;
+	unsigned long v_start;
+	unsigned long v_end;
+	pgoff_t start, end;
+
+	start = index * pages_per_huge_page(h);
+	end = ((index + 1) * pages_per_huge_page(h));
+
+	i_mmap_lock_write(mapping);
+
+	vma_interval_tree_foreach(vma, root, start, end - 1) {
+		v_start = vma_offset_start(vma, start);
+		v_end = vma_offset_end(vma, end);
+
+		if (!hugetlb_vma_maps_page(vma, vma->vm_start + v_start, page))
+			continue;
+
+		unmap_hugepage_range(vma, vma->vm_start + v_start, v_end,
+				NULL, ZAP_FLAG_DROP_MARKER);
+	}
+
+	i_mmap_unlock_write(mapping);
+}
+
 static void
 hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
 		      zap_flags_t zap_flags)
@@ -383,30 +471,13 @@  hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
 	 * an inclusive "last".
 	 */
 	vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
-		unsigned long v_offset;
+		unsigned long v_start;
 		unsigned long v_end;
 
-		/*
-		 * Can the expression below overflow on 32-bit arches?
-		 * No, because the interval tree returns us only those vmas
-		 * which overlap the truncated area starting at pgoff,
-		 * and no vma on a 32-bit arch can span beyond the 4GB.
-		 */
-		if (vma->vm_pgoff < start)
-			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
-		else
-			v_offset = 0;
-
-		if (!end)
-			v_end = vma->vm_end;
-		else {
-			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
-							+ vma->vm_start;
-			if (v_end > vma->vm_end)
-				v_end = vma->vm_end;
-		}
+		v_start = vma_offset_start(vma, start);
+		v_end = vma_offset_end(vma, end);
 
-		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
+		unmap_hugepage_range(vma, vma->vm_start + v_start, v_end,
 				     NULL, zap_flags);
 	}
 }
@@ -428,14 +499,8 @@  static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
 	 * the fault mutex.  The mutex will prevent faults
 	 * until we finish removing the folio.
 	 */
-	if (unlikely(folio_mapped(folio))) {
-		i_mmap_lock_write(mapping);
-		hugetlb_vmdelete_list(&mapping->i_mmap,
-					index * pages_per_huge_page(h),
-					(index + 1) * pages_per_huge_page(h),
-					ZAP_FLAG_DROP_MARKER);
-		i_mmap_unlock_write(mapping);
-	}
+	if (unlikely(folio_mapped(folio)))
+		hugetlb_unmap_file_folio(h, mapping, folio, index);
 
 	folio_lock(folio);
 	/*