[v2,1/9] mm: introduce pmd_install() helper

Message ID: 20210819031858.98043-2-zhengqi.arch@bytedance.com (mailing list archive)
State: New
Series: Free user PTE page table pages

Commit Message

Qi Zheng Aug. 19, 2021, 3:18 a.m. UTC
Currently we have the same few lines repeated three times in the
code. Deduplicate them with the newly introduced pmd_install() helper.

Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
---
 include/linux/mm.h |  1 +
 mm/filemap.c       | 11 ++---------
 mm/memory.c        | 34 ++++++++++++++++------------------
 3 files changed, 19 insertions(+), 27 deletions(-)
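
For context, here is a condensed before/after sketch of the pattern being deduplicated, mirroring the __pte_alloc() hunk in the patch below (nothing beyond the posted diff is assumed):

	/* Before: the same lock/check/populate sequence, open-coded */
	ptl = pmd_lock(mm, pmd);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		mm_inc_nr_ptes(mm);
		pmd_populate(mm, pmd, new);
		new = NULL;
	}
	spin_unlock(ptl);

	/* After: one helper that consumes the preallocated table on success */
	pmd_install(mm, pmd, &new);
	if (new)			/* still set: another thread won the race */
		pte_free(mm, new);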

Comments

David Hildenbrand Aug. 24, 2021, 4:26 p.m. UTC | #1
On 19.08.21 05:18, Qi Zheng wrote:
> Currently we have the same few lines repeated three times in the
> code. Deduplicate them with the newly introduced pmd_install() helper.
> 
> Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>

Reviewed-by: David Hildenbrand <david@redhat.com>

That's mostly unrelated to the remaining part of the series and can be 
picked up early.
Qi Zheng Aug. 25, 2021, 4:20 p.m. UTC | #2
On 2021/8/25 AM12:26, David Hildenbrand wrote:
> On 19.08.21 05:18, Qi Zheng wrote:
>> Currently we have the same few lines repeated three times in the
>> code. Deduplicate them with the newly introduced pmd_install() helper.
> 
> Reviewed-by: David Hildenbrand <david@redhat.com>

Thanks for your review, I will add this to the patch v3.

> 
> That's mostly unrelated to the remaining part of the series and can be 
> picked up early.

The implementation of subsequent patches depends on pmd_install().
So I am worried that if this patch is submitted as a separate patch,
subsequent patches will not be updated until this patch is merged.
What do you think?

>
David Hildenbrand Aug. 25, 2021, 4:32 p.m. UTC | #3
On 25.08.21 18:20, Qi Zheng wrote:
> 
> 
> On 2021/8/25 AM12:26, David Hildenbrand wrote:
>> On 19.08.21 05:18, Qi Zheng wrote:
>>> Currently we have the same few lines repeated three times in the
>>> code. Deduplicate them with the newly introduced pmd_install() helper.
>>
>> Reviewed-by: David Hildenbrand <david@redhat.com>
> 
> Thanks for your review, I will add this to the patch v3.
> 
>>
>> That's mostly unrelated to the remaining part of the series and can be
>> picked up early.
> 
> The implementation of subsequent patches depends on pmd_install().
> So I am worried that if this patch is submitted as a separate patch,
> subsequent patches will not be updated until this patch is merged.
> What do you think?

Usually I tend to send cleanups out independently, and then just base
the other series on top of them.

I'll have some more comments in reply to v2. It's fairly hard to review 
because you do a lot of complicated stuff in only a handful of patches 
:) I'll try to think of something reasonable on how to split this up to 
make it easier to digest.
Qi Zheng Aug. 26, 2021, 3:04 a.m. UTC | #4
On 2021/8/26 AM12:32, David Hildenbrand wrote:
> On 25.08.21 18:20, Qi Zheng wrote:
>>
>>
>> On 2021/8/25 AM12:26, David Hildenbrand wrote:
>>> On 19.08.21 05:18, Qi Zheng wrote:
>>>> Currently we have the same few lines repeated three times in the
>>>
>>> Reviewed-by: David Hildenbrand <david@redhat.com>
>>
>> Thanks for your review, I will add this to the patch v3.
>>
>>>
>>> That's mostly unrelated to the remaining part of the series and can be
>>> picked up early.
>>
>> The implementation of subsequent patches depends on pmd_install().
>> So I am worried that if this patch is submitted as a separate patch,
>> subsequent patches will not be updated until this patch is merged.
>> What do you think?
> 
> Usually I tend to send cleanups out independently, and then just base
> the other series on top of them.

LGTM, I will submit [PATCH v2 1/9] and [PATCH v2 2/9] as a separate
patch series.

> 
> I'll have some more comments in reply to v2. It's fairly hard to review
> because you do a lot of complicated stuff in only a handful of patches
> :) I'll try to think of something reasonable on how to split this up to 
> make it easier to digest.
> 

Thank you very much, and I look forward to your suggestions. At the same
time, I'll also find ways to organize the code more clearly and
concisely, and add documentation to explain the new APIs.

Thanks,
Qi
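
As an illustration of the kind of API documentation mentioned above, a kernel-doc comment for the new helper might look roughly like the following sketch (hypothetical wording, not part of the posted patch):

	/**
	 * pmd_install - install a preallocated PTE page table into a pmd entry
	 * @mm:  mm_struct the pmd belongs to
	 * @pmd: pmd entry to populate
	 * @pte: preallocated page table; set to NULL once it has been consumed
	 *
	 * Takes the pmd lock and re-checks pmd_none() to cope with a concurrent
	 * populate: if another thread already installed a page table, *@pte is
	 * left untouched and the caller remains responsible for freeing it.
	 */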

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index ce8fc0fd6d6e..57e48217bd71 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2471,6 +2471,7 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
 	return ptl;
 }
 
+extern void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
 extern void __init pagecache_init(void);
 extern void __init free_area_init_memoryless_node(int nid);
 extern void free_initmem(void);
diff --git a/mm/filemap.c b/mm/filemap.c
index 53913fced7ae..9f773059c6dc 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3210,15 +3210,8 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
 	    }
 	}
 
-	if (pmd_none(*vmf->pmd)) {
-		vmf->ptl = pmd_lock(mm, vmf->pmd);
-		if (likely(pmd_none(*vmf->pmd))) {
-			mm_inc_nr_ptes(mm);
-			pmd_populate(mm, vmf->pmd, vmf->prealloc_pte);
-			vmf->prealloc_pte = NULL;
-		}
-		spin_unlock(vmf->ptl);
-	}
+	if (pmd_none(*vmf->pmd))
+		pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
 
 	/* See comment in handle_pte_fault() */
 	if (pmd_devmap_trans_unstable(vmf->pmd)) {
diff --git a/mm/memory.c b/mm/memory.c
index 39e7a1495c3c..ef7b1762e996 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -433,9 +433,20 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	}
 }
 
+void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
+{
+	spinlock_t *ptl = pmd_lock(mm, pmd);
+
+	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
+		mm_inc_nr_ptes(mm);
+		pmd_populate(mm, pmd, *pte);
+		*pte = NULL;
+	}
+	spin_unlock(ptl);
+}
+
 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
 {
-	spinlock_t *ptl;
 	pgtable_t new = pte_alloc_one(mm);
 	if (!new)
 		return -ENOMEM;
@@ -455,13 +466,7 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
 	 */
 	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
 
-	ptl = pmd_lock(mm, pmd);
-	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
-		mm_inc_nr_ptes(mm);
-		pmd_populate(mm, pmd, new);
-		new = NULL;
-	}
-	spin_unlock(ptl);
+	pmd_install(mm, pmd, &new);
 	if (new)
 		pte_free(mm, new);
 	return 0;
@@ -4027,17 +4032,10 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 				return ret;
 		}
 
-		if (vmf->prealloc_pte) {
-			vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
-			if (likely(pmd_none(*vmf->pmd))) {
-				mm_inc_nr_ptes(vma->vm_mm);
-				pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
-				vmf->prealloc_pte = NULL;
-			}
-			spin_unlock(vmf->ptl);
-		} else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
+		if (vmf->prealloc_pte)
+			pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
+		else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
 			return VM_FAULT_OOM;
-		}
 	}
 
 	/* See comment in handle_pte_fault() */