diff mbox series

mm/thp: make set_huge_zero_page() return void

Message ID 20190930195528.32553-1-rcampbell@nvidia.com (mailing list archive)
State New, archived
Headers show
Series mm/thp: make set_huge_zero_page() return void | expand

Commit Message

Ralph Campbell Sept. 30, 2019, 7:55 p.m. UTC
The return value from set_huge_zero_page() is never checked so simplify
the code by making it return void.

Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
---
 mm/huge_memory.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

Comments

Kirill A. Shutemov Oct. 1, 2019, 11:52 a.m. UTC | #1
On Mon, Sep 30, 2019 at 12:55:28PM -0700, Ralph Campbell wrote:
> The return value from set_huge_zero_page() is never checked so simplify
> the code by making it return void.
> 
> Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
> ---
>  mm/huge_memory.c | 6 ++----
>  1 file changed, 2 insertions(+), 4 deletions(-)
> 
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index c5cb6dcd6c69..6cf0ee65538d 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -686,20 +686,18 @@ static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
>  }
>  
>  /* Caller must hold page table lock. */
> -static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
> +static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
>  		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
>  		struct page *zero_page)
>  {
>  	pmd_t entry;
> -	if (!pmd_none(*pmd))
> -		return false;
> +

Wat? So you just blindly overwrite whatever is there?

NAK.

>  	entry = mk_pmd(zero_page, vma->vm_page_prot);
>  	entry = pmd_mkhuge(entry);
>  	if (pgtable)
>  		pgtable_trans_huge_deposit(mm, pmd, pgtable);
>  	set_pmd_at(mm, haddr, pmd, entry);
>  	mm_inc_nr_ptes(mm);
> -	return true;
>  }
>  
>  vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
> -- 
> 2.20.1
> 
>
Ralph Campbell Oct. 1, 2019, 5:54 p.m. UTC | #2
On 10/1/19 4:52 AM, Kirill A. Shutemov wrote:
> On Mon, Sep 30, 2019 at 12:55:28PM -0700, Ralph Campbell wrote:
>> The return value from set_huge_zero_page() is never checked so simplify
>> the code by making it return void.
>>
>> Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
>> ---
>>   mm/huge_memory.c | 6 ++----
>>   1 file changed, 2 insertions(+), 4 deletions(-)
>>
>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>> index c5cb6dcd6c69..6cf0ee65538d 100644
>> --- a/mm/huge_memory.c
>> +++ b/mm/huge_memory.c
>> @@ -686,20 +686,18 @@ static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
>>   }
>>   
>>   /* Caller must hold page table lock. */
>> -static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
>> +static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
>>   		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
>>   		struct page *zero_page)
>>   {
>>   	pmd_t entry;
>> -	if (!pmd_none(*pmd))
>> -		return false;
>> +
> 
> Wat? So you just blindly overwrite whatever is there?
> 
> NAK.

No, the only two places it is called from are 
do_huge_pmd_anonymous_page() which has the pmd_lock()
and already checked pmd_none() is true and
copy_huge_pmd() where the destination page table
is being filled in with no other threads accessing it.
I guess I should have put this analysis in the changelog.

Still, I guess putting a pmd_none() check in copy_huge_pmd()
as future proofing new callers of copy_huge_pmd() would
make sense.

>>   	entry = mk_pmd(zero_page, vma->vm_page_prot);
>>   	entry = pmd_mkhuge(entry);
>>   	if (pgtable)
>>   		pgtable_trans_huge_deposit(mm, pmd, pgtable);
>>   	set_pmd_at(mm, haddr, pmd, entry);
>>   	mm_inc_nr_ptes(mm);
>> -	return true;
>>   }
>>   
>>   vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
>> -- 
>> 2.20.1
>>
>>
>
diff mbox series

Patch

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c5cb6dcd6c69..6cf0ee65538d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -686,20 +686,18 @@  static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
 }
 
 /* Caller must hold page table lock. */
-static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
+static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
 		struct page *zero_page)
 {
 	pmd_t entry;
-	if (!pmd_none(*pmd))
-		return false;
+
 	entry = mk_pmd(zero_page, vma->vm_page_prot);
 	entry = pmd_mkhuge(entry);
 	if (pgtable)
 		pgtable_trans_huge_deposit(mm, pmd, pgtable);
 	set_pmd_at(mm, haddr, pmd, entry);
 	mm_inc_nr_ptes(mm);
-	return true;
 }
 
 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)