
[v4,3/4] mm/memory.c: Add memory read privilege on page fault handling

Message ID 1589882610-7291-3-git-send-email-maobibo@loongson.cn
State New, archived
Series [v4,1/4] MIPS: Do not flush tlb page when updating PTE entry

Commit Message

bibo mao May 19, 2020, 10:03 a.m. UTC
Add a pte_sw_mkyoung function to make the page readable on the MIPS
platform during page fault handling. This patch improves page fault
latency by about 10% on my MIPS machine in the lmbench lat_pagefault
case.

It is a no-op on other architectures, so there is no negative
impact on those architectures.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
 arch/mips/include/asm/pgtable.h |  2 ++
 include/asm-generic/pgtable.h   | 15 +++++++++++++++
 mm/memory.c                     |  3 +++
 3 files changed, 20 insertions(+)
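
For context, the reason this "makes the page readable" on MIPS is that MIPS
TLB handling is software-managed: until the valid/read bit is set in the PTE,
every access to the page traps. The MIPS pte_mkyoung() that pte_sw_mkyoung
aliases sets both the accessed bit and the read-valid bit at once; roughly
(a simplified sketch of arch/mips/include/asm/pgtable.h, the exact form
varies by CPU configuration):

	static inline pte_t pte_mkyoung(pte_t pte)
	{
		pte_val(pte) |= _PAGE_ACCESSED;			/* software "young" bit */
		if (!(pte_val(pte) & _PAGE_NO_READ))
			pte_val(pte) |= _PAGE_SILENT_READ;	/* hardware read-valid bit */
		return pte;
	}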

Comments

Andrew Morton May 20, 2020, 1:30 a.m. UTC | #1
On Tue, 19 May 2020 18:03:29 +0800 Bibo Mao <maobibo@loongson.cn> wrote:

> Add a pte_sw_mkyoung function to make the page readable on the MIPS
> platform during page fault handling. This patch improves page fault
> latency by about 10% on my MIPS machine in the lmbench lat_pagefault
> case.
> 
> It is a no-op on other architectures, so there is no negative
> impact on those architectures.
> 
> --- a/arch/mips/include/asm/pgtable.h
> +++ b/arch/mips/include/asm/pgtable.h
> @@ -414,6 +414,8 @@ static inline pte_t pte_mkyoung(pte_t pte)
>  	return pte;
>  }
>  
> +#define pte_sw_mkyoung	pte_mkyoung
> +
>  #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
>  static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }
>  
> --- a/include/asm-generic/pgtable.h
> +++ b/include/asm-generic/pgtable.h
> @@ -227,6 +227,21 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
>  }
>  #endif
>  
> +/*
> + * On some architectures the hardware does not set the page access bit when
> + * a memory page is accessed; it is software's responsibility to set it. That
> + * costs an extra page fault just to track the access bit. As an optimization,
> + * the access bit can be set during the whole page fault flow on those
> + * architectures. To differentiate it from the pte_mkyoung macro, this macro
> + * is used on platforms where software maintains the page access bit.
> + */
> +#ifndef pte_sw_mkyoung
> +static inline pte_t pte_sw_mkyoung(pte_t pte)
> +{
> +	return pte;
> +}
> +#endif

Yup, that's neat enough.  Thanks for making this change.  It looks like
all architectures include asm-generic/pgtable.h so that's fine.

It's conventional to add a

#define pte_sw_mkyoung pte_sw_mkyoung

immediately above the #endif there, so we can't try to implement
pte_sw_mkyoung() twice if this header gets included twice.  But the
header has #ifndef _ASM_GENERIC_PGTABLE_H around the whole thing so
that should be OK.
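
For illustration, the fallback with the suggested define would look like this
in include/asm-generic/pgtable.h (a sketch of the proposed follow-up, not the
patch as posted):

	#ifndef pte_sw_mkyoung
	static inline pte_t pte_sw_mkyoung(pte_t pte)
	{
		return pte;
	}
	#define pte_sw_mkyoung	pte_sw_mkyoung	/* mark the fallback as defined */
	#endif

An architecture overrides it before this point is reached, as the MIPS hunk
does with "#define pte_sw_mkyoung pte_mkyoung".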
bibo mao May 20, 2020, 8:22 a.m. UTC | #2
On 05/20/2020 09:30 AM, Andrew Morton wrote:
> On Tue, 19 May 2020 18:03:29 +0800 Bibo Mao <maobibo@loongson.cn> wrote:
> 
>> Add a pte_sw_mkyoung function to make the page readable on the MIPS
>> platform during page fault handling. This patch improves page fault
>> latency by about 10% on my MIPS machine in the lmbench lat_pagefault
>> case.
>>
>> It is a no-op on other architectures, so there is no negative
>> impact on those architectures.
>>
>> --- a/arch/mips/include/asm/pgtable.h
>> +++ b/arch/mips/include/asm/pgtable.h
>> @@ -414,6 +414,8 @@ static inline pte_t pte_mkyoung(pte_t pte)
>>  	return pte;
>>  }
>>  
>> +#define pte_sw_mkyoung	pte_mkyoung
>> +
>>  #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
>>  static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }
>>  
>> --- a/include/asm-generic/pgtable.h
>> +++ b/include/asm-generic/pgtable.h
>> @@ -227,6 +227,21 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
>>  }
>>  #endif
>>  
>> +/*
>> + * On some architectures the hardware does not set the page access bit when
>> + * a memory page is accessed; it is software's responsibility to set it. That
>> + * costs an extra page fault just to track the access bit. As an optimization,
>> + * the access bit can be set during the whole page fault flow on those
>> + * architectures. To differentiate it from the pte_mkyoung macro, this macro
>> + * is used on platforms where software maintains the page access bit.
>> + */
>> +#ifndef pte_sw_mkyoung
>> +static inline pte_t pte_sw_mkyoung(pte_t pte)
>> +{
>> +	return pte;
>> +}
>> +#endif
> 
> Yup, that's neat enough.  Thanks for making this change.  It looks like
> all architectures include asm-generic/pgtable.h so that's fine.
> 
> It's conventional to add a
> 
> #define pte_sw_mkyoung pte_sw_mkyoung
> 
> immediately above the #endif there, so we can't try to implement
> pte_sw_mkyoung() twice if this header gets included twice.  But the
> header has #ifndef _ASM_GENERIC_PGTABLE_H around the whole thing so
> that should be OK.

Sure, will do, and thanks for your kind help and guidance.

Patch

diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 0d625c2..755d534 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -414,6 +414,8 @@ static inline pte_t pte_mkyoung(pte_t pte)
 	return pte;
 }
 
+#define pte_sw_mkyoung	pte_mkyoung
+
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }
 
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 329b8c8..2542ef1 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -227,6 +227,21 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 }
 #endif
 
+/*
+ * On some architectures the hardware does not set the page access bit when
+ * a memory page is accessed; it is software's responsibility to set it. That
+ * costs an extra page fault just to track the access bit. As an optimization,
+ * the access bit can be set during the whole page fault flow on those
+ * architectures. To differentiate it from the pte_mkyoung macro, this macro
+ * is used on platforms where software maintains the page access bit.
+ */
+#ifndef pte_sw_mkyoung
+static inline pte_t pte_sw_mkyoung(pte_t pte)
+{
+	return pte;
+}
+#endif
+
 #ifndef pte_savedwrite
 #define pte_savedwrite pte_write
 #endif
diff --git a/mm/memory.c b/mm/memory.c
index 2eb59a9..d9700b1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2704,6 +2704,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		}
 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
 		entry = mk_pte(new_page, vma->vm_page_prot);
+		entry = pte_sw_mkyoung(entry);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 		/*
 		 * Clear the pte entry and flush it first, before updating the
@@ -3378,6 +3379,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	__SetPageUptodate(page);
 
 	entry = mk_pte(page, vma->vm_page_prot);
+	entry = pte_sw_mkyoung(entry);
 	if (vma->vm_flags & VM_WRITE)
 		entry = pte_mkwrite(pte_mkdirty(entry));
 
@@ -3660,6 +3662,7 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 
 	flush_icache_page(vma, page);
 	entry = mk_pte(page, vma->vm_page_prot);
+	entry = pte_sw_mkyoung(entry);
 	if (write)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 	/* copy-on-write page */
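
Taken together, the three mm/memory.c hunks route every freshly constructed
fault PTE through pte_sw_mkyoung() before it is installed. Condensed, each
fault path now builds its PTE along these lines (simplified from the hunks
above, not literal kernel code):

	entry = mk_pte(page, vma->vm_page_prot);	/* PTE for the new page */
	entry = pte_sw_mkyoung(entry);			/* no-op, or pte_mkyoung on MIPS */
	if (write)
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);	/* install it */

On MIPS this means the first read of the new page no longer takes an extra
fault just to mark the PTE young, which is where the reported ~10%
lat_pagefault improvement comes from.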