diff mbox series

[RFC,V1,08/11] riscv: Apply Svnapot for base page mapping

Message ID 20231123065708.91345-9-luxu.kernel@bytedance.com (mailing list archive)
State RFC
Headers show
Series riscv: Introduce 64K base page | expand

Checks

Context Check Description
conchuod/vmtest-for-next-PR fail PR summary
conchuod/patch-8-test-1 fail .github/scripts/patches/build_rv32_defconfig.sh
conchuod/patch-8-test-2 success .github/scripts/patches/build_rv64_clang_allmodconfig.sh
conchuod/patch-8-test-3 success .github/scripts/patches/build_rv64_gcc_allmodconfig.sh
conchuod/patch-8-test-4 success .github/scripts/patches/build_rv64_nommu_k210_defconfig.sh
conchuod/patch-8-test-5 success .github/scripts/patches/build_rv64_nommu_virt_defconfig.sh
conchuod/patch-8-test-6 warning .github/scripts/patches/checkpatch.sh
conchuod/patch-8-test-7 success .github/scripts/patches/dtb_warn_rv64.sh
conchuod/patch-8-test-8 success .github/scripts/patches/header_inline.sh
conchuod/patch-8-test-9 success .github/scripts/patches/kdoc.sh
conchuod/patch-8-test-10 success .github/scripts/patches/module_param.sh
conchuod/patch-8-test-11 success .github/scripts/patches/verify_fixes.sh
conchuod/patch-8-test-12 success .github/scripts/patches/verify_signedoff.sh

Commit Message

Xu Lu Nov. 23, 2023, 6:57 a.m. UTC
The Svnapot extension on RISC-V is like contiguous PTE on ARM64. It
allows PTEs of a naturally aligned power-of-2 (NAPOT) memory range to
be encoded in the same format to save TLB space.

This commit applies Svnapot to each base page's mapping. It is the key
to achieving the performance optimization of a larger base page.

Signed-off-by: Xu Lu <luxu.kernel@bytedance.com>
---
 arch/riscv/include/asm/pgtable.h | 34 +++++++++++++++++++++++++++-----
 1 file changed, 29 insertions(+), 5 deletions(-)
diff mbox series

Patch

diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 56366f07985d..803dc5fb6314 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -230,6 +230,16 @@  static __always_inline unsigned long __pte_napot(unsigned long pteval)
 	return pteval & _PAGE_NAPOT;
 }
 
+static __always_inline unsigned long __pte_mknapot(unsigned long pteval,
+		unsigned int order)
+{
+	int pos = order - 1 + _PAGE_PFN_SHIFT;
+	unsigned long napot_bit = BIT(pos);
+	unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);
+
+	return (pteval & napot_mask) | napot_bit | _PAGE_NAPOT;
+}
+
 static inline pte_t __pte(unsigned long pteval)
 {
 	pte_t pte;
@@ -348,13 +358,11 @@  static inline unsigned long pte_napot(pte_t pte)
 	return __pte_napot(pte_val(pte));
 }
 
-static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
+static inline pte_t pte_mknapot(pte_t pte, unsigned int page_order)
 {
-	int pos = order - 1 + _PAGE_PFN_SHIFT;
-	unsigned long napot_bit = BIT(pos);
-	unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);
+	unsigned int hw_page_order = page_order + (PAGE_SHIFT - HW_PAGE_SHIFT);
 
-	return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT);
+	return __pte(__pte_mknapot(pte_val(pte), hw_page_order));
 }
 
 #else
@@ -366,6 +374,11 @@  static inline unsigned long pte_napot(pte_t pte)
 	return 0;
 }
 
+static inline pte_t pte_mknapot(pte_t pte, unsigned int page_order)
+{
+	return pte;
+}
+
 #endif /* CONFIG_RISCV_ISA_SVNAPOT */
 
 /* Yields the page frame number (PFN) of a page table entry */
@@ -585,6 +598,17 @@  static inline int pte_same(pte_t pte_a, pte_t pte_b)
  */
 static inline void set_pte(pte_t *ptep, pte_t pteval)
 {
+	unsigned long order;
+
+	/*
+	 * has_svnapot() always return false before riscv_isa is initialized.
+	 */
+	if (has_svnapot() && pte_present(pteval) && !pte_napot(pteval)) {
+		for_each_napot_order(order) {
+			if (napot_cont_shift(order) == PAGE_SHIFT)
+				pteval = pte_mknapot(pteval, order);
+		}
+	}
 	*ptep = pteval;
 }