
[V3,05/14] KVM: MIPS: Use lddir/ldpte instructions to lookup gpa_mm.pgd

Message ID 1588500367-1056-6-git-send-email-chenhc@lemote.com (mailing list archive)
State New, archived
Series KVM: MIPS: Add Loongson-3 support (Host Side)

Commit Message

Huacai Chen May 3, 2020, 10:05 a.m. UTC
Loongson-3 can use lddir/ldpte instructions to accelerate page table
walking, so use them to lookup gpa_mm.pgd.

Signed-off-by: Huacai Chen <chenhc@lemote.com>
Co-developed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
---
 arch/mips/kvm/entry.c | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)
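For readers unfamiliar with the Loongson-3 extensions used here: lddir performs one level of the page-directory walk in hardware for the address in CP0 BadVAddr, and ldpte loads the even/odd PTE pair into EntryLo0/EntryLo1, so the generated refill handler no longer needs the sequences that build_get_ptep()/build_update_entries() would otherwise emit. The C0_PWBASE read in kvm_mips_build_enter_guest() follows the same convention: with cpu_has_ldpte the host's own refill path keeps the process PGD in CP0 PWBase rather than in a KScratch register. The snippet below is only a rough C model of the two instructions, not kernel code; the names and the shift/mask handling are illustrative, and the real index computation is driven by BadVAddr and the CP0 page-walk configuration that lddir/ldpte consume.

/* Rough model of one lddir step: fetch the next-level table pointer. */
static unsigned long model_lddir(const unsigned long *dir,
				 unsigned long badvaddr,
				 unsigned int shift, unsigned int ptrs)
{
	return dir[(badvaddr >> shift) & (ptrs - 1)];
}

/*
 * Rough model of ldpte: load the even/odd PTE pair that the following
 * tlbwr installs (the real instruction also converts the PTE bits into
 * EntryLo format).
 */
static void model_ldpte(const unsigned long *ptep,
			unsigned long *entrylo0, unsigned long *entrylo1)
{
	*entrylo0 = ptep[0];	/* even page */
	*entrylo1 = ptep[1];	/* odd page */
}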

Comments

Aleksandar Markovic May 8, 2020, 10:46 a.m. UTC | #1
Sun, 3 May 2020 at 12:11, Huacai Chen <chenhc@lemote.com> wrote:
>
> Loongson-3 can use lddir/ldpte instructions to accelerate page table
> walking, so use them to lookup gpa_mm.pgd.
>
> Signed-off-by: Huacai Chen <chenhc@lemote.com>
> Co-developed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
> ---
>  arch/mips/kvm/entry.c | 19 ++++++++++++++++++-
>  1 file changed, 18 insertions(+), 1 deletion(-)
>
> diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
> index 16e1c93..fd71694 100644
> --- a/arch/mips/kvm/entry.c
> +++ b/arch/mips/kvm/entry.c
> @@ -56,6 +56,7 @@
>  #define C0_BADVADDR    8, 0
>  #define C0_BADINSTR    8, 1
>  #define C0_BADINSTRP   8, 2
> +#define C0_PGD         9, 7
>  #define C0_ENTRYHI     10, 0
>  #define C0_GUESTCTL1   10, 4
>  #define C0_STATUS      12, 0
> @@ -307,7 +308,10 @@ static void *kvm_mips_build_enter_guest(void *addr)
>
>  #ifdef CONFIG_KVM_MIPS_VZ
>         /* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
> -       UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
> +       if (cpu_has_ldpte)
> +               UASM_i_MFC0(&p, K0, C0_PWBASE);
> +       else
> +               UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
>         UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);
>
>         /*
> @@ -469,8 +473,10 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
>         u32 *p = addr;
>         struct uasm_label labels[2];
>         struct uasm_reloc relocs[2];
> +#ifndef CONFIG_CPU_LOONGSON64
>         struct uasm_label *l = labels;
>         struct uasm_reloc *r = relocs;
> +#endif
>
>         memset(labels, 0, sizeof(labels));
>         memset(relocs, 0, sizeof(relocs));
> @@ -490,6 +496,16 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
>          */
>         preempt_disable();
>
> +#ifdef CONFIG_CPU_LOONGSON64
> +       UASM_i_MFC0(&p, K1, C0_PGD);
> +       uasm_i_lddir(&p, K0, K1, 3);  /* global page dir */
> +#ifndef __PAGETABLE_PMD_FOLDED
> +       uasm_i_lddir(&p, K1, K0, 1);  /* middle page dir */
> +#endif
> +       uasm_i_ldpte(&p, K1, 0);      /* even */
> +       uasm_i_ldpte(&p, K1, 1);      /* odd */
> +       uasm_i_tlbwr(&p);
> +#else
>         /*
>          * Now for the actual refill bit. A lot of this can be common with the
>          * Linux TLB refill handler, however we don't need to handle so many
> @@ -512,6 +528,7 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
>         build_get_ptep(&p, K0, K1);
>         build_update_entries(&p, K0, K1);
>         build_tlb_write_entry(&p, &l, &r, tlb_random);
> +#endif
>
>         preempt_enable();
>
> --
> 2.7.0
>
Reviewed-by: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>

Patch

diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index 16e1c93..fd71694 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -56,6 +56,7 @@ 
 #define C0_BADVADDR	8, 0
 #define C0_BADINSTR	8, 1
 #define C0_BADINSTRP	8, 2
+#define C0_PGD		9, 7
 #define C0_ENTRYHI	10, 0
 #define C0_GUESTCTL1	10, 4
 #define C0_STATUS	12, 0
@@ -307,7 +308,10 @@ static void *kvm_mips_build_enter_guest(void *addr)
 
 #ifdef CONFIG_KVM_MIPS_VZ
 	/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
-	UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
+	if (cpu_has_ldpte)
+		UASM_i_MFC0(&p, K0, C0_PWBASE);
+	else
+		UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
 	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);
 
 	/*
@@ -469,8 +473,10 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
 	u32 *p = addr;
 	struct uasm_label labels[2];
 	struct uasm_reloc relocs[2];
+#ifndef CONFIG_CPU_LOONGSON64
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
+#endif
 
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
@@ -490,6 +496,16 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
 	 */
 	preempt_disable();
 
+#ifdef CONFIG_CPU_LOONGSON64
+	UASM_i_MFC0(&p, K1, C0_PGD);
+	uasm_i_lddir(&p, K0, K1, 3);  /* global page dir */
+#ifndef __PAGETABLE_PMD_FOLDED
+	uasm_i_lddir(&p, K1, K0, 1);  /* middle page dir */
+#endif
+	uasm_i_ldpte(&p, K1, 0);      /* even */
+	uasm_i_ldpte(&p, K1, 1);      /* odd */
+	uasm_i_tlbwr(&p);
+#else
 	/*
 	 * Now for the actual refill bit. A lot of this can be common with the
 	 * Linux TLB refill handler, however we don't need to handle so many
@@ -512,6 +528,7 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
 	build_get_ptep(&p, K0, K1);
 	build_update_entries(&p, K0, K1);
 	build_tlb_write_entry(&p, &l, &r, tlb_random);
+#endif
 
 	preempt_enable();
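
As a usage note, not part of the patch: kvm_mips_build_tlb_refill_exception() only emits instructions into the buffer passed in; the caller still has to flush the instruction cache over the emitted range before the handler can run. A minimal sketch of such a call site follows, with a hypothetical helper name; in the kernel the real caller lives in arch/mips/kvm/mips.c, which places the handler at the vCPU's guest exception base.

#include <asm/cacheflush.h>

/* Builder from arch/mips/kvm/entry.c (prototype normally comes from asm/kvm_host.h). */
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler);

/* Hypothetical helper, for illustration only. */
static void install_tlb_refill_handler(void *refill_start, void *fallback)
{
	void *refill_end;

	/* Emit the handler; the Loongson-3 path is chosen at compile time. */
	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, fallback);

	/* Make the newly written instructions visible to instruction fetch. */
	flush_icache_range((unsigned long)refill_start,
			   (unsigned long)refill_end);
}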