[v2,13/13] RISC-V: KVM: Use NACL HFENCEs for KVM request based HFENCEs

Message ID: 20241020194734.58686-14-apatel@ventanamicro.com
State: Handled Elsewhere, archived
Series: Accelerate KVM RISC-V when running as a guest

Checks

Context                   Check  Description
conchuod/vmtest-fixes-PR  fail   merge-conflict

Commit Message

Anup Patel Oct. 20, 2024, 7:47 p.m. UTC
When running under another hypervisor, use SBI NACL-based HFENCEs
for TLB shoot-down via KVM requests. This makes HFENCEs faster whenever
SBI nested acceleration is available.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
---
 arch/riscv/kvm/tlb.c | 57 +++++++++++++++++++++++++++++++-------------
 1 file changed, 40 insertions(+), 17 deletions(-)
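
Every conversion in this patch follows the same dispatch shape: snapshot the
VMID once with READ_ONCE(), then issue the fence through the SBI NACL shared
memory when nested acceleration is available, falling back to a local HFENCE
otherwise. Below is a minimal sketch of that shape, using only helpers visible
in the diff; the wrapper name sketch_hfence_vvma_all() is illustrative and not
part of the patch.

#include <linux/kvm_host.h>	/* struct kvm_vcpu, READ_ONCE() */
#include <asm/kvm_nacl.h>	/* kvm_riscv_nacl_available(), nacl_shmem(), nacl_hfence_*() */

/* Illustrative wrapper; the real logic lives in
 * kvm_riscv_hfence_vvma_all_process() in the diff below. */
static void sketch_hfence_vvma_all(struct kvm_vcpu *vcpu)
{
	/* Read the VMID once so both branches fence the same value. */
	unsigned long vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);

	if (kvm_riscv_nacl_available())
		/* Nested: queue the HFENCE in the NACL shared memory so the
		 * host hypervisor can process it at the next synchronization
		 * instead of trapping on each HFENCE instruction. */
		nacl_hfence_vvma_all(nacl_shmem(), vmid);
	else
		/* Native: execute HFENCE.VVMA locally on this hart. */
		kvm_riscv_local_hfence_vvma_all(vmid);
}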

Comments

Atish Patra Oct. 24, 2024, 11:56 p.m. UTC | #1
On Sun, Oct 20, 2024 at 12:48 PM Anup Patel <apatel@ventanamicro.com> wrote:
>
> When running under another hypervisor, use SBI NACL-based HFENCEs
> for TLB shoot-down via KVM requests. This makes HFENCEs faster whenever
> SBI nested acceleration is available.
>
> Signed-off-by: Anup Patel <apatel@ventanamicro.com>
>
> [...]

Reviewed-by: Atish Patra <atishp@rivosinc.com>

Patch

diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c
index 23c0e82b5103..2f91ea5f8493 100644
--- a/arch/riscv/kvm/tlb.c
+++ b/arch/riscv/kvm/tlb.c
@@ -14,6 +14,7 @@
 #include <asm/csr.h>
 #include <asm/cpufeature.h>
 #include <asm/insn-def.h>
+#include <asm/kvm_nacl.h>
 
 #define has_svinval()	riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)
 
@@ -186,18 +187,24 @@ void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
 
 void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vmid *vmid;
+	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
+	unsigned long vmid = READ_ONCE(v->vmid);
 
-	vmid = &vcpu->kvm->arch.vmid;
-	kvm_riscv_local_hfence_gvma_vmid_all(READ_ONCE(vmid->vmid));
+	if (kvm_riscv_nacl_available())
+		nacl_hfence_gvma_vmid_all(nacl_shmem(), vmid);
+	else
+		kvm_riscv_local_hfence_gvma_vmid_all(vmid);
 }
 
 void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vmid *vmid;
+	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
+	unsigned long vmid = READ_ONCE(v->vmid);
 
-	vmid = &vcpu->kvm->arch.vmid;
-	kvm_riscv_local_hfence_vvma_all(READ_ONCE(vmid->vmid));
+	if (kvm_riscv_nacl_available())
+		nacl_hfence_vvma_all(nacl_shmem(), vmid);
+	else
+		kvm_riscv_local_hfence_vvma_all(vmid);
 }
 
 static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
@@ -251,6 +258,7 @@ static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
 
 void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
 {
+	unsigned long vmid;
 	struct kvm_riscv_hfence d = { 0 };
 	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
 
@@ -259,26 +267,41 @@ void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
 		case KVM_RISCV_HFENCE_UNKNOWN:
 			break;
 		case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
-			kvm_riscv_local_hfence_gvma_vmid_gpa(
-						READ_ONCE(v->vmid),
-						d.addr, d.size, d.order);
+			vmid = READ_ONCE(v->vmid);
+			if (kvm_riscv_nacl_available())
+				nacl_hfence_gvma_vmid(nacl_shmem(), vmid,
+						      d.addr, d.size, d.order);
+			else
+				kvm_riscv_local_hfence_gvma_vmid_gpa(vmid, d.addr,
+								     d.size, d.order);
 			break;
 		case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
 			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
-			kvm_riscv_local_hfence_vvma_asid_gva(
-						READ_ONCE(v->vmid), d.asid,
-						d.addr, d.size, d.order);
+			vmid = READ_ONCE(v->vmid);
+			if (kvm_riscv_nacl_available())
+				nacl_hfence_vvma_asid(nacl_shmem(), vmid, d.asid,
+						      d.addr, d.size, d.order);
+			else
+				kvm_riscv_local_hfence_vvma_asid_gva(vmid, d.asid, d.addr,
+								     d.size, d.order);
 			break;
 		case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
 			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
-			kvm_riscv_local_hfence_vvma_asid_all(
-						READ_ONCE(v->vmid), d.asid);
+			vmid = READ_ONCE(v->vmid);
+			if (kvm_riscv_nacl_available())
+				nacl_hfence_vvma_asid_all(nacl_shmem(), vmid, d.asid);
+			else
+				kvm_riscv_local_hfence_vvma_asid_all(vmid, d.asid);
 			break;
 		case KVM_RISCV_HFENCE_VVMA_GVA:
 			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
-			kvm_riscv_local_hfence_vvma_gva(
-						READ_ONCE(v->vmid),
-						d.addr, d.size, d.order);
+			vmid = READ_ONCE(v->vmid);
+			if (kvm_riscv_nacl_available())
+				nacl_hfence_vvma(nacl_shmem(), vmid,
+						 d.addr, d.size, d.order);
+			else
+				kvm_riscv_local_hfence_vvma_gva(vmid, d.addr,
+								d.size, d.order);
 			break;
 		default:
 			break;
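
Two details in kvm_riscv_hfence_process() are worth calling out. First, each
case now snapshots the VMID into a local variable with READ_ONCE() before
dispatching, so the NACL and local branches of a given request are guaranteed
to fence the same VMID value. Second, in the VVMA cases the SBI PMU
firmware-event counters (SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD and
SBI_PMU_FW_HFENCE_VVMA_RCVD) are incremented before the dispatch, so
firmware-event accounting is identical whether the fence goes through the
NACL shared memory or a local HFENCE instruction.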