| Message ID | 20240719160913.342027-11-apatel@ventanamicro.com (mailing list archive) |
|---|---|
| State | Handled Elsewhere |
| Series | Accelerate KVM RISC-V when running as a guest |
| Context | Check | Description |
|---|---|---|
| conchuod/vmtest-fixes-PR | fail | merge-conflict |
When running under some other hypervisor, prefer nacl_csr_xyz() for
accessing AIA CSRs in the run-loop. This makes CSR access faster
whenever SBI nested acceleration is available.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
---
 arch/riscv/kvm/aia.c | 97 ++++++++++++++++++++++++++++----------------
 1 file changed, 63 insertions(+), 34 deletions(-)
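The nacl_csr_xyz() accessors the description refers to surface in the diff as the ncsr_read()/ncsr_write() helpers. Their real definitions live in asm/kvm_nacl.h, introduced earlier in this series; the sketch below only illustrates the dispatch shape they implement, built from the availability check and shared-memory accessors visible in the diff. Treat the details as assumptions, not the actual header contents.

```c
/*
 * Illustrative sketch only -- not the actual asm/kvm_nacl.h definitions.
 * Idea: when SBI NACL synchronous CSR access is available, route the
 * access through the per-hart NACL shared memory (no trap-and-emulate
 * round trip to the host hypervisor); otherwise fall back to a plain
 * CSR instruction.
 */
#define ncsr_read(__c)						\
({								\
	unsigned long __r;					\
	if (kvm_riscv_nacl_sync_csr_available())		\
		__r = nacl_csr_read(nacl_shmem(), (__c));	\
	else							\
		__r = csr_read(__c);				\
	__r;							\
})

#define ncsr_write(__c, __v)					\
do {								\
	if (kvm_riscv_nacl_sync_csr_available())		\
		nacl_csr_write(nacl_shmem(), (__c), (__v));	\
	else							\
		csr_write(__c, __v);				\
} while (0)
```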
```diff
diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c
index 8ffae0330c89..dcced4db7fe8 100644
--- a/arch/riscv/kvm/aia.c
+++ b/arch/riscv/kvm/aia.c
@@ -16,6 +16,7 @@
 #include <linux/percpu.h>
 #include <linux/spinlock.h>
 #include <asm/cpufeature.h>
+#include <asm/kvm_nacl.h>
 
 struct aia_hgei_control {
 	raw_spinlock_t lock;
@@ -88,7 +89,7 @@ void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
 	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
 
 	if (kvm_riscv_aia_available())
-		csr->vsieh = csr_read(CSR_VSIEH);
+		csr->vsieh = ncsr_read(CSR_VSIEH);
 }
 #endif
 
@@ -115,7 +116,7 @@ bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
 
 	hgei = aia_find_hgei(vcpu);
 	if (hgei > 0)
-		return !!(csr_read(CSR_HGEIP) & BIT(hgei));
+		return !!(ncsr_read(CSR_HGEIP) & BIT(hgei));
 
 	return false;
 }
@@ -128,45 +129,73 @@ void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
 		return;
 
 #ifdef CONFIG_32BIT
-	csr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph);
+	ncsr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph);
 #endif
-	csr_write(CSR_HVICTL, aia_hvictl_value(!!(csr->hvip & BIT(IRQ_VS_EXT))));
+	ncsr_write(CSR_HVICTL, aia_hvictl_value(!!(csr->hvip & BIT(IRQ_VS_EXT))));
 }
 
 void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
+	void *nsh;
 
 	if (!kvm_riscv_aia_available())
 		return;
 
-	csr_write(CSR_VSISELECT, csr->vsiselect);
-	csr_write(CSR_HVIPRIO1, csr->hviprio1);
-	csr_write(CSR_HVIPRIO2, csr->hviprio2);
+	if (kvm_riscv_nacl_sync_csr_available()) {
+		nsh = nacl_shmem();
+		nacl_csr_write(nsh, CSR_VSISELECT, csr->vsiselect);
+		nacl_csr_write(nsh, CSR_HVIPRIO1, csr->hviprio1);
+		nacl_csr_write(nsh, CSR_HVIPRIO2, csr->hviprio2);
+#ifdef CONFIG_32BIT
+		nacl_csr_write(nsh, CSR_VSIEH, csr->vsieh);
+		nacl_csr_write(nsh, CSR_HVIPH, csr->hviph);
+		nacl_csr_write(nsh, CSR_HVIPRIO1H, csr->hviprio1h);
+		nacl_csr_write(nsh, CSR_HVIPRIO2H, csr->hviprio2h);
+#endif
+	} else {
+		csr_write(CSR_VSISELECT, csr->vsiselect);
+		csr_write(CSR_HVIPRIO1, csr->hviprio1);
+		csr_write(CSR_HVIPRIO2, csr->hviprio2);
 #ifdef CONFIG_32BIT
-	csr_write(CSR_VSIEH, csr->vsieh);
-	csr_write(CSR_HVIPH, csr->hviph);
-	csr_write(CSR_HVIPRIO1H, csr->hviprio1h);
-	csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
+		csr_write(CSR_VSIEH, csr->vsieh);
+		csr_write(CSR_HVIPH, csr->hviph);
+		csr_write(CSR_HVIPRIO1H, csr->hviprio1h);
+		csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
 #endif
+	}
 }
 
 void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
+	void *nsh;
 
 	if (!kvm_riscv_aia_available())
 		return;
 
-	csr->vsiselect = csr_read(CSR_VSISELECT);
-	csr->hviprio1 = csr_read(CSR_HVIPRIO1);
-	csr->hviprio2 = csr_read(CSR_HVIPRIO2);
+	if (kvm_riscv_nacl_available()) {
+		nsh = nacl_shmem();
+		csr->vsiselect = nacl_csr_read(nsh, CSR_VSISELECT);
+		csr->hviprio1 = nacl_csr_read(nsh, CSR_HVIPRIO1);
+		csr->hviprio2 = nacl_csr_read(nsh, CSR_HVIPRIO2);
 #ifdef CONFIG_32BIT
-	csr->vsieh = csr_read(CSR_VSIEH);
-	csr->hviph = csr_read(CSR_HVIPH);
-	csr->hviprio1h = csr_read(CSR_HVIPRIO1H);
-	csr->hviprio2h = csr_read(CSR_HVIPRIO2H);
+		csr->vsieh = nacl_csr_read(nsh, CSR_VSIEH);
+		csr->hviph = nacl_csr_read(nsh, CSR_HVIPH);
+		csr->hviprio1h = nacl_csr_read(nsh, CSR_HVIPRIO1H);
+		csr->hviprio2h = nacl_csr_read(nsh, CSR_HVIPRIO2H);
 #endif
+	} else {
+		csr->vsiselect = csr_read(CSR_VSISELECT);
+		csr->hviprio1 = csr_read(CSR_HVIPRIO1);
+		csr->hviprio2 = csr_read(CSR_HVIPRIO2);
+#ifdef CONFIG_32BIT
+		csr->vsieh = csr_read(CSR_VSIEH);
+		csr->hviph = csr_read(CSR_HVIPH);
+		csr->hviprio1h = csr_read(CSR_HVIPRIO1H);
+		csr->hviprio2h = csr_read(CSR_HVIPRIO2H);
+#endif
+	}
 }
 
 int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
@@ -250,20 +279,20 @@ static u8 aia_get_iprio8(struct kvm_vcpu *vcpu, unsigned int irq)
 
 	switch (bitpos / BITS_PER_LONG) {
 	case 0:
-		hviprio = csr_read(CSR_HVIPRIO1);
+		hviprio = ncsr_read(CSR_HVIPRIO1);
 		break;
 	case 1:
 #ifndef CONFIG_32BIT
-		hviprio = csr_read(CSR_HVIPRIO2);
+		hviprio = ncsr_read(CSR_HVIPRIO2);
 		break;
 #else
-		hviprio = csr_read(CSR_HVIPRIO1H);
+		hviprio = ncsr_read(CSR_HVIPRIO1H);
 		break;
 	case 2:
-		hviprio = csr_read(CSR_HVIPRIO2);
+		hviprio = ncsr_read(CSR_HVIPRIO2);
 		break;
 	case 3:
-		hviprio = csr_read(CSR_HVIPRIO2H);
+		hviprio = ncsr_read(CSR_HVIPRIO2H);
 		break;
 #endif
 	default:
@@ -283,20 +312,20 @@ static void aia_set_iprio8(struct kvm_vcpu *vcpu, unsigned int irq, u8 prio)
 
 	switch (bitpos / BITS_PER_LONG) {
 	case 0:
-		hviprio = csr_read(CSR_HVIPRIO1);
+		hviprio = ncsr_read(CSR_HVIPRIO1);
 		break;
 	case 1:
 #ifndef CONFIG_32BIT
-		hviprio = csr_read(CSR_HVIPRIO2);
+		hviprio = ncsr_read(CSR_HVIPRIO2);
 		break;
 #else
-		hviprio = csr_read(CSR_HVIPRIO1H);
+		hviprio = ncsr_read(CSR_HVIPRIO1H);
 		break;
 	case 2:
-		hviprio = csr_read(CSR_HVIPRIO2);
+		hviprio = ncsr_read(CSR_HVIPRIO2);
 		break;
 	case 3:
-		hviprio = csr_read(CSR_HVIPRIO2H);
+		hviprio = ncsr_read(CSR_HVIPRIO2H);
 		break;
 #endif
 	default:
@@ -308,20 +337,20 @@ static void aia_set_iprio8(struct kvm_vcpu *vcpu, unsigned int irq, u8 prio)
 
 	switch (bitpos / BITS_PER_LONG) {
 	case 0:
-		csr_write(CSR_HVIPRIO1, hviprio);
+		ncsr_write(CSR_HVIPRIO1, hviprio);
 		break;
 	case 1:
 #ifndef CONFIG_32BIT
-		csr_write(CSR_HVIPRIO2, hviprio);
+		ncsr_write(CSR_HVIPRIO2, hviprio);
 		break;
 #else
-		csr_write(CSR_HVIPRIO1H, hviprio);
+		ncsr_write(CSR_HVIPRIO1H, hviprio);
 		break;
 	case 2:
-		csr_write(CSR_HVIPRIO2, hviprio);
+		ncsr_write(CSR_HVIPRIO2, hviprio);
 		break;
 	case 3:
-		csr_write(CSR_HVIPRIO2H, hviprio);
+		ncsr_write(CSR_HVIPRIO2H, hviprio);
 		break;
 #endif
 	default:
@@ -377,7 +406,7 @@ int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
 		return KVM_INSN_ILLEGAL_TRAP;
 
 	/* First try to emulate in kernel space */
-	isel = csr_read(CSR_VSISELECT) & ISELECT_MASK;
+	isel = ncsr_read(CSR_VSISELECT) & ISELECT_MASK;
 	if (isel >= ISELECT_IPRIO0 && isel <= ISELECT_IPRIO15)
 		return aia_rmw_iprio(vcpu, isel, val, new_val, wr_mask);
 	else if (isel >= IMSIC_FIRST && isel <= IMSIC_LAST &&
```
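A point worth noting in the kvm_riscv_vcpu_aia_load()/kvm_riscv_vcpu_aia_put() hunks above: rather than going through ncsr_read()/ncsr_write() once per register, they test NACL availability a single time, cache the nacl_shmem() pointer in a local, and batch every access through it. A hedged sketch of that pattern follows; the function name is hypothetical and only mirrors the shape of the code above.

```c
/*
 * Hypothetical example of the batching pattern used in the load path:
 * do the feature check and shared-memory lookup once, then stage each
 * CSR write through NACL shared memory; otherwise issue real CSR writes.
 */
static void example_restore_hviprio(struct kvm_vcpu_aia_csr *csr)
{
	void *nsh;

	if (kvm_riscv_nacl_sync_csr_available()) {
		nsh = nacl_shmem();	/* one lookup for the whole burst */
		nacl_csr_write(nsh, CSR_HVIPRIO1, csr->hviprio1);
		nacl_csr_write(nsh, CSR_HVIPRIO2, csr->hviprio2);
	} else {
		csr_write(CSR_HVIPRIO1, csr->hviprio1);
		csr_write(CSR_HVIPRIO2, csr->hviprio2);
	}
}
```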
On Fri, Jul 19, 2024 at 9:10 AM Anup Patel <apatel@ventanamicro.com> wrote:
>
> When running under some other hypervisor, prefer nacl_csr_xyz()
> for accessing AIA CSRs in the run-loop. This makes CSR access
> faster whenever SBI nested acceleration is available.
>
> Signed-off-by: Anup Patel <apatel@ventanamicro.com>
> ---
>  arch/riscv/kvm/aia.c | 97 ++++++++++++++++++++++++++++----------------
>  1 file changed, 63 insertions(+), 34 deletions(-)

[...]

Reviewed-by: Atish Patra <atishp@rivosinc.com>