@@ -285,6 +285,7 @@
#define CSR_SIE 0x104
#define CSR_STVEC 0x105
#define CSR_SCOUNTEREN 0x106
+#define CSR_SENVCFG 0x10a
#define CSR_SSCRATCH 0x140
#define CSR_SEPC 0x141
#define CSR_SCAUSE 0x142
@@ -162,6 +162,7 @@ struct kvm_vcpu_csr {
	unsigned long hvip;
	unsigned long vsatp;
	unsigned long scounteren;
+	unsigned long senvcfg;
};

struct kvm_vcpu_config {
@@ -188,6 +189,7 @@ struct kvm_vcpu_arch {
	unsigned long host_sscratch;
	unsigned long host_stvec;
	unsigned long host_scounteren;
+	unsigned long host_senvcfg;

	/* CPU context of Host */
	struct kvm_cpu_context host_context;
@@ -79,6 +79,7 @@ struct kvm_riscv_csr {
	unsigned long sip;
	unsigned long satp;
	unsigned long scounteren;
+	unsigned long senvcfg;
};

/* AIA CSR registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
@@ -601,6 +601,20 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
	kvm_riscv_vcpu_aia_update_hvip(vcpu);
}

+static __always_inline void kvm_riscv_vcpu_swap_in_guest_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+
+	vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg);
+}
+
+static __always_inline void kvm_riscv_vcpu_swap_in_host_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+
+	csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg);
+}
+
/*
* Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
* the vCPU is running.
@@ -610,10 +624,12 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
*/
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
+	kvm_riscv_vcpu_swap_in_guest_state(vcpu);
	guest_state_enter_irqoff();
	__kvm_riscv_switch_to(&vcpu->arch);
	vcpu->arch.last_exit_cpu = vcpu->cpu;
	guest_state_exit_irqoff();
+	kvm_riscv_vcpu_swap_in_host_state(vcpu);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
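For context on the mechanism used above: csr_swap() writes a new value into a CSR and hands back the previous contents via a single csrrw instruction, which is why one call per direction is enough to stash the host's senvcfg while installing the guest's, and the reverse on exit. The sketch below is a simplified rendition of that helper, not the exact macro from arch/riscv/include/asm/csr.h (the real one differs slightly, e.g. in its operand constraints); __ASM_STR() stringifies the CSR number for the assembler.

#define csr_swap(csr, val)						\
({									\
	unsigned long __v = (unsigned long)(val);			\
	/* csrrw: write __v into the CSR, return the old CSR value */	\
	__asm__ __volatile__ ("csrrw %0, " __ASM_STR(csr) ", %1"	\
			      : "=r" (__v) : "r" (__v)			\
			      : "memory");				\
	__v;								\
})

With that, kvm_riscv_vcpu_swap_in_guest_state() leaves the host value in vcpu->arch.host_senvcfg, and the exit-side helper restores it while capturing whatever the guest last wrote into guest_csr.senvcfg.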