@@ -263,6 +263,12 @@ struct kvm_vcpu_arch {
	/* 'static' configurations which are set only once */
	struct kvm_vcpu_config cfg;
+
+	/* SBI steal-time accounting */
+	struct {
+		gpa_t shmem;
+		u64 last_steal;
+	} sta;
};
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
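The new sta state is deliberately small: shmem records where the guest asked steal-time data to be written, and last_steal caches the host-side stolen-time counter as of the previous update so that only the delta gets published next time. For orientation, the region shmem points at is the per-hart steal-time record defined by the SBI STA extension; a rough sketch of that guest-visible layout, paraphrased from the SBI specification rather than taken from this series, is:

/*
 * Illustrative layout of the SBI STA steal-time record (64 bytes,
 * little-endian fields); see the SBI specification for the
 * authoritative definition.
 */
struct sbi_sta_struct {
	__le32 sequence;	/* incremented around updates; odd means update in progress */
	__le32 flags;		/* reserved, zero for now */
	__le64 steal;		/* stolen time in nanoseconds, monotonically increasing */
	u8 preempted;		/* preemption hint for the guest */
	u8 pad[47];
};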
@@ -83,6 +83,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
	vcpu->arch.hfence_tail = 0;
	memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));

+	vcpu->arch.sta.shmem = INVALID_GPA;
+
	/* Reset the guest CSRs for hotplug usecase */
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
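Resetting shmem on every vcpu reset means steal-time reporting stays disabled until the guest (re-)registers a shared memory region. INVALID_GPA works as the "not configured" sentinel because it is the all-ones guest physical address; for reference, KVM's generic headers define it as:

/* include/linux/kvm_types.h */
#define INVALID_GPA	(~(gpa_t)0)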
@@ -10,6 +10,10 @@
void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
{
+	gpa_t shmem = vcpu->arch.sta.shmem;
+
+	if (shmem == INVALID_GPA)
+		return;
}

static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
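With the early return in place, the stub can already be called unconditionally from the run path; later patches in the series fill in the actual update. Purely to illustrate how shmem and last_steal are meant to work together, a minimal update could look something like the sketch below. It assumes the sbi_sta_struct layout sketched earlier and skips the sequence-counter handshake and the user-access machinery a real implementation would need; it is not the code this series ultimately adds.

void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
{
	gpa_t shmem = vcpu->arch.sta.shmem;
	u64 run_delay, steal;
	__le64 steal_le;

	if (shmem == INVALID_GPA)
		return;

	/*
	 * run_delay is how long this task (the VCPU thread) sat runnable
	 * without running, i.e. time stolen from the guest's point of view.
	 */
	run_delay = READ_ONCE(current->sched_info.run_delay);

	/* Add the delta since the last update to the guest-visible counter. */
	if (kvm_vcpu_read_guest(vcpu, shmem + offsetof(struct sbi_sta_struct, steal),
				&steal_le, sizeof(steal_le)))
		return;

	steal = le64_to_cpu(steal_le) + (run_delay - vcpu->arch.sta.last_steal);
	vcpu->arch.sta.last_steal = run_delay;

	steal_le = cpu_to_le64(steal);
	kvm_vcpu_write_guest(vcpu, shmem + offsetof(struct sbi_sta_struct, steal),
			     &steal_le, sizeof(steal_le));
}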
KVM's implementation of SBI STA needs to track the address of each VCPU's
steal-time shared memory region as well as the amount of stolen time. Add
a structure to vcpu_arch to contain this state and make sure that the
address is always set to INVALID_GPA on vcpu reset. And, of course, ensure
KVM won't try to update steal-time when the shared memory address is
invalid.

Signed-off-by: Andrew Jones <ajones@ventanamicro.com>
---
 arch/riscv/include/asm/kvm_host.h | 6 ++++++
 arch/riscv/kvm/vcpu.c             | 2 ++
 arch/riscv/kvm/vcpu_sbi_sta.c     | 4 ++++
 3 files changed, 12 insertions(+)