@@ -63,5 +63,13 @@ int __pkvm_init_shadow(struct kvm *kvm, unsigned long shadow_hva,
size_t shadow_size, unsigned long pgd_hva);
int __pkvm_teardown_shadow(unsigned int shadow_handle);
+/*
+ * Get/put accessors for a vCPU's shadow state: the load takes a reference
+ * on the owning shadow VM which must be dropped with the matching put.
+ */
+struct kvm_shadow_vcpu_state *
+pkvm_load_shadow_vcpu_state(unsigned int shadow_handle, unsigned int vcpu_idx);
+void pkvm_put_shadow_vcpu_state(struct kvm_shadow_vcpu_state *shadow_state);
+
#endif /* __ARM64_KVM_NVHE_PKVM_H__ */
@@ -22,11 +22,108 @@ DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
+/*
+ * Copy the host vCPU state into its shadow prior to running the vCPU in
+ * protected mode.
+ *
+ * Host-provided pointers that will be dereferenced at EL2 (SVE state,
+ * debug registers) are converted to hyp VAs with kern_hyp_va().
+ */
+static void flush_shadow_state(struct kvm_shadow_vcpu_state *shadow_state)
+{
+ struct kvm_vcpu *shadow_vcpu = &shadow_state->shadow_vcpu;
+ struct kvm_vcpu *host_vcpu = shadow_state->host_vcpu;
+
+ shadow_vcpu->arch.ctxt = host_vcpu->arch.ctxt;
+
+ shadow_vcpu->arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state);
+ shadow_vcpu->arch.sve_max_vl = host_vcpu->arch.sve_max_vl;
+
+ shadow_vcpu->arch.hw_mmu = host_vcpu->arch.hw_mmu;
+
+ shadow_vcpu->arch.hcr_el2 = host_vcpu->arch.hcr_el2;
+ shadow_vcpu->arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
+ shadow_vcpu->arch.cptr_el2 = host_vcpu->arch.cptr_el2;
+
+ shadow_vcpu->arch.flags = host_vcpu->arch.flags;
+
+ shadow_vcpu->arch.debug_ptr = kern_hyp_va(host_vcpu->arch.debug_ptr);
+ shadow_vcpu->arch.host_fpsimd_state = host_vcpu->arch.host_fpsimd_state;
+
+ shadow_vcpu->arch.vsesr_el2 = host_vcpu->arch.vsesr_el2;
+
+ shadow_vcpu->arch.vgic_cpu.vgic_v3 = host_vcpu->arch.vgic_cpu.vgic_v3;
+}
+
+/*
+ * Copy the state modified by the guest run back from the shadow vCPU to
+ * the host vCPU on return to EL1.
+ *
+ * Only the list registers indicated by the shadow's used_lrs are copied;
+ * NOTE(review): used_lrs itself is not written back — confirm the host's
+ * value is authoritative.
+ */
+static void sync_shadow_state(struct kvm_shadow_vcpu_state *shadow_state)
+{
+ struct kvm_vcpu *shadow_vcpu = &shadow_state->shadow_vcpu;
+ struct kvm_vcpu *host_vcpu = shadow_state->host_vcpu;
+ struct vgic_v3_cpu_if *shadow_cpu_if = &shadow_vcpu->arch.vgic_cpu.vgic_v3;
+ struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
+ unsigned int i;
+
+ host_vcpu->arch.ctxt = shadow_vcpu->arch.ctxt;
+
+ host_vcpu->arch.hcr_el2 = shadow_vcpu->arch.hcr_el2;
+ host_vcpu->arch.cptr_el2 = shadow_vcpu->arch.cptr_el2;
+
+ host_vcpu->arch.fault = shadow_vcpu->arch.fault;
+
+ host_vcpu->arch.flags = shadow_vcpu->arch.flags;
+
+ host_cpu_if->vgic_hcr = shadow_cpu_if->vgic_hcr;
+ for (i = 0; i < shadow_cpu_if->used_lrs; ++i)
+ host_cpu_if->vgic_lr[i] = shadow_cpu_if->vgic_lr[i];
+}
+
static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
{
- DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
+ DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1);
+ int ret;
+
+ host_vcpu = kern_hyp_va(host_vcpu);
+
+ /*
+  * In protected mode, load the vCPU's shadow state, run the guest via
+  * the shadow and sync the results back for the host to observe.
+  */
+ if (unlikely(is_protected_kvm_enabled())) {
+ struct kvm_shadow_vcpu_state *shadow_state;
+ struct kvm_vcpu *shadow_vcpu;
+ struct kvm *host_kvm;
+ unsigned int handle;
+
+ host_kvm = kern_hyp_va(host_vcpu->kvm);
+ handle = host_kvm->arch.pkvm.shadow_handle;
+ shadow_state = pkvm_load_shadow_vcpu_state(handle,
+ host_vcpu->vcpu_idx);
+ if (!shadow_state) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ shadow_vcpu = &shadow_state->shadow_vcpu;
+ flush_shadow_state(shadow_state);
+
+ ret = __kvm_vcpu_run(shadow_vcpu);
+
+ sync_shadow_state(shadow_state);
+ pkvm_put_shadow_vcpu_state(shadow_state);
+ } else {
+ ret = __kvm_vcpu_run(host_vcpu);
+ }
- cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
+out:
+ cpu_reg(host_ctxt, 1) = ret;
}
static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
@@ -244,6 +244,43 @@ static struct kvm_shadow_vm *find_shadow_by_handle(unsigned int shadow_handle)
return shadow_table[shadow_idx];
}
+/*
+ * Look up the shadow vCPU state for @vcpu_idx in the shadow VM identified
+ * by @shadow_handle.
+ *
+ * On success a reference is taken on the shadow VM's page (preventing
+ * teardown while in use); the caller must release it with
+ * pkvm_put_shadow_vcpu_state(). Returns NULL if the handle or the vCPU
+ * index is invalid.
+ */
+struct kvm_shadow_vcpu_state *
+pkvm_load_shadow_vcpu_state(unsigned int shadow_handle, unsigned int vcpu_idx)
+{
+ struct kvm_shadow_vcpu_state *shadow_state = NULL;
+ struct kvm_shadow_vm *vm;
+
+ hyp_spin_lock(&shadow_lock);
+ vm = find_shadow_by_handle(shadow_handle);
+ if (!vm || vm->kvm.created_vcpus <= vcpu_idx)
+ goto unlock;
+
+ shadow_state = &vm->shadow_vcpu_states[vcpu_idx];
+ hyp_page_ref_inc(hyp_virt_to_page(vm));
+unlock:
+ hyp_spin_unlock(&shadow_lock);
+ return shadow_state;
+}
+
+/* Drop the shadow VM reference taken by pkvm_load_shadow_vcpu_state(). */
+void pkvm_put_shadow_vcpu_state(struct kvm_shadow_vcpu_state *shadow_state)
+{
+ struct kvm_shadow_vm *vm = shadow_state->shadow_vm;
+
+ hyp_spin_lock(&shadow_lock);
+ hyp_page_ref_dec(hyp_virt_to_page(vm));
+ hyp_spin_unlock(&shadow_lock);
+}
+
static void unpin_host_vcpus(struct kvm_shadow_vcpu_state *shadow_vcpu_states,
unsigned int nr_vcpus)
{
As a stepping stone towards deprivileging the host's access to the guest's
vCPU structures, introduce some naive flush/sync routines to copy most of
the host vCPU into the shadow vCPU on vCPU run and back again on return to
EL1.

This allows us to run using the shadow structure when KVM is initialised
in protected mode.

Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/kvm/hyp/include/nvhe/pkvm.h |  4 ++
 arch/arm64/kvm/hyp/nvhe/hyp-main.c     | 82 +++++++++++++++++++++++++-
 arch/arm64/kvm/hyp/nvhe/pkvm.c         | 27 +++++++++
 3 files changed, 111 insertions(+), 2 deletions(-)