[47/89] KVM: arm64: Add current vcpu and shadow_state lookup primitive

Message ID 20220519134204.5379-48-will@kernel.org (mailing list archive)
State New, archived
Series KVM: arm64: Base support for the pKVM hypervisor at EL2

Commit Message

Will Deacon May 19, 2022, 1:41 p.m. UTC
From: Marc Zyngier <maz@kernel.org>

In order to be able to safely manipulate the loaded state,
add a helper that always returns the vcpu as mapped in the EL2 S1
address space, as well as a pointer to the shadow state.

On failure, both pointers are returned as NULL.

For non-protected setups, the state is always NULL, and vcpu is
the EL2 mapping of the input value.

handle___kvm_vcpu_run() is converted to use this helper.
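
For illustration, a hypercall handler converted to this helper follows
the shape below (a minimal sketch; handle___example() is a made-up
name, and the error handling mirrors the handle___kvm_vcpu_run()
conversion in the patch):

  static void handle___example(struct kvm_cpu_context *host_ctxt)
  {
          struct kvm_shadow_vcpu_state *shadow_state;
          struct kvm_vcpu *vcpu;

          /*
           * Resolve the EL2 mapping of the vcpu passed in register 1
           * and, in protected mode, the currently loaded shadow state.
           */
          vcpu = get_current_vcpu(host_ctxt, 1, &shadow_state);
          if (!vcpu) {
                  cpu_reg(host_ctxt, 1) = -EINVAL;
                  return;
          }

          /*
           * shadow_state is non-NULL only when protected KVM is
           * enabled and vcpu matches the loaded state; work on
           * &shadow_state->shadow_vcpu in that case, on vcpu
           * otherwise.
           */
  }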

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kvm/hyp/nvhe/hyp-main.c | 41 +++++++++++++++++++++++++-----
 1 file changed, 35 insertions(+), 6 deletions(-)

Patch

diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 9e3a2aa6f737..40cbf45800b7 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -177,22 +177,51 @@  static void handle___pkvm_vcpu_put(struct kvm_cpu_context *host_ctxt)
 	}
 }
 
+static struct kvm_vcpu *__get_current_vcpu(struct kvm_vcpu *vcpu,
+					   struct kvm_shadow_vcpu_state **state)
+{
+	struct kvm_shadow_vcpu_state *sstate = NULL;
+
+	vcpu = kern_hyp_va(vcpu);
+
+	if (unlikely(is_protected_kvm_enabled())) {
+		sstate = pkvm_loaded_shadow_vcpu_state();
+		if (!sstate || vcpu != sstate->host_vcpu) {
+			sstate = NULL;
+			vcpu = NULL;
+		}
+	}
+
+	*state = sstate;
+	return vcpu;
+}
+
+#define get_current_vcpu(ctxt, regnr, statepp)				\
+	({								\
+		DECLARE_REG(struct kvm_vcpu *, __vcpu, ctxt, regnr);	\
+		__get_current_vcpu(__vcpu, statepp);			\
+	})
+
 static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 {
-	DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1);
+	struct kvm_shadow_vcpu_state *shadow_state;
+	struct kvm_vcpu *vcpu;
 	int ret;
 
-	if (unlikely(is_protected_kvm_enabled())) {
-		struct kvm_shadow_vcpu_state *shadow_state = pkvm_loaded_shadow_vcpu_state();
-		struct kvm_vcpu *shadow_vcpu = &shadow_state->shadow_vcpu;
+	vcpu = get_current_vcpu(host_ctxt, 1, &shadow_state);
+	if (!vcpu) {
+		cpu_reg(host_ctxt, 1) =  -EINVAL;
+		return;
+	}
 
+	if (unlikely(shadow_state)) {
 		flush_shadow_state(shadow_state);
 
-		ret = __kvm_vcpu_run(shadow_vcpu);
+		ret = __kvm_vcpu_run(&shadow_state->shadow_vcpu);
 
 		sync_shadow_state(shadow_state);
 	} else {
-		ret = __kvm_vcpu_run(kern_hyp_va(host_vcpu));
+		ret = __kvm_vcpu_run(vcpu);
 	}
 
 	cpu_reg(host_ctxt, 1) =  ret;