@@ -78,6 +78,8 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
__KVM_HOST_SMCCC_FUNC___pkvm_init_shadow,
__KVM_HOST_SMCCC_FUNC___pkvm_teardown_shadow,
+ __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
+ __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
};
#define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
@@ -429,12 +429,26 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu_ptrauth_disable(vcpu);
kvm_arch_vcpu_load_debug_state_flags(vcpu);
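+	/*
+	 * With pKVM, the vcpu state is loaded at EL2 here rather than on each
+	 * vcpu run, and the vGIC VMCR/APRs must be restored eagerly since
+	 * vgic_v3_load() skips that step when protected mode is enabled.
+	 */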
+ if (is_protected_kvm_enabled()) {
+ kvm_call_hyp_nvhe(__pkvm_vcpu_load,
+ vcpu->kvm->arch.pkvm.shadow_handle,
+ vcpu->vcpu_idx, vcpu->arch.hcr_el2);
+ kvm_call_hyp(__vgic_v3_restore_vmcr_aprs,
+ &vcpu->arch.vgic_cpu.vgic_v3);
+ }
+
if (!cpumask_test_cpu(smp_processor_id(), vcpu->kvm->arch.supported_cpus))
vcpu_set_on_unsupported_cpu(vcpu);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
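+	/*
+	 * With pKVM, save the vGIC VMCR/APRs and release the vcpu state loaded
+	 * at EL2 before the rest of the put path runs.
+	 */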
+ if (is_protected_kvm_enabled()) {
+ kvm_call_hyp(__vgic_v3_save_vmcr_aprs,
+ &vcpu->arch.vgic_cpu.vgic_v3);
+ kvm_call_hyp_nvhe(__pkvm_vcpu_put);
+ }
+
kvm_arch_vcpu_put_debug_state_flags(vcpu);
kvm_arch_vcpu_put_fp(vcpu);
if (has_vhe())
@@ -24,6 +24,12 @@ struct kvm_shadow_vcpu_state {
/* A pointer to the shadow vm. */
struct kvm_shadow_vm *shadow_vm;
+
+	/*
+	 * Points to the per-cpu 'loaded_shadow_state' pointer of the physical
+	 * cpu on which this vcpu is currently loaded, or NULL if it is not
+	 * loaded on any cpu.
+	 */
+ struct kvm_shadow_vcpu_state **loaded_shadow_state;
};
/*
@@ -79,6 +85,7 @@ int __pkvm_teardown_shadow(unsigned int shadow_handle);
struct kvm_shadow_vcpu_state *
pkvm_load_shadow_vcpu_state(unsigned int shadow_handle, unsigned int vcpu_idx);
void pkvm_put_shadow_vcpu_state(struct kvm_shadow_vcpu_state *shadow_state);
+struct kvm_shadow_vcpu_state *pkvm_loaded_shadow_vcpu_state(void);
u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
@@ -138,40 +138,63 @@ static void sync_shadow_state(struct kvm_shadow_vcpu_state *shadow_state)
sync_timer_state(shadow_state);
}
+static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(unsigned int, shadow_handle, host_ctxt, 1);
+ DECLARE_REG(unsigned int, vcpu_idx, host_ctxt, 2);
+ DECLARE_REG(u64, hcr_el2, host_ctxt, 3);
+ struct kvm_shadow_vcpu_state *shadow_state;
+ struct kvm_vcpu *shadow_vcpu;
+
+ if (!is_protected_kvm_enabled())
+ return;
+
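+	/*
+	 * Marks the vcpu as loaded on this cpu; fails if this cpu already has
+	 * a loaded vcpu or if this vcpu is loaded on another cpu.
+	 */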
+ shadow_state = pkvm_load_shadow_vcpu_state(shadow_handle, vcpu_idx);
+ if (!shadow_state)
+ return;
+
+ shadow_vcpu = &shadow_state->shadow_vcpu;
+
+ if (shadow_state_is_protected(shadow_state)) {
+ /* Propagate WFx trapping flags, trap ptrauth */
+ shadow_vcpu->arch.hcr_el2 &= ~(HCR_TWE | HCR_TWI |
+ HCR_API | HCR_APK);
+ shadow_vcpu->arch.hcr_el2 |= hcr_el2 & (HCR_TWE | HCR_TWI);
+ }
+}
+
+static void handle___pkvm_vcpu_put(struct kvm_cpu_context *host_ctxt)
+{
+ struct kvm_shadow_vcpu_state *shadow_state;
+
+ if (!is_protected_kvm_enabled())
+ return;
+
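+	/* Nothing to do if no vcpu is currently loaded on this cpu. */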
+ shadow_state = pkvm_loaded_shadow_vcpu_state();
+
+	if (shadow_state)
+		pkvm_put_shadow_vcpu_state(shadow_state);
+}
+
static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1);
int ret;
- host_vcpu = kern_hyp_va(host_vcpu);
-
if (unlikely(is_protected_kvm_enabled())) {
- struct kvm_shadow_vcpu_state *shadow_state;
- struct kvm_vcpu *shadow_vcpu;
- struct kvm *host_kvm;
- unsigned int handle;
-
- host_kvm = kern_hyp_va(host_vcpu->kvm);
- handle = host_kvm->arch.pkvm.shadow_handle;
- shadow_state = pkvm_load_shadow_vcpu_state(handle,
- host_vcpu->vcpu_idx);
- if (!shadow_state) {
- ret = -EINVAL;
- goto out;
- }
-
- shadow_vcpu = &shadow_state->shadow_vcpu;
+		struct kvm_shadow_vcpu_state *shadow_state;
+		struct kvm_vcpu *shadow_vcpu;
+
+		/* The host must have issued __pkvm_vcpu_load for this vcpu. */
+		shadow_state = pkvm_loaded_shadow_vcpu_state();
+		if (!shadow_state) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		shadow_vcpu = &shadow_state->shadow_vcpu;
+
flush_shadow_state(shadow_state);
ret = __kvm_vcpu_run(shadow_vcpu);
sync_shadow_state(shadow_state);
- pkvm_put_shadow_vcpu_state(shadow_state);
} else {
- ret = __kvm_vcpu_run(host_vcpu);
+ ret = __kvm_vcpu_run(kern_hyp_va(host_vcpu));
}
out:
cpu_reg(host_ctxt, 1) = ret;
}
@@ -414,6 +437,8 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs),
HANDLE_FUNC(__pkvm_init_shadow),
HANDLE_FUNC(__pkvm_teardown_shadow),
+ HANDLE_FUNC(__pkvm_vcpu_load),
+ HANDLE_FUNC(__pkvm_vcpu_put),
};
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
@@ -17,6 +17,12 @@ unsigned long __icache_flags;
/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;
+/*
+ * The shadow state of the vcpu currently loaded on this physical cpu, or NULL
+ * if no vcpu is loaded. Only used when protected KVM is enabled, for both
+ * protected and non-protected VMs.
+ */
+static DEFINE_PER_CPU(struct kvm_shadow_vcpu_state *, loaded_shadow_state);
+
/*
* Set trap register values based on features in ID_AA64PFR0.
*/
@@ -252,15 +258,30 @@ pkvm_load_shadow_vcpu_state(unsigned int shadow_handle, unsigned int vcpu_idx)
struct kvm_shadow_vcpu_state *shadow_state = NULL;
struct kvm_shadow_vm *vm;
+ /* Cannot load a new vcpu without putting the old one first. */
+ if (__this_cpu_read(loaded_shadow_state))
+ return NULL;
+
hyp_spin_lock(&shadow_lock);
vm = find_shadow_by_handle(shadow_handle);
if (!vm || vm->kvm.created_vcpus <= vcpu_idx)
goto unlock;
shadow_state = &vm->shadow_vcpu_states[vcpu_idx];
+
+ /* Ensure vcpu isn't loaded on more than one cpu simultaneously. */
+ if (unlikely(shadow_state->loaded_shadow_state)) {
+ shadow_state = NULL;
+ goto unlock;
+ }
+ shadow_state->loaded_shadow_state = this_cpu_ptr(&loaded_shadow_state);
+
hyp_page_ref_inc(hyp_virt_to_page(vm));
unlock:
hyp_spin_unlock(&shadow_lock);
+
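+	/* shadow_state is NULL here if the load failed for any reason. */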
+ __this_cpu_write(loaded_shadow_state, shadow_state);
+
return shadow_state;
}
@@ -269,10 +290,17 @@ void pkvm_put_shadow_vcpu_state(struct kvm_shadow_vcpu_state *shadow_state)
struct kvm_shadow_vm *vm = shadow_state->shadow_vm;
hyp_spin_lock(&shadow_lock);
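+	/* Mark the vcpu as no longer loaded on this cpu. */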
+ shadow_state->loaded_shadow_state = NULL;
+ __this_cpu_write(loaded_shadow_state, NULL);
hyp_page_ref_dec(hyp_virt_to_page(vm));
hyp_spin_unlock(&shadow_lock);
}
+struct kvm_shadow_vcpu_state *pkvm_loaded_shadow_vcpu_state(void)
+{
+ return __this_cpu_read(loaded_shadow_state);
+}
+
static void unpin_host_vcpus(struct kvm_shadow_vcpu_state *shadow_vcpu_states,
unsigned int nr_vcpus)
{
@@ -720,7 +720,8 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
- kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if);
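+	/*
+	 * With pKVM, the VMCR and APRs are restored from kvm_arch_vcpu_load()
+	 * once the vcpu state has been loaded at EL2.
+	 */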
+ if (likely(!is_protected_kvm_enabled()))
+ kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if);
if (has_vhe())
__vgic_v3_activate_traps(cpu_if);
@@ -734,7 +735,8 @@ void vgic_v3_put(struct kvm_vcpu *vcpu, bool blocking)
WARN_ON(vgic_v4_put(vcpu, blocking));
- kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
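+	/*
+	 * With pKVM, the VMCR and APRs are saved from kvm_arch_vcpu_put()
+	 * before the vcpu state is put at EL2.
+	 */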
+ if (likely(!is_protected_kvm_enabled()))
+ kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
if (has_vhe())
__vgic_v3_deactivate_traps(cpu_if);