@@ -445,20 +445,15 @@ static void handle___pkvm_host_map_guest(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(u64, pfn, host_ctxt, 1);
DECLARE_REG(u64, gfn, host_ctxt, 2);
- DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 3);
- struct kvm_shadow_vcpu_state *shadow_state;
+ struct kvm_vcpu *host_vcpu;
struct kvm_vcpu *shadow_vcpu;
- struct kvm *host_kvm;
- unsigned int handle;
+ struct kvm_shadow_vcpu_state *shadow_state;
int ret = -EINVAL;

if (!is_protected_kvm_enabled())
goto out;

- host_vcpu = kern_hyp_va(host_vcpu);
- host_kvm = kern_hyp_va(host_vcpu->kvm);
- handle = host_kvm->arch.pkvm.shadow_handle;
- shadow_state = pkvm_load_shadow_vcpu_state(handle, host_vcpu->vcpu_idx);
+ shadow_state = pkvm_loaded_shadow_vcpu_state();
if (!shadow_state)
goto out;

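With the explicit handle/vcpu_idx lookup gone, the handler relies on whatever shadow vCPU state the preceding load hypercall left on this physical CPU; host_vcpu and shadow_vcpu are then presumably derived from shadow_state in the unchanged lines between these two hunks. A minimal sketch of what pkvm_loaded_shadow_vcpu_state() could look like, assuming the EL2 vCPU load/put path publishes the current state in a per-CPU variable (the variable name below is hypothetical, not taken from this patch):

/*
 * Sketch only: loaded_shadow_state is an assumed name, maintained by
 * the vCPU load/put hypercalls rather than shown in this patch.
 */
static DEFINE_PER_CPU(struct kvm_shadow_vcpu_state *, loaded_shadow_state);

struct kvm_shadow_vcpu_state *pkvm_loaded_shadow_vcpu_state(void)
{
        return __this_cpu_read(loaded_shadow_state);
}

This also removes the need for kern_hyp_va() sanitisation here, since the hypervisor no longer dereferences a host-supplied vCPU pointer.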
@@ -468,11 +463,9 @@ static void handle___pkvm_host_map_guest(struct kvm_cpu_context *host_ctxt)
/* Topup shadow memcache with the host's */
ret = pkvm_refill_memcache(shadow_vcpu, host_vcpu);
if (ret)
- goto out_put_state;
+ goto out;

ret = __pkvm_host_share_guest(pfn, gfn, shadow_vcpu);
-out_put_state:
- pkvm_put_shadow_vcpu_state(shadow_state);
out:
cpu_reg(host_ctxt, 1) = ret;
}
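Dropping the out_put_state label is the flip side of the same change: the reference to the shadow state is now held for as long as the vCPU stays loaded, instead of being taken and released inside each hypercall. Roughly, the host-side contract becomes the following (only __pkvm_host_map_guest appears in this patch; the load/put hypercall names and arguments are assumptions based on this series):

/* Hypothetical bracketing of the map hypercall by vCPU load/put. */
kvm_call_hyp_nvhe(__pkvm_vcpu_load, shadow_handle, vcpu->vcpu_idx);
ret = kvm_call_hyp_nvhe(__pkvm_host_map_guest, pfn, gfn);
kvm_call_hyp_nvhe(__pkvm_vcpu_put);

Calling __pkvm_host_map_guest without a loaded vCPU now fails with -EINVAL via the !shadow_state check above.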
@@ -1143,9 +1143,9 @@ static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
return 0;
}

-static int pkvm_host_map_guest(u64 pfn, u64 gfn, struct kvm_vcpu *vcpu)
+static int pkvm_host_map_guest(u64 pfn, u64 gfn)
{
- int ret = kvm_call_hyp_nvhe(__pkvm_host_map_guest, pfn, gfn, vcpu);
+ int ret = kvm_call_hyp_nvhe(__pkvm_host_map_guest, pfn, gfn);

/*
* Getting -EPERM at this point implies that the pfn has already been
@@ -1211,7 +1211,7 @@ static int pkvm_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,

write_lock(&kvm->mmu_lock);
pfn = page_to_pfn(page);
- ret = pkvm_host_map_guest(pfn, fault_ipa >> PAGE_SHIFT, vcpu);
+ ret = pkvm_host_map_guest(pfn, fault_ipa >> PAGE_SHIFT);
if (ret) {
if (ret == -EAGAIN)
ret = 0;
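Taken together, the last two hunks shrink the hypercall ABI without changing the error contract: judging from the truncated comment in pkvm_host_map_guest() and the -EAGAIN handling here, a vCPU that loses a race to map the same pfn gets -EPERM back from EL2, which the wrapper presumably translates into -EAGAIN so that pkvm_mem_abort() treats the fault as spurious and lets the vCPU simply re-fault. A sketch of that translation (the actual tail of the wrapper lies outside the quoted hunks and is an assumption):

/*
 * Assumed tail of pkvm_host_map_guest(): a lost mapping race shows up
 * as -EPERM from EL2 and is reported as -EAGAIN to the fault path.
 */
if (ret == -EPERM)
        ret = -EAGAIN;

return ret;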