@@ -63,6 +63,7 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
+ __KVM_HOST_SMCCC_FUNC___pkvm_disown_pages,
__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
/* Hypercalls available after pKVM finalisation */
@@ -58,6 +58,7 @@ enum pkvm_component_id {
 	PKVM_ID_HOST,
 	PKVM_ID_HYP,
 	PKVM_ID_GUEST,
+	PKVM_ID_NOBODY,	/* no owner: the host is denied all future access (see __pkvm_disown_pages()) */
 };
extern unsigned long hyp_nr_cpus;
@@ -98,6 +98,7 @@ int __pkvm_init_shadow(struct kvm *kvm,
unsigned long pgd_hva,
unsigned long last_ran_hva, size_t last_ran_size);
int __pkvm_teardown_shadow(unsigned int shadow_handle);
+int __pkvm_disown_pages(phys_addr_t phys, size_t size);
struct kvm_shadow_vcpu_state *
pkvm_load_shadow_vcpu_state(unsigned int shadow_handle, unsigned int vcpu_idx);
@@ -1031,6 +1031,15 @@ static void handle___pkvm_teardown_shadow(struct kvm_cpu_context *host_ctxt)
 	cpu_reg(host_ctxt, 1) = __pkvm_teardown_shadow(shadow_handle);
 }
 
+/* Unpack (phys, size) from the host's registers; the result goes back in x1. */
+static void handle___pkvm_disown_pages(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
+	DECLARE_REG(size_t, size, host_ctxt, 2);
+
+	cpu_reg(host_ctxt, 1) = __pkvm_disown_pages(phys, size);
+}
+
typedef void (*hcall_t)(struct kvm_cpu_context *);
#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@@ -1048,6 +1056,7 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
HANDLE_FUNC(__kvm_tlb_flush_vmid),
HANDLE_FUNC(__kvm_flush_cpu_context),
+ HANDLE_FUNC(__pkvm_disown_pages),
HANDLE_FUNC(__pkvm_prot_finalize),
HANDLE_FUNC(__pkvm_host_share_hyp),
@@ -1756,3 +1756,28 @@ int __pkvm_host_reclaim_page(u64 pfn)
 
 	return ret;
 }
+
+/*
+ * __pkvm_disown_pages - permanently remove a physical range from the host.
+ * @phys: base physical address of the range; must be page-aligned.
+ * @size: size of the range in bytes; must be a multiple of PAGE_SIZE.
+ *
+ * Assign ownership of the range to PKVM_ID_NOBODY in the host stage-2,
+ * denying the host any future access to these pages.
+ *
+ * Return: 0 on success, -EINVAL on a malformed range, or a negative error
+ * code propagated from host_stage2_set_owner_locked().
+ */
+int __pkvm_disown_pages(phys_addr_t phys, size_t size)
+{
+	int ret;
+
+	/* phys and size come straight from the host: sanity-check them first. */
+	if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size) || phys + size < phys)
+		return -EINVAL;
+
+	host_lock_component();
+	ret = host_stage2_set_owner_locked(phys, size, PKVM_ID_NOBODY);
+	host_unlock_component();
+
+	return ret;
+}
Currently we only deny the host access to hyp and guest pages. However,
there may be other pages that could potentially be used to indirectly
compromise the hypervisor or the other guests. Therefore introduce a
__pkvm_disown_pages hypercall that the host kernel may use to deny its
future self access to those pages before deprivileging itself.

Signed-off-by: Peter Collingbourne <pcc@google.com>
---
v2:
- refcount the PTEs owned by NOBODY

 arch/arm64/include/asm/kvm_asm.h              |  1 +
 arch/arm64/kvm/hyp/include/nvhe/mem_protect.h |  1 +
 arch/arm64/kvm/hyp/include/nvhe/pkvm.h        |  1 +
 arch/arm64/kvm/hyp/nvhe/hyp-main.c            |  9 +++++++++
 arch/arm64/kvm/hyp/nvhe/mem_protect.c         | 11 +++++++++++
 5 files changed, 23 insertions(+)