@@ -16,7 +16,7 @@
#define HYP_MEMBLOCK_REGIONS 128
-int kvm_init_pvm(struct kvm *kvm);
+int kvm_init_pvm(struct kvm *kvm, unsigned long type);
int kvm_shadow_create(struct kvm *kvm);
void kvm_shadow_destroy(struct kvm *kvm);
@@ -141,11 +141,14 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
int ret;
+ if (type & ~KVM_VM_TYPE_MASK)
+ return -EINVAL;
+
ret = kvm_share_hyp(kvm, kvm + 1);
if (ret)
return ret;
- ret = kvm_init_pvm(kvm);
+ ret = kvm_init_pvm(kvm, type);
if (ret)
goto err_unshare_kvm;
@@ -652,9 +652,6 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
u64 mmfr0, mmfr1;
u32 phys_shift;
- if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
- return -EINVAL;
-
phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
if (is_protected_kvm_enabled()) {
phys_shift = kvm_ipa_limit;
@@ -218,8 +218,16 @@ void kvm_shadow_destroy(struct kvm *kvm)
}
}
-int kvm_init_pvm(struct kvm *kvm)
+int kvm_init_pvm(struct kvm *kvm, unsigned long type)
{
mutex_init(&kvm->arch.pkvm.shadow_lock);
+
+ if (!(type & KVM_VM_TYPE_ARM_PROTECTED))
+ return 0;
+
+ if (!is_protected_kvm_enabled())
+ return -EINVAL;
+
+ kvm->arch.pkvm.enabled = true;
return 0;
}
@@ -887,6 +887,12 @@ struct kvm_ppc_resize_hpt {
#define KVM_VM_TYPE_ARM_IPA_SIZE_MASK 0xffULL
#define KVM_VM_TYPE_ARM_IPA_SIZE(x) \
((x) & KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
+
+#define KVM_VM_TYPE_ARM_PROTECTED (1UL << 8)
+
+#define KVM_VM_TYPE_MASK (KVM_VM_TYPE_ARM_IPA_SIZE_MASK | \
+ KVM_VM_TYPE_ARM_PROTECTED)
+
/*
* ioctls for /dev/kvm fds:
*/
Introduce a new virtual machine type, KVM_VM_TYPE_ARM_PROTECTED, which
specifies that the guest memory pages are to be unmapped from the host
stage-2 by the hypervisor.

Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/kvm_pkvm.h |  2 +-
 arch/arm64/kvm/arm.c              |  5 ++++-
 arch/arm64/kvm/mmu.c              |  3 ---
 arch/arm64/kvm/pkvm.c             | 10 +++++++++-
 include/uapi/linux/kvm.h          |  6 ++++++
 5 files changed, 20 insertions(+), 6 deletions(-)