@@ -164,6 +164,7 @@ struct kvm_pinned_page {
};
struct kvm_protected_vm {
+ /* True for a protected (pKVM) VM; read via kvm_vm_is_protected(). */
+ bool enabled;
unsigned int shadow_handle;
struct mutex shadow_lock;
struct kvm_hyp_memcache teardown_mc;
@@ -895,10 +896,7 @@ int kvm_set_ipa_limit(void);
#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
-static inline bool kvm_vm_is_protected(struct kvm *kvm)
-{
- return false;
-}
+/*
+ * A VM is protected iff its pkvm state was flagged as enabled.
+ * NOTE(review): a macro rather than a static inline — presumably
+ * struct kvm is not fully defined at this point in the header; confirm.
+ */
+#define kvm_vm_is_protected(kvm) ((kvm)->arch.pkvm.enabled)
void kvm_init_protected_traps(struct kvm_vcpu *vcpu);
@@ -58,6 +58,19 @@ static inline struct kvm_shadow_vm *get_shadow_vm(struct kvm_vcpu *shadow_vcpu)
return get_shadow_state(shadow_vcpu)->shadow_vm;
}
+/* Report whether @shadow_state's VM was flagged as protected (pKVM). */
+static inline bool shadow_state_is_protected(struct kvm_shadow_vcpu_state *shadow_state)
+{
+ return shadow_state->shadow_vm->kvm.arch.pkvm.enabled;
+}
+
+/*
+ * Report whether @vcpu belongs to a protected VM. Short-circuits to
+ * false when pKVM is not enabled, before touching any shadow state —
+ * presumably shadow state only exists under pKVM; confirm with callers.
+ */
+static inline bool vcpu_is_protected(struct kvm_vcpu *vcpu)
+{
+ if (!is_protected_kvm_enabled())
+ return false;
+
+ return shadow_state_is_protected(get_shadow_state(vcpu));
+}
+
void hyp_shadow_table_init(void *tbl);
int __pkvm_init_shadow(struct kvm *kvm, unsigned long shadow_hva,
size_t shadow_size, unsigned long pgd_hva);