@@ -351,7 +351,8 @@ union kvm_mmu_page_role {
unsigned ad_disabled:1;
unsigned guest_mode:1;
unsigned passthrough:1;
- unsigned :5;
+ unsigned is_private:1;
+ unsigned :4;
/*
* This is left at the top of the word so that
@@ -363,6 +364,17 @@ union kvm_mmu_page_role {
};
};
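+/* Accessors for the is_private bit in kvm_mmu_page_role. */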
+static inline bool kvm_mmu_page_role_is_private(union kvm_mmu_page_role role)
+{
+ return !!role.is_private;
+}
+
+static inline void kvm_mmu_page_role_set_private(union kvm_mmu_page_role *role)
+{
+ role->is_private = 1;
+}
+
/*
* kvm_mmu_extended_role complements kvm_mmu_page_role, tracking properties
* relevant to the current MMU configuration. When loading CR0, CR4, or EFER,
@@ -145,6 +145,12 @@ static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
return kvm_mmu_role_as_id(sp->role);
}
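+/* True if this shadow page maps private guest memory, per its role. */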
+static inline bool is_private_sp(const struct kvm_mmu_page *sp)
+{
+ return kvm_mmu_page_role_is_private(sp->role);
+}
+
static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
{
/*
@@ -265,6 +265,12 @@ static inline struct kvm_mmu_page *root_to_sp(hpa_t root)
return spte_to_child_sp(root);
}
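+/* True if @sptep points at an SPTE within a private shadow page. */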
+static inline bool is_private_sptep(u64 *sptep)
+{
+ return is_private_sp(sptep_to_sp(sptep));
+}
+
static inline bool is_mmio_spte(struct kvm *kvm, u64 spte)
{
return (spte & shadow_mmio_mask) == kvm->arch.shadow_mmio_value &&