@@ -53,14 +53,18 @@ enum {
AUDIT_PRE_PAGE_FAULT,
AUDIT_POST_PAGE_FAULT,
AUDIT_PRE_PTE_WRITE,
- AUDIT_POST_PTE_WRITE
+ AUDIT_POST_PTE_WRITE,
+ AUDIT_PRE_SYNC,
+ AUDIT_POST_SYNC
};
char *audit_point_name[] = {
"pre page fault",
"post page fault",
"pre pte write",
- "post pte write"
+ "post pte write",
+ "pre sync",
+ "post sync"
};
#undef MMU_DEBUG
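The name table is indexed directly by the enum above, so the two lists have to grow in lockstep: a missing string would mislabel (or read past the end of) audit_point_name[] wherever the audit code prints it. A one-line compile-time guard, using the stock BUILD_BUG_ON() and ARRAY_SIZE() helpers and dropped into any function in the file, would catch a future mismatch (illustrative only, not part of the patch):

	BUILD_BUG_ON(ARRAY_SIZE(audit_point_name) != AUDIT_POST_SYNC + 1);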
@@ -2516,6 +2520,8 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
return;
+
+ trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
hpa_t root = vcpu->arch.mmu.root_hpa;
sp = page_header(root);
@@ -2531,6 +2537,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
mmu_sync_children(vcpu, sp);
}
}
+ trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
}
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
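The new PRE/POST pair brackets the whole of mmu_sync_roots(), mirroring the existing page-fault and pte-write pairs, so invariants can be audited on both sides of the root sync. For orientation, a sketch of the consuming side, assuming a probe attached to the kvm_mmu_audit tracepoint along the lines of mmu_audit.c (the names follow that file; the body is a simplification):

	static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
	{
		audit_point = point;              /* recorded for the per-spte checkers */
		audit_all_active_sps(vcpu->kvm);  /* per-shadow-page checks */
		audit_vcpu_spte(vcpu);            /* per-spte checks, incl. the new one */
	}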
@@ -164,6 +164,14 @@ static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
inspect_spte_has_rmap(vcpu->kvm, sptep);
}
+static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
+{
+ struct kvm_mmu_page *sp = page_header(__pa(sptep));
+
+ if (audit_point == AUDIT_POST_SYNC && sp->unsync)
+ audit_printk("meet unsync sp(%p) after sync root.\n", sp);
+}
+
static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
{
int i;
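audit_spte_after_sync() asserts a postcondition of mmu_sync_roots(): once the roots have been synced, no shadow page reachable from them may still have sp->unsync set. It keys on audit_point because audit_spte() runs every per-spte checker at every audit point, and the same condition is perfectly legal at AUDIT_PRE_SYNC. Assuming audit_printk() carries its usual "audit: (%s) " prefix taken from audit_point_name[], a hit would surface in dmesg roughly as (pointer value invented for illustration):

	audit: (post sync) found unsync sp(ffff88007c55a000) after sync root.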
@@ -179,7 +187,7 @@ static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
}
}
-void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
+static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
struct kvm_memory_slot *slot;
unsigned long *rmapp;
@@ -215,6 +223,7 @@ static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
audit_sptes_have_rmaps(vcpu, sptep, level);
audit_mappings(vcpu, sptep, level);
+ audit_spte_after_sync(vcpu, sptep, level);
}
static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
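Hooking the checker into audit_spte() is the only wiring needed: audit_vcpu_spte(), whose opening line closes the hunk above, simply hands audit_spte() to the recursive page-table walker, so the new check runs for every live spte under the vcpu's root. Roughly, assuming mmu_audit.c's mmu_spte_walk() helper:

	static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
	{
		mmu_spte_walk(vcpu, audit_spte);	/* visit every spte below root_hpa */
	}

Nothing fires by default, though: the trace_kvm_mmu_audit() calls are tracepoints, so the checkers only run once a probe such as the sketch above is registered (the kvm mmu_audit module parameter appears to be the intended switch; treat that as an assumption).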