@@ -4300,6 +4300,8 @@ void kvm_mmu_invalidate_memslot_pages(struct kvm *kvm,
if (slot)
zap_invalid_pages(kvm);
+
+ trace_kvm_mmu_invalidate_memslot_pages(kvm, slot);
spin_unlock(&kvm->mmu_lock);
}
@@ -276,6 +276,29 @@ TRACE_EVENT(
__spte_satisfied(old_spte), __spte_satisfied(new_spte)
)
);
+
+TRACE_EVENT(
+ kvm_mmu_invalidate_memslot_pages,
+ TP_PROTO(struct kvm *kvm, struct kvm_memory_slot *slot),
+ TP_ARGS(kvm, slot),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, mmu_valid_gen)
+ __field(unsigned int, mmu_used_pages)
+ __field(int, slot_id)
+ ),
+
+ TP_fast_assign(
+ __entry->mmu_valid_gen = kvm->arch.mmu_valid_gen;
+ __entry->mmu_used_pages = kvm->arch.n_used_mmu_pages;
+ __entry->slot_id = slot ? slot->id : -1;
+ ),
+
+ TP_printk("kvm-mmu-valid-gen %lx slot_id %d used_pages %x",
+ __entry->mmu_valid_gen, __entry->slot_id,
+ __entry->mmu_used_pages
+ )
+);
#endif /* _TRACE_KVMMMU_H */
#undef TRACE_INCLUDE_PATH
This is useful for debugging and development.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c      |    2 ++
 arch/x86/kvm/mmutrace.h |   23 +++++++++++++++++++++++
 2 files changed, 25 insertions(+), 0 deletions(-)