[RFC,4/6] KVM: trace events: add type argument to async pf

Message ID 20241118123948.4796-5-kalyazin@amazon.com (mailing list archive)
State New
Series KVM: x86: async PF user

Commit Message

Nikita Kalyazin Nov. 18, 2024, 12:39 p.m. UTC
With async PF user being added, a new int argument `type` is introduced
so that the existing tracepoint definitions can be reused while
distinguishing async PF user from kernel.  The argument is either
0 ("kernel") or 1 ("user").

For now, all users of these tracepoints supply 0 ("kernel"), as async
PF user is not yet implemented.  The next commits that implement it
will pass 1 ("user") at the relevant call sites, as sketched below.
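
As a hedged sketch (not part of this patch), a future async PF user
path would invoke the same tracepoint with type 1, e.g.:

	/* hypothetical future call site: async PF user */
	trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa, 1);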

Signed-off-by: Nikita Kalyazin <kalyazin@amazon.com>
---
 arch/x86/kvm/mmu/mmu.c     |  4 +--
 arch/x86/kvm/x86.c         |  4 +--
 include/trace/events/kvm.h | 50 ++++++++++++++++++++++++--------------
 virt/kvm/async_pf.c        |  2 +-
 4 files changed, 37 insertions(+), 23 deletions(-)

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f0dbc3c68e5c..004e068cabae 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4395,9 +4395,9 @@  static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 		return RET_PF_CONTINUE; /* *pfn has correct page already */
 
 	if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) {
-		trace_kvm_try_async_get_page(fault->addr, fault->gfn);
+		trace_kvm_try_async_get_page(fault->addr, fault->gfn, 0);
 		if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
-			trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn);
+			trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn, 0);
 			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
 			return RET_PF_RETRY;
 		} else if (kvm_arch_setup_async_pf(vcpu, fault)) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 800493739043..0a04de5dbada 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -13408,7 +13408,7 @@  bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 {
 	struct x86_exception fault;
 
-	trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa);
+	trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa, 0);
 	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
 
 	if (kvm_can_deliver_async_pf(vcpu) &&
@@ -13447,7 +13447,7 @@  void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 		work->arch.token = ~0; /* broadcast wakeup */
 	else
 		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
-	trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
+	trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa, 0);
 
 	if ((work->wakeup_all || work->notpresent_injected) &&
 	    kvm_pv_async_pf_enabled(vcpu) &&
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 74e40d5d4af4..a7731b62863b 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -256,90 +256,104 @@  TRACE_EVENT(kvm_fpu,
 );
 
 #ifdef CONFIG_KVM_ASYNC_PF
+#define kvm_async_pf_type_symbol	\
+	{0, "kernel"},		\
+	{1, "user"}
+
 DECLARE_EVENT_CLASS(kvm_async_get_page_class,
 
-	TP_PROTO(u64 gva, u64 gfn),
+	TP_PROTO(u64 gva, u64 gfn, int type),
 
-	TP_ARGS(gva, gfn),
+	TP_ARGS(gva, gfn, type),
 
 	TP_STRUCT__entry(
 		__field(__u64, gva)
 		__field(u64, gfn)
+		__field(int, type)
 	),
 
 	TP_fast_assign(
 		__entry->gva = gva;
 		__entry->gfn = gfn;
+		__entry->type = type;
 	),
 
-	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
+	TP_printk("gva = %#llx, gfn = %#llx, type = %s", __entry->gva,
+		__entry->gfn, __print_symbolic(__entry->type,
+		kvm_async_pf_type_symbol))
 );
 
 DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
 
-	TP_PROTO(u64 gva, u64 gfn),
+	TP_PROTO(u64 gva, u64 gfn, int type),
 
-	TP_ARGS(gva, gfn)
+	TP_ARGS(gva, gfn, type)
 );
 
 DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_repeated_fault,
 
-	TP_PROTO(u64 gva, u64 gfn),
+	TP_PROTO(u64 gva, u64 gfn, int type),
 
-	TP_ARGS(gva, gfn)
+	TP_ARGS(gva, gfn, type)
 );
 
 DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
 
-	TP_PROTO(u64 token, u64 gva),
+	TP_PROTO(u64 token, u64 gva, int type),
 
-	TP_ARGS(token, gva),
+	TP_ARGS(token, gva, type),
 
 	TP_STRUCT__entry(
 		__field(__u64, token)
 		__field(__u64, gva)
+		__field(int, type)
 	),
 
 	TP_fast_assign(
 		__entry->token = token;
 		__entry->gva = gva;
+		__entry->type = type;
 	),
 
-	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
+	TP_printk("token %#llx gva %#llx type %s", __entry->token, __entry->gva,
+		__print_symbolic(__entry->type, kvm_async_pf_type_symbol))
 
 );
 
 DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
 
-	TP_PROTO(u64 token, u64 gva),
+	TP_PROTO(u64 token, u64 gva, int type),
 
-	TP_ARGS(token, gva)
+	TP_ARGS(token, gva, type)
 );
 
 DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
 
-	TP_PROTO(u64 token, u64 gva),
+	TP_PROTO(u64 token, u64 gva, int type),
 
-	TP_ARGS(token, gva)
+	TP_ARGS(token, gva, type)
 );
 
 TRACE_EVENT(
 	kvm_async_pf_completed,
-	TP_PROTO(unsigned long address, u64 gva),
-	TP_ARGS(address, gva),
+	TP_PROTO(unsigned long address, u64 gva, int type),
+	TP_ARGS(address, gva, type),
 
 	TP_STRUCT__entry(
 		__field(unsigned long, address)
 		__field(u64, gva)
+		__field(int, type)
 		),
 
 	TP_fast_assign(
 		__entry->address = address;
 		__entry->gva = gva;
+		__entry->type = type;
 		),
 
-	TP_printk("gva %#llx address %#lx",  __entry->gva,
-		  __entry->address)
+	TP_printk("gva %#llx address %#lx type %s",  __entry->gva,
+		  __entry->address, __print_symbolic(__entry->type,
+		  kvm_async_pf_type_symbol))
 );
 
 #endif
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 99a63bad0306..77c689a9b585 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -92,7 +92,7 @@  static void async_pf_execute(struct work_struct *work)
 	if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC) && first)
 		kvm_arch_async_page_present_queued(vcpu);
 
-	trace_kvm_async_pf_completed(addr, cr2_or_gpa);
+	trace_kvm_async_pf_completed(addr, cr2_or_gpa, 0);
 
 	__kvm_vcpu_wake_up(vcpu);
 }
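
For context, __print_symbolic() resolves the stored integer to its
display string when the trace buffer is read (via
trace_print_symbols_seq() in kernel/trace/trace_output.c).  A minimal
C sketch of the equivalent lookup, for illustration only:

	/*
	 * Illustrative equivalent of __print_symbolic() for this event;
	 * the real helper prints unmatched values in hex instead.
	 */
	static const char *kvm_async_pf_type_str(int type)
	{
		switch (type) {
		case 0:
			return "kernel";
		case 1:
			return "user";
		default:
			return "UNKNOWN";
		}
	}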