
[1/2] KVM: convert custom marker based tracing to event traces

Message ID 20090615183243.182853565@localhost.localdomain (mailing list archive)
State New, archived

Commit Message

Marcelo Tosatti June 15, 2009, 6:31 p.m. UTC
This allows use of the powerful ftrace infrastructure.

See Documentation/trace/ for usage information.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
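
As a quick orientation for readers following the Documentation/trace/ pointer above: once the patch is applied, the new events are driven through the ftrace event interface. The commands below are only a sketch and assume debugfs is mounted at /sys/kernel/debug (the usual location at the time):

# mount debugfs if it is not already mounted
mount -t debugfs none /sys/kernel/debug

# enable every event in the "kvm" trace system added by this patch
echo 1 > /sys/kernel/debug/tracing/events/kvm/enable

# stream the events as guests run
cat /sys/kernel/debug/tracing/trace_pipe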

Comments

Steven Rostedt June 15, 2009, 8:03 p.m. UTC | #1
On Mon, 15 Jun 2009, Marcelo Tosatti wrote:

> This allows use of the powerful ftrace infrastructure.
> 
> See Documentation/trace/ for usage information.
> 
> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
> 
> Index: kvm/arch/x86/kvm/svm.c
> ===================================================================
> --- kvm.orig/arch/x86/kvm/svm.c
> +++ kvm/arch/x86/kvm/svm.c
> @@ -29,6 +29,9 @@
>  #include <asm/desc.h>
>  
>  #include <asm/virtext.h>
> +#include "svm-trace.h"
> +#define CREATE_TRACE_POINTS
> +#include <trace/events/kvm/x86-arch.h>
>  
>  #define __ex(x) __kvm_handle_fault_on_reboot(x)
>  


> Index: kvm/arch/x86/kvm/vmx.c
> ===================================================================
> --- kvm.orig/arch/x86/kvm/vmx.c
> +++ kvm/arch/x86/kvm/vmx.c
> @@ -34,6 +34,10 @@
>  #include <asm/virtext.h>
>  #include <asm/mce.h>
>  
> +#include "vmx-trace.h"
> +#define CREATE_TRACE_POINTS
> +#include <trace/events/kvm/x86-arch.h>
> +
>  #define __ex(x) __kvm_handle_fault_on_reboot(x)
>  
>  MODULE_AUTHOR("Qumranet");

> Index: kvm/arch/x86/kvm/x86.c
> ===================================================================
> --- kvm.orig/arch/x86/kvm/x86.c
> +++ kvm/arch/x86/kvm/x86.c
> @@ -37,6 +37,8 @@
>  #include <linux/iommu.h>
>  #include <linux/intel-iommu.h>
>  #include <linux/cpufreq.h>
> +#define CREATE_TRACE_POINTS
> +#include <trace/events/kvm/x86.h>
>  
>  #include <asm/uaccess.h>
>  #include <asm/msr.h>
> @@ -347,9 +349,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0);


> Index: kvm/include/trace/events/kvm/x86-arch.h
> ===================================================================
> --- /dev/null
> +++ kvm/include/trace/events/kvm/x86-arch.h


One suggestion. Instead of putting arch-specific trace points into
generic code, you can put these into arch/x86/kvm/trace.h?

Then you can in the Makefile add:

CFLAGS_x86.o := -I.
CFLAGS_svm.o := -I.
CFLAGS_vmx.o := -I.

Or better yet, have a single file called trace.c:

in the Makefile:
CFLAGS_trace.o := -I.
obj-$(EVENT_TRACING) += trace.o

in trace.c:

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-arch.h"


Then have the kvm/x86.h moved to trace.h
and the kvm/x86-arch.h moved to trace-arch.h

Just change the "TRACE_INCLUDE_FILE" to include the proper name.

-- Steve
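
To make the suggested layout concrete, the boilerplate at the bottom of a relocated arch/x86/kvm/trace.h would look roughly like the sketch below. This is only an illustration: the TRACE_INCLUDE_PATH value is an assumption and simply has to be findable through the -I flags added in the Makefile snippet above, while the TRACE_EVENT() bodies themselves stay exactly as posted in the patch.

/* arch/x86/kvm/trace.h -- sketch of the relocated header */
#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/* TRACE_EVENT(kvm_entry, ...), TRACE_EVENT(kvm_pio, ...), etc. go here, unchanged */

#endif /* _TRACE_KVM_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH arch/x86/kvm	/* assumption: must resolve via the Makefile's -I flag */
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>

A relocated trace-arch.h would carry the same trailing block with TRACE_INCLUDE_FILE set to trace-arch, which is what the "change the TRACE_INCLUDE_FILE" remark refers to.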


Marcelo Tosatti June 16, 2009, 12:44 p.m. UTC | #2
On Mon, Jun 15, 2009 at 04:03:10PM -0400, Steven Rostedt wrote:
> 
> One suggestion. Instead of putting arch-specific trace points into
> generic code, you can put these into arch/x86/kvm/trace.h?
> 
> Then you can in the Makefile add:
> 
> CFLAGS_x86.o := -I.
> CFLAGS_svm.o := -I.
> CFLAGS_vmx.o := -I.
> 
> Or better yet, have a single file called trace.c:
> 
> in the Makefile:
> CFLAGS_trace.o := -I.
> obj-$(EVENT_TRACING) += trace.o
> 
> in trace.c:
> 
> #define CREATE_TRACE_POINTS
> #include "trace.h"
> #include "trace-arch.h"
> 
> 
> Then have the kvm/x86.h moved to trace.h
> and the kvm/x86-arch.h moved to trace-arch.h
> 
> Just change the "TRACE_INCLUDE_FILE" to include the proper name.
> 
> -- Steve

Similar to http://patchwork.kernel.org/patch/23829/, but moving the
x86 tracepoint definitions to arch/x86/kvm/ ?

I thought the point of include/trace/ was to have all tracepoint
definitions in a single directory?
Steven Rostedt June 16, 2009, 1:40 p.m. UTC | #3
On Tue, 16 Jun 2009, Marcelo Tosatti wrote:
> 
> Similar to http://patchwork.kernel.org/patch/23829/, but moving the
> x86 tracepoint definitions to arch/x86/kvm/ ?

Yes, similar to what Christoph suggested.

> 
> I thought the point of include/trace/ was to have all tracepoint
> definitions in a single directory?
> 

That's not what I heard ;-)

It should only be for core kernel code. If you have trace points that are 
the same on most archs, then sure. But if it is specific to one arch, then 
we are going to end up with arch code scattered throughout the kernel 
again. I thought the idea of moving include/asm-x86 into 
arch/x86/include/asm was to consolidate arch code to one location?

Thus, if a trace point is made for one arch, then it should be located in 
that arch.

-- Steve

Marcelo Tosatti June 16, 2009, 10:13 p.m. UTC | #4


Patch

Index: kvm/arch/x86/kvm/lapic.c
===================================================================
--- kvm.orig/arch/x86/kvm/lapic.c
+++ kvm/arch/x86/kvm/lapic.c
@@ -26,6 +26,7 @@ 
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/math64.h>
+#include <trace/events/kvm/x86.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
 #include <asm/page.h>
@@ -515,8 +516,6 @@  static u32 __apic_read(struct kvm_lapic 
 {
 	u32 val = 0;
 
-	KVMTRACE_1D(APIC_ACCESS, apic->vcpu, (u32)offset, handler);
-
 	if (offset >= LAPIC_MMIO_LENGTH)
 		return 0;
 
@@ -562,6 +561,8 @@  static void apic_mmio_read(struct kvm_io
 	}
 	result = __apic_read(apic, offset & ~0xf);
 
+	trace_kvm_apic_read(offset, result);
+
 	switch (len) {
 	case 1:
 	case 2:
@@ -657,7 +658,7 @@  static void apic_mmio_write(struct kvm_i
 
 	offset &= 0xff0;
 
-	KVMTRACE_1D(APIC_ACCESS, apic->vcpu, (u32)offset, handler);
+	trace_kvm_apic_write(offset, val);
 
 	switch (offset) {
 	case APIC_ID:		/* Local APIC ID */
Index: kvm/arch/x86/kvm/svm-trace.h
===================================================================
--- /dev/null
+++ kvm/arch/x86/kvm/svm-trace.h
@@ -0,0 +1,51 @@ 
+#define exit_reasons svm_exit_reasons
+#define svm_exit_reasons 						\
+	{SVM_EXIT_READ_CR0,           		"read_cr0"},		\
+	{SVM_EXIT_READ_CR3,	      		"read_cr3"},		\
+	{SVM_EXIT_READ_CR4,	      		"read_cr4"},		\
+	{SVM_EXIT_READ_CR8,  	      		"read_cr8"},		\
+	{SVM_EXIT_WRITE_CR0,          		"write_cr0"},		\
+	{SVM_EXIT_WRITE_CR3,	      		"write_cr3"},		\
+	{SVM_EXIT_WRITE_CR4,          		"write_cr4"},		\
+	{SVM_EXIT_WRITE_CR8, 	      		"write_cr8"},		\
+	{SVM_EXIT_READ_DR0, 	      		"read_dr0"},		\
+	{SVM_EXIT_READ_DR1,	      		"read_dr1"},		\
+	{SVM_EXIT_READ_DR2,	      		"read_dr2"},		\
+	{SVM_EXIT_READ_DR3,	      		"read_dr3"},		\
+	{SVM_EXIT_WRITE_DR0,	      		"write_dr0"},		\
+	{SVM_EXIT_WRITE_DR1,	      		"write_dr1"},		\
+	{SVM_EXIT_WRITE_DR2,	      		"write_dr2"},		\
+	{SVM_EXIT_WRITE_DR3,	      		"write_dr3"},		\
+	{SVM_EXIT_WRITE_DR5,	      		"write_dr5"},		\
+	{SVM_EXIT_WRITE_DR7,	      		"write_dr7"},		\
+	{SVM_EXIT_EXCP_BASE + DB_VECTOR,	"DB excp"},		\
+	{SVM_EXIT_EXCP_BASE + BP_VECTOR,	"BP excp"},		\
+	{SVM_EXIT_EXCP_BASE + UD_VECTOR,	"UD excp"},		\
+	{SVM_EXIT_EXCP_BASE + PF_VECTOR,	"PF excp"},		\
+	{SVM_EXIT_EXCP_BASE + NM_VECTOR,	"NM excp"},		\
+	{SVM_EXIT_EXCP_BASE + MC_VECTOR,	"MC excp"},		\
+	{SVM_EXIT_INTR,				"interrupt"},		\
+	{SVM_EXIT_NMI,				"nmi"},			\
+	{SVM_EXIT_SMI,				"smi"},			\
+	{SVM_EXIT_INIT,				"init"},		\
+	{SVM_EXIT_VINTR,			"vintr"},		\
+	{SVM_EXIT_CPUID,			"cpuid"},		\
+	{SVM_EXIT_INVD,				"invd"},		\
+	{SVM_EXIT_HLT,				"hlt"},			\
+	{SVM_EXIT_INVLPG,			"invlpg"},		\
+	{SVM_EXIT_INVLPGA,			"invlpga"},		\
+	{SVM_EXIT_IOIO,				"io"},			\
+	{SVM_EXIT_MSR,				"msr"},			\
+	{SVM_EXIT_TASK_SWITCH,			"task_switch"},		\
+	{SVM_EXIT_SHUTDOWN,			"shutdown"},		\
+	{SVM_EXIT_VMRUN,			"vmrun"},		\
+	{SVM_EXIT_VMMCALL,			"hypercall"},		\
+	{SVM_EXIT_VMLOAD,			"vmload"},		\
+	{SVM_EXIT_VMSAVE,			"vmsave"},		\
+	{SVM_EXIT_STGI,				"stgi"},		\
+	{SVM_EXIT_CLGI,				"clgi"},		\
+	{SVM_EXIT_SKINIT,			"skinit"},		\
+	{SVM_EXIT_WBINVD,			"wbinvd"},		\
+	{SVM_EXIT_MONITOR,			"monitor"},		\
+	{SVM_EXIT_MWAIT,			"mwait"},		\
+	{SVM_EXIT_NPF,				"npf"}
Index: kvm/arch/x86/kvm/svm.c
===================================================================
--- kvm.orig/arch/x86/kvm/svm.c
+++ kvm/arch/x86/kvm/svm.c
@@ -29,6 +29,9 @@ 
 #include <asm/desc.h>
 
 #include <asm/virtext.h>
+#include "svm-trace.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/kvm/x86-arch.h>
 
 #define __ex(x) __kvm_handle_fault_on_reboot(x)
 
@@ -1113,7 +1116,6 @@  static unsigned long svm_get_dr(struct k
 		val = 0;
 	}
 
-	KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
 	return val;
 }
 
@@ -1122,8 +1124,6 @@  static void svm_set_dr(struct kvm_vcpu *
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)value, handler);
-
 	*exception = 0;
 
 	switch (dr) {
@@ -1171,14 +1171,7 @@  static int pf_interception(struct vcpu_s
 	fault_address  = svm->vmcb->control.exit_info_2;
 	error_code = svm->vmcb->control.exit_info_1;
 
-	if (!npt_enabled)
-		KVMTRACE_3D(PAGE_FAULT, &svm->vcpu, error_code,
-			    (u32)fault_address, (u32)(fault_address >> 32),
-			    handler);
-	else
-		KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
-			    (u32)fault_address, (u32)(fault_address >> 32),
-			    handler);
+	trace_kvm_page_fault(fault_address, error_code);
 	/*
 	 * FIXME: Tis shouldn't be necessary here, but there is a flush
 	 * missing in the MMU code. Until we find this bug, flush the
@@ -1305,14 +1298,12 @@  static int io_interception(struct vcpu_s
 
 static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-	KVMTRACE_0D(NMI, &svm->vcpu, handler);
 	return 1;
 }
 
 static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	++svm->vcpu.stat.irq_exits;
-	KVMTRACE_0D(INTR, &svm->vcpu, handler);
 	return 1;
 }
 
@@ -2079,8 +2070,7 @@  static int rdmsr_interception(struct vcp
 	if (svm_get_msr(&svm->vcpu, ecx, &data))
 		kvm_inject_gp(&svm->vcpu, 0);
 	else {
-		KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
-			    (u32)(data >> 32), handler);
+		trace_kvm_msr_read(ecx, data);
 
 		svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
 		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
@@ -2158,8 +2148,7 @@  static int wrmsr_interception(struct vcp
 	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
 		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
-	KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
-		    handler);
+	trace_kvm_msr_write(ecx, data);
 
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
 	if (svm_set_msr(&svm->vcpu, ecx, data))
@@ -2180,8 +2169,6 @@  static int msr_interception(struct vcpu_
 static int interrupt_window_interception(struct vcpu_svm *svm,
 				   struct kvm_run *kvm_run)
 {
-	KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler);
-
 	svm_clear_vintr(svm);
 	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
 	/*
@@ -2260,8 +2247,7 @@  static int handle_exit(struct kvm_run *k
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 exit_code = svm->vmcb->control.exit_code;
 
-	KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip,
-		    (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);
+	trace_kvm_exit(exit_code, svm->vmcb->save.rip);
 
 	if (is_nested(svm)) {
 		nsvm_printk("nested handle_exit: 0x%x | 0x%lx | 0x%lx | 0x%lx\n",
@@ -2349,7 +2335,7 @@  static inline void svm_inject_irq(struct
 {
 	struct vmcb_control_area *control;
 
-	KVMTRACE_1D(INJ_VIRQ, &svm->vcpu, (u32)irq, handler);
+	trace_kvm_inj_virq(irq);
 
 	++svm->vcpu.stat.irq_injections;
 	control = &svm->vmcb->control;
Index: kvm/arch/x86/kvm/vmx-trace.h
===================================================================
--- /dev/null
+++ kvm/arch/x86/kvm/vmx-trace.h
@@ -0,0 +1,22 @@ 
+#define exit_reasons vmx_exit_reasons
+#define vmx_exit_reasons 						\
+	{EXIT_REASON_EXCEPTION_NMI,           "exception"},		\
+	{EXIT_REASON_EXTERNAL_INTERRUPT,      "ext_irq"},		\
+	{EXIT_REASON_TRIPLE_FAULT,            "triple_fault"},		\
+	{EXIT_REASON_NMI_WINDOW,              "nmi_window"},		\
+	{EXIT_REASON_IO_INSTRUCTION,          "io_instruction"},	\
+	{EXIT_REASON_CR_ACCESS,               "cr_access"},		\
+	{EXIT_REASON_DR_ACCESS,               "dr_access"},		\
+	{EXIT_REASON_CPUID,                   "cpuid"},			\
+	{EXIT_REASON_MSR_READ,                "rdmsr"},			\
+	{EXIT_REASON_MSR_WRITE,               "wrmsr"},			\
+	{EXIT_REASON_PENDING_INTERRUPT,       "interrupt_window"},	\
+	{EXIT_REASON_HLT,                     "halt"},			\
+	{EXIT_REASON_INVLPG,                  "invlpg"},		\
+	{EXIT_REASON_VMCALL,                  "hypercall"},		\
+	{EXIT_REASON_TPR_BELOW_THRESHOLD,     "tpr_below_thres"},	\
+	{EXIT_REASON_APIC_ACCESS,             "apic_access"},		\
+	{EXIT_REASON_WBINVD,                  "wbinvd"},		\
+	{EXIT_REASON_TASK_SWITCH,             "task_switch"},		\
+	{EXIT_REASON_EPT_VIOLATION,           "ept_violation"}
+
Index: kvm/arch/x86/kvm/vmx.c
===================================================================
--- kvm.orig/arch/x86/kvm/vmx.c
+++ kvm/arch/x86/kvm/vmx.c
@@ -34,6 +34,10 @@ 
 #include <asm/virtext.h>
 #include <asm/mce.h>
 
+#include "vmx-trace.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/kvm/x86-arch.h>
+
 #define __ex(x) __kvm_handle_fault_on_reboot(x)
 
 MODULE_AUTHOR("Qumranet");
@@ -2550,7 +2554,7 @@  static void vmx_inject_irq(struct kvm_vc
 	uint32_t intr;
 	int irq = vcpu->arch.interrupt.nr;
 
-	KVMTRACE_1D(INJ_VIRQ, vcpu, (u32)irq, handler);
+	trace_kvm_inj_virq(irq);
 
 	++vcpu->stat.irq_injections;
 	if (vmx->rmode.vm86_active) {
@@ -2751,8 +2755,8 @@  static int handle_exception(struct kvm_v
 		if (enable_ept)
 			BUG();
 		cr2 = vmcs_readl(EXIT_QUALIFICATION);
-		KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
-			    (u32)((u64)cr2 >> 32), handler);
+		trace_kvm_page_fault(cr2, error_code);
+
 		if (kvm_event_needs_reinjection(vcpu))
 			kvm_mmu_unprotect_page_virt(vcpu, cr2);
 		return kvm_mmu_page_fault(vcpu, cr2, error_code);
@@ -2799,7 +2803,6 @@  static int handle_external_interrupt(str
 				     struct kvm_run *kvm_run)
 {
 	++vcpu->stat.irq_exits;
-	KVMTRACE_1D(INTR, vcpu, vmcs_read32(VM_EXIT_INTR_INFO), handler);
 	return 1;
 }
 
@@ -2847,7 +2850,7 @@  vmx_patch_hypercall(struct kvm_vcpu *vcp
 
 static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	unsigned long exit_qualification;
+	unsigned long exit_qualification, val;
 	int cr;
 	int reg;
 
@@ -2856,21 +2859,19 @@  static int handle_cr(struct kvm_vcpu *vc
 	reg = (exit_qualification >> 8) & 15;
 	switch ((exit_qualification >> 4) & 3) {
 	case 0: /* mov to cr */
-		KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr,
-			    (u32)kvm_register_read(vcpu, reg),
-			    (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
-			    handler);
+		val = kvm_register_read(vcpu, reg);
+		trace_kvm_cr_write(cr, val);
 		switch (cr) {
 		case 0:
-			kvm_set_cr0(vcpu, kvm_register_read(vcpu, reg));
+			kvm_set_cr0(vcpu, val);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 3:
-			kvm_set_cr3(vcpu, kvm_register_read(vcpu, reg));
+			kvm_set_cr3(vcpu, val);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 4:
-			kvm_set_cr4(vcpu, kvm_register_read(vcpu, reg));
+			kvm_set_cr4(vcpu, val);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 8: {
@@ -2892,23 +2893,19 @@  static int handle_cr(struct kvm_vcpu *vc
 		vcpu->arch.cr0 &= ~X86_CR0_TS;
 		vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
 		vmx_fpu_activate(vcpu);
-		KVMTRACE_0D(CLTS, vcpu, handler);
 		skip_emulated_instruction(vcpu);
 		return 1;
 	case 1: /*mov from cr*/
 		switch (cr) {
 		case 3:
 			kvm_register_write(vcpu, reg, vcpu->arch.cr3);
-			KVMTRACE_3D(CR_READ, vcpu, (u32)cr,
-				    (u32)kvm_register_read(vcpu, reg),
-				    (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
-				    handler);
+			trace_kvm_cr_read(cr, vcpu->arch.cr3);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 8:
-			kvm_register_write(vcpu, reg, kvm_get_cr8(vcpu));
-			KVMTRACE_2D(CR_READ, vcpu, (u32)cr,
-				    (u32)kvm_register_read(vcpu, reg), handler);
+			val = kvm_get_cr8(vcpu);
+			kvm_register_write(vcpu, cr, val);
+			trace_kvm_cr_read(cr, val);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		}
@@ -2976,7 +2973,6 @@  static int handle_dr(struct kvm_vcpu *vc
 			val = 0;
 		}
 		kvm_register_write(vcpu, reg, val);
-		KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
 	} else {
 		val = vcpu->arch.regs[reg];
 		switch (dr) {
@@ -3009,7 +3005,6 @@  static int handle_dr(struct kvm_vcpu *vc
 			}
 			break;
 		}
-		KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)val, handler);
 	}
 	skip_emulated_instruction(vcpu);
 	return 1;
@@ -3031,8 +3026,7 @@  static int handle_rdmsr(struct kvm_vcpu 
 		return 1;
 	}
 
-	KVMTRACE_3D(MSR_READ, vcpu, ecx, (u32)data, (u32)(data >> 32),
-		    handler);
+	trace_kvm_msr_read(ecx, data);
 
 	/* FIXME: handling of bits 32:63 of rax, rdx */
 	vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
@@ -3047,8 +3041,7 @@  static int handle_wrmsr(struct kvm_vcpu 
 	u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
 		| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
-	KVMTRACE_3D(MSR_WRITE, vcpu, ecx, (u32)data, (u32)(data >> 32),
-		    handler);
+	trace_kvm_msr_write(ecx, data);
 
 	if (vmx_set_msr(vcpu, ecx, data) != 0) {
 		kvm_inject_gp(vcpu, 0);
@@ -3075,7 +3068,6 @@  static int handle_interrupt_window(struc
 	cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
 
-	KVMTRACE_0D(PEND_INTR, vcpu, handler);
 	++vcpu->stat.irq_window_exits;
 
 	/*
@@ -3221,6 +3213,7 @@  static int handle_ept_violation(struct k
 	}
 
 	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+	trace_kvm_page_fault(gpa, exit_qualification);
 	return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0);
 }
 
@@ -3395,8 +3388,7 @@  static int vmx_handle_exit(struct kvm_ru
 	u32 exit_reason = vmx->exit_reason;
 	u32 vectoring_info = vmx->idt_vectoring_info;
 
-	KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu),
-		    (u32)((u64)kvm_rip_read(vcpu) >> 32), entryexit);
+	trace_kvm_exit(exit_reason, kvm_rip_read(vcpu));
 
 	/* If we need to emulate an MMIO from handle_invalid_guest_state
 	 * we just return 0 */
@@ -3485,10 +3477,8 @@  static void vmx_complete_interrupts(stru
 
 	/* We need to handle NMIs before interrupts are enabled */
 	if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
-	    (exit_intr_info & INTR_INFO_VALID_MASK)) {
-		KVMTRACE_0D(NMI, &vmx->vcpu, handler);
+	    (exit_intr_info & INTR_INFO_VALID_MASK))
 		asm("int $2");
-	}
 
 	idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 
Index: kvm/arch/x86/kvm/x86.c
===================================================================
--- kvm.orig/arch/x86/kvm/x86.c
+++ kvm/arch/x86/kvm/x86.c
@@ -37,6 +37,8 @@ 
 #include <linux/iommu.h>
 #include <linux/intel-iommu.h>
 #include <linux/cpufreq.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/kvm/x86.h>
 
 #include <asm/uaccess.h>
 #include <asm/msr.h>
@@ -347,9 +349,6 @@  EXPORT_SYMBOL_GPL(kvm_set_cr0);
 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 {
 	kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
-	KVMTRACE_1D(LMSW, vcpu,
-		    (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
-		    handler);
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
@@ -2531,7 +2530,6 @@  int emulate_invlpg(struct kvm_vcpu *vcpu
 
 int emulate_clts(struct kvm_vcpu *vcpu)
 {
-	KVMTRACE_0D(CLTS, vcpu, handler);
 	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
 	return X86EMUL_CONTINUE;
 }
@@ -2814,12 +2812,8 @@  int kvm_emulate_pio(struct kvm_vcpu *vcp
 	vcpu->arch.pio.down = 0;
 	vcpu->arch.pio.rep = 0;
 
-	if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
-		KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
-			    handler);
-	else
-		KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
-			    handler);
+	trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
+		      size, 1);
 
 	val = kvm_register_read(vcpu, VCPU_REGS_RAX);
 	memcpy(vcpu->arch.pio_data, &val, 4);
@@ -2855,12 +2849,8 @@  int kvm_emulate_pio_string(struct kvm_vc
 	vcpu->arch.pio.down = down;
 	vcpu->arch.pio.rep = rep;
 
-	if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
-		KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
-			    handler);
-	else
-		KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
-			    handler);
+	trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
+		      size, count);
 
 	if (!count) {
 		kvm_x86_ops->skip_emulated_instruction(vcpu);
@@ -3038,7 +3028,6 @@  void kvm_arch_exit(void)
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 {
 	++vcpu->stat.halt_exits;
-	KVMTRACE_0D(HLT, vcpu, handler);
 	if (irqchip_in_kernel(vcpu->kvm)) {
 		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
 		return 1;
@@ -3069,7 +3058,7 @@  int kvm_emulate_hypercall(struct kvm_vcp
 	a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
 	a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
 
-	KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
+	trace_kvm_hypercall(nr, a0, a1, a2, a3);
 
 	if (!is_long_mode(vcpu)) {
 		nr &= 0xFFFFFFFF;
@@ -3169,8 +3158,6 @@  unsigned long realmode_get_cr(struct kvm
 		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
 		return 0;
 	}
-	KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
-		    (u32)((u64)value >> 32), handler);
 
 	return value;
 }
@@ -3178,9 +3165,6 @@  unsigned long realmode_get_cr(struct kvm
 void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
 		     unsigned long *rflags)
 {
-	KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
-		    (u32)((u64)val >> 32), handler);
-
 	switch (cr) {
 	case 0:
 		kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
@@ -3290,11 +3274,11 @@  void kvm_emulate_cpuid(struct kvm_vcpu *
 		kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
 	}
 	kvm_x86_ops->skip_emulated_instruction(vcpu);
-	KVMTRACE_5D(CPUID, vcpu, function,
-		    (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
-		    (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
-		    (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
-		    (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
+	trace_kvm_cpuid(function,
+			kvm_register_read(vcpu, VCPU_REGS_RAX),
+			kvm_register_read(vcpu, VCPU_REGS_RBX),
+			kvm_register_read(vcpu, VCPU_REGS_RCX),
+			kvm_register_read(vcpu, VCPU_REGS_RDX));
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
 
@@ -3490,7 +3474,7 @@  static int vcpu_enter_guest(struct kvm_v
 		set_debugreg(vcpu->arch.eff_db[3], 3);
 	}
 
-	KVMTRACE_0D(VMENTRY, vcpu, entryexit);
+	trace_kvm_entry(vcpu->vcpu_id);
 	kvm_x86_ops->run(vcpu, kvm_run);
 
 	if (unlikely(vcpu->arch.switch_db_regs)) {
Index: kvm/include/trace/events/kvm/x86-arch.h
===================================================================
--- /dev/null
+++ kvm/include/trace/events/kvm/x86-arch.h
@@ -0,0 +1,128 @@ 
+#if !defined(_TRACE_KVM_ARCH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_ARCH_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+#define TRACE_INCLUDE_FILE kvm/x86-arch
+
+/*
+ * Tracepoint for kvm guest exit:
+ */
+TRACE_EVENT(kvm_exit,
+	TP_PROTO(unsigned int exit_reason, unsigned long guest_rip),
+	TP_ARGS(exit_reason, guest_rip),
+
+	TP_STRUCT__entry(
+		__field(	unsigned int,	exit_reason	)
+		__field(	unsigned long,	guest_rip	)
+	),
+
+	TP_fast_assign(
+		__entry->exit_reason	= exit_reason;
+		__entry->guest_rip	= guest_rip;
+	),
+
+	TP_printk("reason %s rip 0x%lx",
+		  __print_symbolic(__entry->exit_reason, exit_reasons),
+	   	 __entry->guest_rip)
+);
+
+/*
+ * Tracepoint for kvm interrupt injection:
+ */
+TRACE_EVENT(kvm_inj_virq,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq),
+
+	TP_STRUCT__entry(
+		__field(	unsigned int,	irq		)
+	),
+
+	TP_fast_assign(
+		__entry->irq		= irq;
+	),
+
+	TP_printk("irq %u", __entry->irq)
+);
+
+/*
+ * Tracepoint for page fault.
+ */
+TRACE_EVENT(kvm_page_fault,
+	TP_PROTO(unsigned long fault_address, unsigned int error_code),
+	TP_ARGS(fault_address, error_code),
+
+	TP_STRUCT__entry(
+		__field(	unsigned long,	fault_address	)
+		__field(	unsigned int,	error_code	)
+	),
+
+	TP_fast_assign(
+		__entry->fault_address	= fault_address;
+		__entry->error_code	= error_code;
+	),
+
+	TP_printk("address %lx error_code %x",
+		  __entry->fault_address, __entry->error_code)
+);
+
+/*
+ * Tracepoint for guest MSR access.
+ */
+TRACE_EVENT(kvm_msr,
+	TP_PROTO(unsigned int rw, unsigned int ecx, unsigned long data),
+	TP_ARGS(rw, ecx, data),
+
+	TP_STRUCT__entry(
+		__field(	unsigned int,	rw		)
+		__field(	unsigned int,	ecx		)
+		__field(	unsigned long,	data		)
+	),
+
+	TP_fast_assign(
+		__entry->rw		= rw;
+		__entry->ecx		= ecx;
+		__entry->data		= data;
+	),
+
+	TP_printk("msr_%s %x = 0x%lx",
+		  __entry->rw ? "write" : "read",
+		  __entry->ecx, __entry->data)
+);
+
+#define trace_kvm_msr_read(ecx, data)		trace_kvm_msr(0, ecx, data)
+#define trace_kvm_msr_write(ecx, data)		trace_kvm_msr(1, ecx, data)
+
+/*
+ * Tracepoint for guest CR access.
+ */
+TRACE_EVENT(kvm_cr,
+	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
+	TP_ARGS(rw, cr, val),
+
+	TP_STRUCT__entry(
+		__field(	unsigned int,	rw		)
+		__field(	unsigned int,	cr		)
+		__field(	unsigned long,	val		)
+	),
+
+	TP_fast_assign(
+		__entry->rw		= rw;
+		__entry->cr		= cr;
+		__entry->val		= val;
+	),
+
+	TP_printk("cr_%s %x = 0x%lx",
+		  __entry->rw ? "write" : "read",
+		  __entry->cr, __entry->val)
+);
+
+#define trace_kvm_cr_read(cr, val)		trace_kvm_cr(0, cr, val)
+#define trace_kvm_cr_write(cr, val)		trace_kvm_cr(1, cr, val)
+
+#endif /* _TRACE_KVM_ARCH_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
Index: kvm/include/trace/events/kvm/x86.h
===================================================================
--- /dev/null
+++ kvm/include/trace/events/kvm/x86.h
@@ -0,0 +1,143 @@ 
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+#define TRACE_INCLUDE_FILE kvm/x86
+
+/*
+ * Tracepoint for guest mode entry.
+ */
+TRACE_EVENT(kvm_entry,
+	TP_PROTO(unsigned int vcpu_id),
+	TP_ARGS(vcpu_id),
+
+	TP_STRUCT__entry(
+		__field(	unsigned int,	vcpu_id		)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_id	= vcpu_id;
+	),
+
+	TP_printk("vcpu %u\n", __entry->vcpu_id)
+);
+
+/*
+ * Tracepoint for hypercall.
+ */
+TRACE_EVENT(kvm_hypercall,
+	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
+		 unsigned long a2, unsigned long a3),
+	TP_ARGS(nr, a0, a1, a2, a3),
+
+	TP_STRUCT__entry(
+		__field(	unsigned long, 	nr		)
+		__field(	unsigned long,	a0		)
+		__field(	unsigned long,	a1		)
+		__field(	unsigned long,	a2		)
+		__field(	unsigned long,	a3		)
+	),
+
+	TP_fast_assign(
+		__entry->nr		= nr;
+		__entry->a0		= a0;
+		__entry->a1		= a1;
+		__entry->a2		= a2;
+		__entry->a3		= a3;
+	),
+
+	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
+		 __entry->nr, __entry->a0, __entry->a1,  __entry->a2,
+		 __entry->a3)
+);
+
+/*
+ * Tracepoint for PIO.
+ */
+TRACE_EVENT(kvm_pio,
+	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
+		 unsigned int count),
+	TP_ARGS(rw, port, size, count),
+
+	TP_STRUCT__entry(
+		__field(	unsigned int, 	rw		)
+		__field(	unsigned int, 	port		)
+		__field(	unsigned int, 	size		)
+		__field(	unsigned int,	count		)
+	),
+
+	TP_fast_assign(
+		__entry->rw		= rw;
+		__entry->port		= port;
+		__entry->size		= size;
+		__entry->count		= count;
+	),
+
+	TP_printk("pio_%s at 0x%x size %d count %d",
+		  __entry->rw ? "write" : "read",
+		  __entry->port, __entry->size, __entry->count)
+);
+
+/*
+ * Tracepoint for cpuid.
+ */
+TRACE_EVENT(kvm_cpuid,
+	TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
+		 unsigned long rcx, unsigned long rdx),
+	TP_ARGS(function, rax, rbx, rcx, rdx),
+
+	TP_STRUCT__entry(
+		__field(	unsigned int,	function	)
+		__field(	unsigned long,	rax		)
+		__field(	unsigned long,	rbx		)
+		__field(	unsigned long,	rcx		)
+		__field(	unsigned long,	rdx		)
+	),
+
+	TP_fast_assign(
+		__entry->function	= function;
+		__entry->rax		= rax;
+		__entry->rbx		= rbx;
+		__entry->rcx		= rcx;
+		__entry->rdx		= rdx;
+	),
+
+	TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx",
+		  __entry->function, __entry->rax,
+		  __entry->rbx, __entry->rcx, __entry->rdx)
+);
+
+/*
+ * Tracepoint for apic access.
+ */
+TRACE_EVENT(kvm_apic,
+	TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
+	TP_ARGS(rw, reg, val),
+
+	TP_STRUCT__entry(
+		__field(	unsigned int,	rw		)
+		__field(	unsigned int,	reg		)
+		__field(	unsigned int,	val		)
+	),
+
+	TP_fast_assign(
+		__entry->rw		= rw;
+		__entry->reg		= reg;
+		__entry->val		= val;
+	),
+
+	TP_printk("apic_%s 0x%x = 0x%x",
+		  __entry->rw ? "write" : "read",
+		  __entry->reg, __entry->val)
+);
+
+#define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
+#define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)
+
+#endif /* _TRACE_KVM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
Index: kvm/include/trace/events/kvm/kvm.h
===================================================================
--- /dev/null
+++ kvm/include/trace/events/kvm/kvm.h
@@ -0,0 +1,55 @@ 
+#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_MAIN_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+#define TRACE_INCLUDE_FILE kvm/kvm
+
+TRACE_EVENT(kvm_set_irq,
+	TP_PROTO(unsigned int gsi),
+	TP_ARGS(gsi),
+
+	TP_STRUCT__entry(
+		__field(	unsigned int,	gsi		)
+	),
+
+	TP_fast_assign(
+		__entry->gsi		= gsi;
+	),
+
+	TP_printk("gsi %u", __entry->gsi)
+);
+
+
+#define kvm_irqchips						\
+	{KVM_IRQCHIP_PIC_MASTER,	"PIC master"},		\
+	{KVM_IRQCHIP_PIC_SLAVE,		"PIC slave"},		\
+	{KVM_IRQCHIP_IOAPIC,		"IOAPIC"}
+
+TRACE_EVENT(kvm_ack_irq,
+	TP_PROTO(unsigned int irqchip, unsigned int pin),
+	TP_ARGS(irqchip, pin),
+
+	TP_STRUCT__entry(
+		__field(	unsigned int,	irqchip		)
+		__field(	unsigned int,	pin		)
+	),
+
+	TP_fast_assign(
+		__entry->irqchip		= irqchip;
+		__entry->pin			= pin;
+	),
+
+	TP_printk("irqchip %s pin %u",
+		  __print_symbolic(__entry->irqchip, kvm_irqchips),
+		 __entry->pin)
+);
+
+
+
+#endif /* _TRACE_KVM_MAIN_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
Index: kvm/virt/kvm/irq_comm.c
===================================================================
--- kvm.orig/virt/kvm/irq_comm.c
+++ kvm/virt/kvm/irq_comm.c
@@ -20,6 +20,7 @@ 
  */
 
 #include <linux/kvm_host.h>
+#include <trace/events/kvm/kvm.h>
 
 #include <asm/msidef.h>
 #ifdef CONFIG_IA64
@@ -125,6 +126,8 @@  int kvm_set_irq(struct kvm *kvm, int irq
 	unsigned long *irq_state, sig_level;
 	int ret = -1;
 
+	trace_kvm_set_irq(irq);
+
 	WARN_ON(!mutex_is_locked(&kvm->irq_lock));
 
 	if (irq < KVM_IOAPIC_NUM_PINS) {
@@ -161,6 +164,8 @@  void kvm_notify_acked_irq(struct kvm *kv
 	struct hlist_node *n;
 	unsigned gsi = pin;
 
+	trace_kvm_ack_irq(irqchip, pin);
+
 	list_for_each_entry(e, &kvm->irq_routing, link)
 		if (e->irqchip.irqchip == irqchip &&
 		    e->irqchip.pin == pin) {
Index: kvm/virt/kvm/kvm_main.c
===================================================================
--- kvm.orig/virt/kvm/kvm_main.c
+++ kvm/virt/kvm/kvm_main.c
@@ -59,6 +59,9 @@ 
 #include "irq.h"
 #endif
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/kvm/kvm.h>
+
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
@@ -2715,6 +2718,7 @@  EXPORT_SYMBOL_GPL(kvm_init);
 void kvm_exit(void)
 {
 	kvm_trace_cleanup();
+	tracepoint_synchronize_unregister();
 	misc_deregister(&kvm_dev);
 	kmem_cache_destroy(kvm_vcpu_cache);
 	sysdev_unregister(&kvm_sysdev);