diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -34,6 +34,7 @@
#include <asm/kvm_page_track.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/hyperv-tlfs.h>
+#include <asm/vmx.h>
#define __KVM_HAVE_ARCH_VCPU_DEBUGFS
@@ -1257,6 +1258,9 @@ struct kvm_vcpu_stat {
u64 directed_yield_attempted;
u64 directed_yield_successful;
u64 guest_mode;
+ u64 vmx_all_exits[EXIT_REASON_NUM];
+ u64 vmx_l2_exits[EXIT_REASON_NUM];
+ u64 vmx_nested_exits[EXIT_REASON_NUM];
};
struct x86_instruction_info;
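For a rough sense of scale (an illustrative calculation only, assuming EXIT_REASON_NUM stays at 75 as defined in the next hunk and that each counter is a u64), the three histograms add a small, fixed amount of memory per vCPU:

/* Illustrative only: per-vCPU memory added by the three histogram arrays. */
#include <stdio.h>

#define EXIT_REASON_BUS_LOCK	74
#define EXIT_REASON_NUM		(EXIT_REASON_BUS_LOCK + 1)

int main(void)
{
	/* 3 arrays * 75 buckets * 8 bytes = 1800 bytes per vCPU */
	unsigned long bytes = 3UL * EXIT_REASON_NUM * sizeof(unsigned long long);

	printf("extra bytes per vCPU: %lu\n", bytes);
	return 0;
}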
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -91,6 +91,7 @@
#define EXIT_REASON_UMWAIT 67
#define EXIT_REASON_TPAUSE 68
#define EXIT_REASON_BUS_LOCK 74
+#define EXIT_REASON_NUM (EXIT_REASON_BUS_LOCK + 1)
#define VMX_EXIT_REASONS \
{ EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
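A hedged aside, not part of the patch: EXIT_REASON_NUM is used purely as a bucket count for the histograms above, so code that indexes the arrays with a reason taken straight from hardware could guard against values the table does not yet cover, along these hypothetical lines:

#include <linux/types.h>	/* u32, u64 */
#include <asm/vmx.h>		/* EXIT_REASON_NUM (added above) */

/* Hypothetical helper, not in this patch: bump one histogram bucket,
 * silently dropping basic exit reasons beyond EXIT_REASON_BUS_LOCK. */
static inline void vmx_account_exit_reason(u64 *hist, u32 basic_reason)
{
	if (basic_reason < EXIT_REASON_NUM)
		++hist[basic_reason];
}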
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -5890,6 +5890,8 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
u32 vectoring_info = vmx->idt_vectoring_info;
u16 exit_handler_index;
+ ++vcpu->stat.vmx_all_exits[exit_reason.basic];
+
/*
* Flush logged GPAs PML buffer, this will make dirty_bitmap more
* updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
@@ -5915,6 +5917,7 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
return handle_invalid_guest_state(vcpu);
if (is_guest_mode(vcpu)) {
+ ++vcpu->stat.vmx_l2_exits[exit_reason.basic];
/*
* PML is never enabled when running L2, bail immediately if a
* PML full exit occurs as something is horribly wrong.
@@ -5935,8 +5938,10 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
*/
nested_mark_vmcs12_pages_dirty(vcpu);
- if (nested_vmx_reflect_vmexit(vcpu))
+ if (nested_vmx_reflect_vmexit(vcpu)) {
+ ++vcpu->stat.vmx_nested_exits[exit_reason.basic];
return 1;
+ }
}
if (exit_reason.failed_vmentry) {
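To spell out how the three counters relate: every exit that reaches __vmx_handle_exit bumps vmx_all_exits, exits taken while L2 is running also bump vmx_l2_exits, and the subset of those that are reflected back to L1 additionally bumps vmx_nested_exits. The helper below is a hypothetical post-processing sketch, not part of the patch, and assumes the histograms are read as a coherent snapshot:

#include <linux/kvm_host.h>	/* struct kvm_vcpu_stat, u32, u64 */

/* Hypothetical, not in this patch: derive a per-reason breakdown from a
 * snapshot of the three histograms added above. */
struct vmx_exit_breakdown {
	u64 l1_exits;		/* exits taken while running L1 */
	u64 l2_handled_by_l0;	/* L2 exits that KVM (L0) handled itself */
	u64 l2_reflected_to_l1;	/* L2 exits forwarded to the L1 hypervisor */
};

static void vmx_compute_exit_breakdown(const struct kvm_vcpu_stat *stat,
				       u32 reason,
				       struct vmx_exit_breakdown *out)
{
	out->l1_exits = stat->vmx_all_exits[reason] - stat->vmx_l2_exits[reason];
	out->l2_reflected_to_l1 = stat->vmx_nested_exits[reason];
	out->l2_handled_by_l0 = stat->vmx_l2_exits[reason] -
				stat->vmx_nested_exits[reason];
}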
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -277,7 +277,10 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
STATS_DESC_COUNTER(VCPU, nested_run),
STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
STATS_DESC_COUNTER(VCPU, directed_yield_successful),
- STATS_DESC_ICOUNTER(VCPU, guest_mode)
+ STATS_DESC_ICOUNTER(VCPU, guest_mode),
+ STATS_DESC_LINHIST_COUNTER(VCPU, vmx_all_exits, EXIT_REASON_NUM, 1),
+ STATS_DESC_LINHIST_COUNTER(VCPU, vmx_l2_exits, EXIT_REASON_NUM, 1),
+ STATS_DESC_LINHIST_COUNTER(VCPU, vmx_nested_exits, EXIT_REASON_NUM, 1),
};
const struct kvm_stats_header kvm_vcpu_stats_header = {
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1429,6 +1429,10 @@ struct _kvm_stats_desc {
#define STATS_DESC_PCOUNTER(SCOPE, name) \
STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \
KVM_STATS_BASE_POW10, 0)
+/* Linear histogram for counter */
+#define STATS_DESC_LINHIST_COUNTER(SCOPE, name, sz, bsz) \
+ STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_NONE, \
+ KVM_STATS_BASE_POW10, 0, sz, bsz)
/* Cumulative time in nanosecond */
#define STATS_DESC_TIME_NSEC(SCOPE, name) \
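A brief reader-side sketch (hedged, hypothetical, and only mirroring the usual linear-histogram convention): a stat declared with STATS_DESC_LINHIST_COUNTER(..., sz, bsz) exposes sz buckets of width bsz, so with sz = EXIT_REASON_NUM and bsz = 1, bucket i is simply the count for basic exit reason i.

/* Hypothetical reader-side helper, not in this patch: map a sample value
 * to its linear-histogram bucket.  With bucket_size == 1 the mapping is
 * the identity, i.e. one bucket per basic exit reason. */
static inline unsigned int linear_hist_bucket(unsigned long long value,
					      unsigned int bucket_size,
					      unsigned int nr_buckets)
{
	unsigned long long idx = value / bucket_size;

	/* clamp defensively to the last bucket */
	return idx < nr_buckets ? (unsigned int)idx : nr_buckets - 1;
}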
These stats will be used to monitor nested virtualization use in VMs.
Most importantly: VMXON exits are evidence that the guest has enabled VMX;
VMLAUNCH/VMRESUME exits are evidence that the guest has run an L2.

Original-by: Peter Feiner <pfeiner@google.com>
Signed-off-by: Jing Zhang <jingzhangos@google.com>
---
 arch/x86/include/asm/kvm_host.h | 4 ++++
 arch/x86/include/uapi/asm/vmx.h | 1 +
 arch/x86/kvm/vmx/vmx.c          | 7 ++++++-
 arch/x86/kvm/x86.c              | 5 ++++-
 include/linux/kvm_host.h        | 4 ++++
 5 files changed, 19 insertions(+), 2 deletions(-)

base-commit: 680c7e3be6a3d502248771fe42c911f99d7e006c
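As a usage illustration of the monitoring described above (hypothetical userspace logic, not part of the patch; it assumes the vmx_all_exits histogram has already been read out of the per-vCPU stats and uses the architectural basic exit reason encodings VMLAUNCH = 20, VMRESUME = 24, VMXON = 27):

/* Hypothetical userspace check, not part of the patch: decide whether a
 * guest is using nested virtualization from a snapshot of vmx_all_exits. */
#include <stdbool.h>
#include <stdint.h>

#define VMX_EXIT_VMLAUNCH	20
#define VMX_EXIT_VMRESUME	24
#define VMX_EXIT_VMXON		27

/* hist points at the vmx_all_exits histogram read from the vCPU stats. */
static bool guest_enabled_vmx(const uint64_t *hist)
{
	return hist[VMX_EXIT_VMXON] > 0;	/* guest executed VMXON */
}

static bool guest_ran_l2(const uint64_t *hist)
{
	/* guest launched or resumed an L2 at least once */
	return hist[VMX_EXIT_VMLAUNCH] + hist[VMX_EXIT_VMRESUME] > 0;
}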