===================================================================
@@ -301,6 +301,8 @@ struct kvm_vcpu_arch {
		unsigned long mmu_seq;
	} update_pte;
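+	/* RIP at the last interrupt exit, for spin detection (~0UL = invalid) */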
+	unsigned long last_rip;
+
	struct fpu guest_fpu;
	gva_t mmio_fault_cr2;
@@ -653,6 +655,8 @@ void kvm_disable_tdp(void);
int complete_pio(struct kvm_vcpu *vcpu);
bool kvm_check_iopl(struct kvm_vcpu *vcpu);
+void kvm_detect_spin(struct kvm_vcpu *vcpu);
+
struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);
static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
===================================================================
@@ -1558,8 +1558,10 @@ static int nmi_interception(struct vcpu_
static int intr_interception(struct vcpu_svm *svm)
{
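+	/*
+	 * No hardware Pause Filtering: detect spinning guests in software
+	 * on external-interrupt exits.
+	 */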
+	if (!svm_has(SVM_FEATURE_PAUSE_FILTER))
+		kvm_detect_spin(&svm->vcpu);
	++svm->vcpu.stat.irq_exits;
-	return 1;
+	return 2;
}
static int nop_on_interception(struct vcpu_svm *svm)
===================================================================
@@ -3116,7 +3116,9 @@ static int handle_exception(struct kvm_v
static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
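+	/*
+	 * No hardware Pause-Loop Exiting: detect spinning guests in
+	 * software on external-interrupt exits.
+	 */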
	++vcpu->stat.irq_exits;
-	return 1;
+	if (!cpu_has_vmx_ple())
+		kvm_detect_spin(vcpu);
+	return 2;
}
static int handle_triple_fault(struct kvm_vcpu *vcpu)
===================================================================
@@ -4523,6 +4523,17 @@ static void inject_pending_event(struct
	}
}
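+/*
+ * Software spin detection, used when the CPU lacks PLE/Pause Filtering:
+ * two consecutive external-interrupt exits at the same RIP suggest the
+ * VCPU is spinning on a lock, so give up the timeslice with
+ * kvm_vcpu_on_spin().
+ */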
+void kvm_detect_spin(struct kvm_vcpu *vcpu)
+{
+	unsigned long rip = kvm_rip_read(vcpu);
+
+	if (vcpu->arch.last_rip == rip)
+		kvm_vcpu_on_spin(vcpu);
+
+	vcpu->arch.last_rip = rip;
+}
+EXPORT_SYMBOL_GPL(kvm_detect_spin);
+
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
	int r;
@@ -4654,6 +4665,8 @@ static int vcpu_enter_guest(struct kvm_v
	kvm_lapic_sync_from_vapic(vcpu);
	r = kvm_x86_ops->handle_exit(vcpu);
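+	/*
+	 * External-interrupt exit handlers now return 2; any other handled
+	 * exit (r == 1) resets the spin-detection state.
+	 */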
+	if (r == 1)
+		vcpu->arch.last_rip = ~(0UL);
out:
	return r;
}