@@ -1687,6 +1687,24 @@ static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
 		vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
 }
 
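+/*
+ * Per the SDM, delivery of a #DB clears IA32_LBR_CTL.LBREn.  Emulate that
+ * by clearing LBREn in the guest VMCS field when a #DB is injected.
+ */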
+static void disable_arch_lbr_ctl(struct kvm_vcpu *vcpu)
+{
+	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+
+	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
+	    test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use) &&
+	    lbr_desc->event) {
+		u64 ctl = vmcs_read64(GUEST_IA32_LBR_CTL);
+
+		vmcs_write64(GUEST_IA32_LBR_CTL, ctl & ~ARCH_LBR_CTL_LBREN);
+	}
+}
+
 static void vmx_queue_exception(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -1722,6 +1740,10 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu)
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
 
 	vmx_clear_hlt(vcpu);
+
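+	/* Hardware delivery of a #DB clears LBREn; do the same on injection. */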
+	if (nr == DB_VECTOR)
+		disable_arch_lbr_ctl(vcpu);
 }
 
 static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr,
@@ -4886,6 +4908,10 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
 
 	vmx_clear_hlt(vcpu);
+
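+	/* A #DB queued alongside this NMI also clears LBREn on delivery. */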
+	if (vcpu->arch.exception.nr == DB_VECTOR)
+		disable_arch_lbr_ctl(vcpu);
 }
 
 bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)