@@ -4827,6 +4827,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
if (!init_event) {
if (cpu_has_vmx_arch_lbr())
vmcs_write64(GUEST_IA32_LBR_CTL, 0);
+ } else {
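+ /* LBREn is cleared on INIT/warm reset; other LBR MSRs are preserved. */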
+ disable_arch_lbr_ctl(vcpu);
}
}
@@ -7967,6 +7969,8 @@ static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
{
+ struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);

/*
@@ -7983,11 +7987,22 @@ static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
vmx->nested.smm.vmxon = vmx->nested.vmxon;
vmx->nested.vmxon = false;
vmx_clear_hlt(vcpu);
+
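+ /*
+  * Arch LBR spec: IA32_LBR_CTL.LBREn is saved and cleared on #SMI and
+  * restored on RSM. Emulate that by stashing the guest's LBR_CTL in the
+  * SMRAM state-save area and clearing LBREn for the duration of SMM.
+  */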
+ if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
+ test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use) &&
+ lbr_desc->event && guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+ u64 ctl = vmcs_read64(GUEST_IA32_LBR_CTL);
+
+ put_smstate(u64, smstate, 0x7f10, ctl);
+ vmcs_write64(GUEST_IA32_LBR_CTL, ctl & ~ARCH_LBR_CTL_LBREN);
+ }
+
return 0;
}

static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
+ struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
int ret;
@@ -8004,6 +8019,17 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
vmx->nested.nested_run_pending = 1;
vmx->nested.smm.guest_mode = false;
}
+
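+ /*
+  * On RSM, restore IA32_LBR_CTL from the SMRAM state-save area with
+  * LBREn set again; if guest LBRs are enabled, recreate the perf event
+  * backing them in case it has gone away.
+  */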
+ if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
+ guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+ u64 ctl = GET_SMSTATE(u64, smstate, 0x7f10);
+
+ vmcs_write64(GUEST_IA32_LBR_CTL, ctl | ARCH_LBR_CTL_LBREN);
+
+ if (intel_pmu_lbr_is_enabled(vcpu) && !lbr_desc->event)
+ intel_pmu_create_guest_lbr_event(vcpu);
+ }
+
return 0;
}
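Note: disable_arch_lbr_ctl(), called from the vmx_vcpu_reset() hunk above, is not defined in this excerpt. Assuming it simply mirrors the inline LBREn clearing done in vmx_enter_smm() above, a minimal sketch of such a helper might look like the following (the body is illustrative only, not the patch's actual definition):

static void disable_arch_lbr_ctl(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	/* Illustrative only: drop LBREn from the guest LBR_CTL while a guest LBR event is active. */
	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
	    test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use) &&
	    lbr_desc->event)
		vmcs_write64(GUEST_IA32_LBR_CTL,
			     vmcs_read64(GUEST_IA32_LBR_CTL) & ~ARCH_LBR_CTL_LBREN);
}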