@@ -388,6 +388,11 @@ typedef enum X86Seg {
#define MSR_IA32_TSX_CTRL 0x122
#define MSR_IA32_TSCDEADLINE 0x6e0
#define MSR_IA32_PKRS 0x6e1
+#define MSR_ARCH_LBR_CTL 0x000014ce
+#define MSR_ARCH_LBR_DEPTH 0x000014cf
+#define MSR_ARCH_LBR_FROM_0 0x00001500
+#define MSR_ARCH_LBR_TO_0 0x00001600
+#define MSR_ARCH_LBR_INFO_0 0x00001200

#define FEATURE_CONTROL_LOCKED (1<<0)
#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1ULL << 1)
@@ -1659,6 +1664,11 @@ typedef struct CPUX86State {
    uint64_t msr_xfd;
    uint64_t msr_xfd_err;

+    /* Per-VCPU Arch LBR MSRs */
+    uint64_t msr_lbr_ctl;
+    uint64_t msr_lbr_depth;
+    LBR_ENTRY lbr_records[ARCH_LBR_NR_ENTRIES];
+
    /* exception/interrupt handling */
    int error_code;
    int exception_is_int;
@@ -3273,6 +3273,38 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
                              env->msr_xfd_err);
        }

+        if (kvm_enabled() && cpu->enable_pmu &&
+            (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
+            uint64_t depth;
+            int i, ret;
+
+            /*
+             * Only migrate Arch LBR states when: 1) Arch LBR is enabled
+             * for the migrated vcpu, and 2) the host Arch LBR depth equals
+             * that of the source guest. This avoids a guest/host mismatch
+             * in the depth MSR and hence unexpected misbehavior.
+             */
+            ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
+
+            if (ret == 1 && (env->msr_lbr_ctl & 0x1) && !!depth &&
+                depth == env->msr_lbr_depth) {
+                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, env->msr_lbr_ctl);
+                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, env->msr_lbr_depth);
+
+                for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
+                    if (!env->lbr_records[i].from) {
+                        continue;
+                    }
+                    kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i,
+                                      env->lbr_records[i].from);
+                    kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i,
+                                      env->lbr_records[i].to);
+                    kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i,
+                                      env->lbr_records[i].info);
+                }
+            }
+        }
+
        /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
         * kvm_put_msr_feature_control. */
    }
@@ -3670,6 +3702,26 @@ static int kvm_get_msrs(X86CPU *cpu)
        kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 0);
    }

+    if (kvm_enabled() && cpu->enable_pmu &&
+        (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
+        uint64_t ctl, depth;
+        int i, ret2;
+
+        ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_CTL, &ctl);
+        ret2 = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
+        if (ret == 1 && ret2 == 1 && (ctl & 0x1) &&
+            depth == ARCH_LBR_NR_ENTRIES) {
+            kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, 0);
+            kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, 0);
+
+            for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
+                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i, 0);
+                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i, 0);
+                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i, 0);
+            }
+        }
+    }
+
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
@@ -3972,6 +4024,21 @@ static int kvm_get_msrs(X86CPU *cpu)
        case MSR_IA32_XFD_ERR:
            env->msr_xfd_err = msrs[i].data;
            break;
+        case MSR_ARCH_LBR_CTL:
+            env->msr_lbr_ctl = msrs[i].data;
+            break;
+        case MSR_ARCH_LBR_DEPTH:
+            env->msr_lbr_depth = msrs[i].data;
+            break;
+        case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
+            env->lbr_records[index - MSR_ARCH_LBR_FROM_0].from = msrs[i].data;
+            break;
+        case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
+            env->lbr_records[index - MSR_ARCH_LBR_TO_0].to = msrs[i].data;
+            break;
+        case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
+            env->lbr_records[index - MSR_ARCH_LBR_INFO_0].info = msrs[i].data;
+            break;
        }
    }

In the first generation of Arch LBR, the maximum supported Arch LBR depth is
32, and both host and guest use that value to program the depth MSR. This
simplifies the implementation of this patch, given the side effect of a
host/guest depth MSR mismatch: XRSTORS resets all recording MSRs to 0 if the
saved depth does not match MSR_ARCH_LBR_DEPTH.

In most cases Arch LBR is not active, so check the control bit before
saving/restoring the big chunk of Arch LBR MSRs.

Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
---
 target/i386/cpu.h     | 10 +++++++
 target/i386/kvm/kvm.c | 67 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 77 insertions(+)
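
Note: the hunks above use ARCH_LBR_NR_ENTRIES and the LBR_ENTRY record type
without defining them here; they are assumed to be introduced by an earlier
patch in this series that adds Arch LBR support to target/i386/cpu.h. As a
rough sketch for readability (an assumption, not part of this patch), those
definitions would look roughly like:

/* Assumed definitions from an earlier Arch LBR patch in target/i386/cpu.h. */

/* First-generation Arch LBR supports a depth of at most 32 entries. */
#define ARCH_LBR_NR_ENTRIES            32

/*
 * One LBR record: branch source, branch target and branch metadata,
 * mirroring MSR_ARCH_LBR_FROM_0/TO_0/INFO_0 + i for entry i.
 */
typedef struct {
    uint64_t from;
    uint64_t to;
    uint64_t info;
} LBR_ENTRY;

With the depth fixed at 32, each MSR family forms a contiguous block of 32
registers, which is what the MSR_ARCH_LBR_*_0 ... MSR_ARCH_LBR_*_0 + 31 case
ranges in kvm_get_msrs() rely on.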