@@ -48,6 +48,12 @@
#define SPEC_CTRL_STIBP BIT(SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
#define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
+#define SPEC_CTRL_IPRED_DIS_U_SHIFT 3 /* Disable IPRED behavior in user mode */
+#define SPEC_CTRL_IPRED_DIS_U BIT(SPEC_CTRL_IPRED_DIS_U_SHIFT)
+#define SPEC_CTRL_IPRED_DIS_S_SHIFT 4 /* Disable IPRED behavior in supervisor mode */
+#define SPEC_CTRL_IPRED_DIS_S BIT(SPEC_CTRL_IPRED_DIS_S_SHIFT)
+#define SPEC_CTRL_RRSBA_DIS_U_SHIFT 5 /* Disable RRSBA behavior in user mode */
+#define SPEC_CTRL_RRSBA_DIS_U BIT(SPEC_CTRL_RRSBA_DIS_U_SHIFT)
#define SPEC_CTRL_RRSBA_DIS_S_SHIFT 6 /* Disable RRSBA behavior in supervisor mode */
#define SPEC_CTRL_RRSBA_DIS_S BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
#define SPEC_CTRL_BHI_DIS_S_SHIFT 10 /* Disable BHI behavior in supervisor mode */
@@ -116,6 +116,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/amx_test
TEST_GEN_PROGS_x86_64 += x86_64/max_vcpuid_cap_test
TEST_GEN_PROGS_x86_64 += x86_64/triple_fault_event_test
TEST_GEN_PROGS_x86_64 += x86_64/virtual_mitigation_msr_test
+TEST_GEN_PROGS_x86_64 += x86_64/spec_ctrl_msr_test
TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
@@ -125,8 +125,13 @@ struct kvm_x86_cpu_feature {
#define X86_FEATURE_IBT KVM_X86_CPU_FEATURE(0x7, 0, EDX, 20)
#define X86_FEATURE_AMX_TILE KVM_X86_CPU_FEATURE(0x7, 0, EDX, 24)
#define X86_FEATURE_SPEC_CTRL KVM_X86_CPU_FEATURE(0x7, 0, EDX, 26)
+#define X86_FEATURE_INTEL_STIBP KVM_X86_CPU_FEATURE(0x7, 0, EDX, 27)
#define X86_FEATURE_ARCH_CAPABILITIES KVM_X86_CPU_FEATURE(0x7, 0, EDX, 29)
+#define X86_FEATURE_SPEC_CTRL_SSBD KVM_X86_CPU_FEATURE(0x7, 0, EDX, 31)
#define X86_FEATURE_PKS KVM_X86_CPU_FEATURE(0x7, 0, ECX, 31)
+#define X86_FEATURE_IPRED_CTRL KVM_X86_CPU_FEATURE(0x7, 2, EDX, 1)
+#define X86_FEATURE_RRSBA_CTRL KVM_X86_CPU_FEATURE(0x7, 2, EDX, 2)
+#define X86_FEATURE_BHI_CTRL KVM_X86_CPU_FEATURE(0x7, 2, EDX, 4)
#define X86_FEATURE_XTILECFG KVM_X86_CPU_FEATURE(0xD, 0, EAX, 17)
#define X86_FEATURE_XTILEDATA KVM_X86_CPU_FEATURE(0xD, 0, EAX, 18)
#define X86_FEATURE_XSAVES KVM_X86_CPU_FEATURE(0xD, 1, EAX, 3)
new file mode 100644
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023, Intel, Inc.
+ *
+ * tests for IA32_SPEC_CTRL MSR accesses
+ */
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+
+#include "kvm_util.h"
+#include "vmx.h"
+#include "processor.h"
+
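+/*
+ * Write @val to IA32_SPEC_CTRL and verify it reads back, then clear
+ * the MSR and verify the clear sticks as well.
+ */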
+static void set_spec_ctrl(u64 val)
+{
+ /* Set the bit and verify the result */
+ wrmsr(MSR_IA32_SPEC_CTRL, val);
+ GUEST_ASSERT_2(rdmsr(MSR_IA32_SPEC_CTRL) == val, rdmsr(MSR_IA32_SPEC_CTRL), val);
+
+ /* Clear the bit and verify the result */
+ val = 0;
+ wrmsr(MSR_IA32_SPEC_CTRL, val);
+ GUEST_ASSERT_2(rdmsr(MSR_IA32_SPEC_CTRL) == val, rdmsr(MSR_IA32_SPEC_CTRL), val);
+}
+
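+/*
+ * IBRS is architectural whenever IA32_SPEC_CTRL exists; the remaining
+ * bits are exercised only if CPUID enumerates the matching feature.
+ */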
+static void guest_code(void)
+{
+ set_spec_ctrl(SPEC_CTRL_IBRS);
+
+ if (this_cpu_has(X86_FEATURE_INTEL_STIBP))
+ set_spec_ctrl(SPEC_CTRL_STIBP);
+
+ if (this_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+ set_spec_ctrl(SPEC_CTRL_SSBD);
+
+ if (this_cpu_has(X86_FEATURE_IPRED_CTRL)) {
+ set_spec_ctrl(SPEC_CTRL_IPRED_DIS_S);
+ set_spec_ctrl(SPEC_CTRL_IPRED_DIS_U);
+ }
+
+ if (this_cpu_has(X86_FEATURE_RRSBA_CTRL)) {
+ set_spec_ctrl(SPEC_CTRL_RRSBA_DIS_S);
+ set_spec_ctrl(SPEC_CTRL_RRSBA_DIS_U);
+ }
+
+ if (this_cpu_has(X86_FEATURE_BHI_CTRL))
+ set_spec_ctrl(SPEC_CTRL_BHI_DIS_S);
+
+ GUEST_DONE();
+}
+
+static void test_spec_ctrl_access(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_run *run;
+ struct kvm_vm *vm;
+ struct ucall uc;
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ run = vcpu->run;
+
+ while (1) {
+ vcpu_run(vcpu);
+
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Unexpected exit reason: %u (%s)",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT_2(uc, "real %ld expected %ld");
+ break;
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+ }
+
+done:
+ kvm_vm_free(vm);
+}
+
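+/*
+ * L2 expects to inherit L1's SPEC_CTRL value (IBRS) on VM-entry; its
+ * write of 0 must survive the VM-exit back to L1 (l1_guest_code checks).
+ */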
+static void l2_guest_code(void)
+{
+ GUEST_ASSERT(rdmsr(MSR_IA32_SPEC_CTRL) == SPEC_CTRL_IBRS);
+ wrmsr(MSR_IA32_SPEC_CTRL, 0);
+
+ /* Exit to L1 */
+ __asm__ __volatile__("vmcall");
+}
+
+static void l1_guest_code(struct vmx_pages *vmx_pages)
+{
+#define L2_GUEST_STACK_SIZE 64
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+ uint32_t control;
+
+ /*
+ * Try to disable interception of writes to SPEC_CTRL by writing a
+ * nonzero value. This verifies that SPEC_CTRL is preserved across
+ * nested transitions, particularly when writes to the MSR aren't
+ * intercepted by the L0 or L1 VMM.
+ */
+ wrmsr(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
+
+ GUEST_ASSERT(vmx_pages->vmcs_gpa);
+ GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+ GUEST_ASSERT(load_vmcs(vmx_pages));
+ GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
+ prepare_vmcs(vmx_pages, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
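+ /*
+ * Enable MSR bitmaps; the bitmap from vcpu_alloc_vmx() is assumed to be
+ * zero-initialized, so L1 won't intercept L2's SPEC_CTRL accesses.
+ */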
+ control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
+ control |= CPU_BASED_USE_MSR_BITMAPS;
+ vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
+
+ GUEST_ASSERT(!vmlaunch());
+
+ GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+ GUEST_ASSERT(rdmsr(MSR_IA32_SPEC_CTRL) == 0);
+
+ GUEST_DONE();
+}
+
+static void test_spec_ctrl_vmx_transition(void)
+{
+ vm_vaddr_t vmx_pages_gva;
+ struct kvm_vcpu *vcpu;
+ struct kvm_run *run;
+ struct kvm_vm *vm;
+ struct ucall uc;
+
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
+ vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ vcpu_args_set(vcpu, 1, vmx_pages_gva);
+ run = vcpu->run;
+
+ while (1) {
+ vcpu_run(vcpu);
+
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Unexpected exit reason: %u (%s)",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ break;
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+ }
+
+done:
+ kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SPEC_CTRL));
+ test_spec_ctrl_access();
+ test_spec_ctrl_vmx_transition();
+
+ return 0;
+}
Toggle supported bits of IA32_SPEC_CTRL and verify the result. Also
verify the MSR value is preserved across nested transitions.

Signed-off-by: Chao Gao <chao.gao@intel.com>
---
 tools/arch/x86/include/asm/msr-index.h        |   6 +
 tools/testing/selftests/kvm/Makefile          |   1 +
 .../selftests/kvm/include/x86_64/processor.h  |   5 +
 .../selftests/kvm/x86_64/spec_ctrl_msr_test.c | 194 ++++++++++++++++++
 4 files changed, 206 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/x86_64/spec_ctrl_msr_test.c