@@ -34,6 +34,7 @@ tests += $(TEST_DIR)/rdpru.$(exe)
tests += $(TEST_DIR)/pks.$(exe)
tests += $(TEST_DIR)/pmu_lbr.$(exe)
tests += $(TEST_DIR)/pmu_pebs.$(exe)
+tests += $(TEST_DIR)/sysenter.$(exe)
ifeq ($(CONFIG_EFI),y)
@@ -61,3 +62,4 @@ $(TEST_DIR)/hyperv_clock.$(bin): $(TEST_DIR)/hyperv_clock.o
$(TEST_DIR)/vmx.$(bin): $(TEST_DIR)/vmx_tests.o
$(TEST_DIR)/svm.$(bin): $(TEST_DIR)/svm_tests.o
$(TEST_DIR)/svm_npt.$(bin): $(TEST_DIR)/svm_npt.o
+$(TEST_DIR)/sysenter.o: CFLAGS += -Wa,-mintel64
diff --git a/x86/sysenter.c b/x86/sysenter.c
new file mode 100644
--- /dev/null
+++ b/x86/sysenter.c
@@ -0,0 +1,203 @@
+#include "alloc.h"
+#include "libcflat.h"
+#include "processor.h"
+#include "msr.h"
+#include "desc.h"
+
+// define this to test SYSENTER/SYSEXIT in 64 bit mode
+//#define TEST_64_BIT
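+// (left disabled by default; as noted in main(), KVM does not emulate
+// SYSENTER/SYSEXIT in 64 bit mode on AMD)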
+
+static void test_comp32(void)
+{
+ ulong rax = 0xDEAD;
+
+ extern void sysenter_target_32(void);
+
+ wrmsr(MSR_IA32_SYSENTER_EIP, (uint64_t)sysenter_target_32);
+
+ asm volatile (
+ "# switch to comp32, mode prior to running the test\n"
+ "ljmpl *1f\n"
+ "1:\n"
+ ".long 1f\n"
+ ".long " xstr(KERNEL_CS32) "\n"
+ "1:\n"
+ ".code32\n"
+
+ "#+++++++++++++++++++++++++++++++++++++++++++++++++++\n"
+ "# user code (comp32)\n"
+ "#+++++++++++++++++++++++++++++++++++++++++++++++++++\n"
+
+ "# use sysenter to enter 64 bit system code\n"
+ "mov %%esp, %%ecx #stash rsp value\n"
+ "mov $1, %%ebx\n"
+ "sysenter\n"
+ "ud2\n"
+
+ "#+++++++++++++++++++++++++++++++++++++++++++++++++++\n"
+ "# 64 bit cpl=0 code\n"
+ "#+++++++++++++++++++++++++++++++++++++++++++++++++++\n"
+
+ ".code64\n"
+ "sysenter_target_32:\n"
+ "test %%rbx, %%rbx # check if we are here for second time\n"
+ "jne 1f\n"
+ "movq %%rcx, %%rsp # restore stack pointer manually\n"
+ "jmp test_done_32\n"
+ "1:\n"
+
+ "# test that MSR_IA32_SYSENTER_ESP is correct\n"
+ "movq $0xAAFFFFFFFF, %%rbx\n"
+ "movq $0xDEAD, %%rax\n"
+ "cmpq %%rsp, %%rbx\n"
+ "jne 1f\n"
+ "movq $0xACED, %%rax\n"
+
+ "# use sysexit to exit back\n"
+ "1:\n"
+ "leaq sysexit_target, %%rdx\n"
+ "sysexit\n"
+
+ "sysexit_target:\n"
+
+ "#+++++++++++++++++++++++++++++++++++++++++++++++++++\n"
+ "# exit back to 64 bit mode using a second sysenter\n"
+ "# due to rbx == 0, the sysenter handler will jump back to\n"
+ "# here without sysexit due to ebx=0\n"
+ "#+++++++++++++++++++++++++++++++++++++++++++++++++++\n"
+
+ ".code32\n"
+ "mov $0, %%ebx\n"
+ "sysenter\n"
+
+ ".code64\n"
+ "test_done_32:\n"
+ "nop\n"
+
+ : /*outputs*/
+ "=a" (rax)
+ : /* inputs*/
+ : /*clobbers*/
+ "rbx", /* action flag for sysenter_target */
+ "rcx", /* saved RSP */
+ "rdx", /* used for SYSEXIT*/
+ "flags"
+ );
+
+ report(rax == 0xACED, "MSR_IA32_SYSENTER_ESP has correct value");
+}
+
+#ifdef TEST_64_BIT
+static void test_64_bit(void)
+{
+ extern void test_done_64(void);
+ extern void sysenter_target_64(void);
+
+ ulong rax = 0xDEAD;
+ u8 *sysexit_thunk = (u8 *)malloc(50);
+ u8 *tmp = sysexit_thunk;
+
+ /* Allocate the SYSEXIT thunk. Its purpose is to live at an address
+ * that needs more than 32 bits, to test that SYSEXIT can jump to
+ * such addresses.
+ *
+ * TODO: malloc seems to return addresses from the top of the
+ * virtual address space, but it would be better to use a dedicated API.
+ */
+ printf("SYSEXIT Thunk at 0x%lx\n", (u64)sysexit_thunk);
+
+ /* movabs test_done, %rdx*/
+ *tmp++ = 0x48; *tmp++ = 0xBA;
+ *(u64 *)tmp = (uint64_t)test_done_64; tmp += 8;
+ /* jmp %%rdx*/
+ *tmp++ = 0xFF; *tmp++ = 0xe2;
+
+ wrmsr(MSR_IA32_SYSENTER_EIP, (uint64_t)sysenter_target_64);
+
+ asm volatile (
+ "#+++++++++++++++++++++++++++++++++++++++++++++++++++\n"
+ "# user code (64 bit)\n"
+ "#+++++++++++++++++++++++++++++++++++++++++++++++++++\n"
+
+ "# store the 64 bit thunk address to rdx\n"
+ "mov %[sysexit_thunk], %%rdx\n"
+ "# use sysenter to enter 64 bit system code\n"
+ "mov %%esp, %%ecx #stash rsp value\n"
+ "mov $1, %%ebx\n"
+ "sysenter\n"
+ "ud2\n"
+
+ "#+++++++++++++++++++++++++++++++++++++++++++++++++++\n"
+ "# 64 bit cpl=0 code\n"
+ "#+++++++++++++++++++++++++++++++++++++++++++++++++++\n"
+
+ ".code64\n"
+ "sysenter_target_64:\n"
+ "# test that MSR_IA32_SYSENTER_ESP is correct\n"
+ "movq $0xAAFFFFFFFF, %%rbx\n"
+ "movq $0xDEAD, %%rax\n"
+ "cmpq %%rsp, %%rbx\n"
+ "jne 1f\n"
+ "movq $0xACED, %%rax\n"
+
+ "# use sysexit to exit back\n"
+ "1:\n"
+
+ "# this will go through thunk to test_done_64, which tests\n"
+ "# that we can sysexit to a high address\n"
+ ".byte 0x48\n"
+ "sysexit\n"
+ "ud2\n"
+
+ ".code64\n"
+ "test_done_64:\n"
+ "nop\n"
+
+ : /*outputs*/
+ "=a" (rax)
+ : /* inputs*/
+ [sysexit_thunk] "r" (sysexit_thunk)
+ : /*clobbers*/
+ "rbx", /* action flag for sysenter_target */
+ "rcx", /* saved RSP */
+ "rdx", /* used for SYSEXIT*/
+ "flags"
+ );
+ report(rax == 0xACED, "MSR_IA32_SYSENTER_ESP has correct value");
+}
+#endif
+
+int main(int ac, char **av)
+{
+ setup_vm();
+
+ int gdt_index = 0x50 >> 3;
+
+ /* init the sysenter GDT block */
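+ /*
+ * SYSENTER loads CS from MSR_IA32_SYSENTER_CS and SS from
+ * MSR_IA32_SYSENTER_CS + 8; the 32 bit SYSEXIT loads CS from
+ * MSR_IA32_SYSENTER_CS + 16 and SS from MSR_IA32_SYSENTER_CS + 24.
+ * The four GDT entries below mirror that layout.
+ */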
+ gdt[gdt_index+0] = gdt[KERNEL_CS >> 3];
+ gdt[gdt_index+1] = gdt[KERNEL_DS >> 3];
+ gdt[gdt_index+2] = gdt[USER_CS >> 3];
+ gdt[gdt_index+3] = gdt[USER_DS >> 3];
+
+ /* init the sysenter MSRs */
+ wrmsr(MSR_IA32_SYSENTER_CS, gdt_index << 3);
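+ /*
+ * The ESP value is deliberately above 4 GiB: in long mode SYSENTER
+ * loads the full 64 bit MSR_IA32_SYSENTER_ESP into RSP, which the
+ * sysenter handlers above check for.
+ */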
+ wrmsr(MSR_IA32_SYSENTER_ESP, 0xAAFFFFFFFF);
+ test_comp32();
+
+ /*
+ * On AMD, the SYSENTER/SYSEXIT instructions are not supported in
+ * long mode (neither in 64 bit nor in comp32 mode).
+ *
+ * However, KVM does emulate them in comp32 mode to support migration,
+ * iff the guest CPU model is Intel,
+ *
+ * but it doesn't emulate them in 64 bit mode because there is no good
+ * reason to use these instructions in 64 bit mode anyway.
+ */
+
+#ifdef TEST_64_BIT
+ test_64_bit();
+#endif
+ return report_summary();
+}
+
+
diff --git a/x86/unittests.cfg b/x86/unittests.cfg
--- a/x86/unittests.cfg
+++ b/x86/unittests.cfg
@@ -245,6 +245,11 @@ file = syscall.flat
arch = x86_64
extra_params = -cpu Opteron_G1,vendor=AuthenticAMD
+[sysenter]
+file = sysenter.flat
+arch = x86_64
+extra_params = -cpu host,vendor=GenuineIntel
+
[tsc]
file = tsc.flat
extra_params = -cpu kvm64,+rdtscp
Run the test with Intel's vendor ID and in long mode, to test the
emulation of this instruction on AMD.

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
---
 x86/Makefile.x86_64 |   2 +
 x86/sysenter.c      | 203 ++++++++++++++++++++++++++++++++++++++++++++
 x86/unittests.cfg   |   5 ++
 3 files changed, 210 insertions(+)
 create mode 100644 x86/sysenter.c
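For reference, assuming the standard kvm-unit-tests runner scripts, the new
test can be run either through the harness or standalone, e.g.:

  ./run_tests.sh sysenter
  ./x86/run x86/sysenter.flat -cpu host,vendor=GenuineIntel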