@@ -58,7 +58,8 @@ uint64_t run_in_user(usermode_func func, unsigned int fault_vector,
"pushq %[user_stack_top]\n\t"
"pushfq\n\t"
"pushq %[user_cs]\n\t"
- "pushq $user_mode\n\t"
+ "lea user_mode(%%rip), %%rdx\n\t"
+ "pushq %%rdx\n\t"
"iretq\n"
"user_mode:\n\t"
@@ -81,16 +81,16 @@ tests-common = $(TEST_DIR)/vmexit.$(exe) $(TEST_DIR)/tsc.$(exe) \
$(TEST_DIR)/init.$(exe) \
$(TEST_DIR)/hyperv_synic.$(exe) $(TEST_DIR)/hyperv_stimer.$(exe) \
$(TEST_DIR)/hyperv_connections.$(exe) \
- $(TEST_DIR)/tsx-ctrl.$(exe)
+ $(TEST_DIR)/tsx-ctrl.$(exe) \
+ $(TEST_DIR)/eventinj.$(exe) \
+ $(TEST_DIR)/umip.$(exe)
# The following test cases are disabled when building EFI tests because they
# use absolute addresses in their inline assembly code, which cannot compile
# with the '-fPIC' flag
ifneq ($(TARGET_EFI),y)
-tests-common += $(TEST_DIR)/eventinj.$(exe) \
- $(TEST_DIR)/smap.$(exe) \
- $(TEST_DIR)/realmode.$(exe) \
- $(TEST_DIR)/umip.$(exe)
+tests-common += $(TEST_DIR)/smap.$(exe) \
+ $(TEST_DIR)/realmode.$(exe)
endif
test_cases: $(tests-common) $(tests)
@@ -30,6 +30,8 @@ tests += $(TEST_DIR)/intel-iommu.$(exe)
tests += $(TEST_DIR)/rdpru.$(exe)
tests += $(TEST_DIR)/pks.$(exe)
tests += $(TEST_DIR)/pmu_lbr.$(exe)
+tests += $(TEST_DIR)/emulator.$(exe)
+tests += $(TEST_DIR)/vmware_backdoors.$(exe)
ifeq ($(TARGET_EFI),y)
tests += $(TEST_DIR)/amd_sev.$(exe)
@@ -40,14 +42,13 @@ endif
# with the '-fPIC' flag
ifneq ($(TARGET_EFI),y)
tests += $(TEST_DIR)/access.$(exe)
-tests += $(TEST_DIR)/emulator.$(exe)
tests += $(TEST_DIR)/svm.$(exe)
tests += $(TEST_DIR)/vmx.$(exe)
-tests += $(TEST_DIR)/vmware_backdoors.$(exe)
+endif
+
ifneq ($(fcf_protection_full),)
tests += $(TEST_DIR)/cet.$(exe)
endif
-endif
include $(SRCDIR)/$(TEST_DIR)/Makefile.common
@@ -700,7 +700,7 @@ static int ac_test_do_access(ac_test_t *at)
if (F(AC_ACCESS_TWICE)) {
asm volatile (
- "mov $fixed2, %%rsi \n\t"
+ "lea fixed2(%%rip), %%rsi \n\t"
"mov (%[addr]), %[reg] \n\t"
"fixed2:"
: [reg]"=r"(r), [fault]"=a"(fault), "=b"(e)
@@ -710,7 +710,7 @@ static int ac_test_do_access(ac_test_t *at)
fault = 0;
}
- asm volatile ("mov $fixed1, %%rsi \n\t"
+ asm volatile ("lea fixed1(%%rip), %%rsi \n\t"
"mov %%rsp, %[rsp0] \n\t"
"cmp $0, %[user] \n\t"
"jz do_access \n\t"
@@ -719,7 +719,8 @@ static int ac_test_do_access(ac_test_t *at)
"pushq %[user_stack_top] \n\t"
"pushfq \n\t"
"pushq %[user_cs] \n\t"
- "pushq $do_access \n\t"
+ "lea do_access(%%rip), %%r8\n\t"
+ "pushq %%r8\n\t"
"iretq \n"
"do_access: \n\t"
"cmp $0, %[fetch] \n\t"
@@ -750,7 +751,7 @@ static int ac_test_do_access(ac_test_t *at)
[user_cs]"i"(USER_CS),
[user_stack_top]"r"(user_stack + sizeof user_stack),
[kernel_entry_vector]"i"(0x20)
- : "rsi");
+ : "rsi", "r8");
asm volatile (".section .text.pf \n\t"
"page_fault: \n\t"
@@ -52,7 +52,7 @@ static u64 cet_ibt_func(void)
printf("No endbr64 instruction at jmp target, this triggers #CP...\n");
asm volatile ("movq $2, %rcx\n"
"dec %rcx\n"
- "leaq 2f, %rax\n"
+ "leaq 2f(%rip), %rax\n"
"jmp *%rax \n"
"2:\n"
"dec %rcx\n");
@@ -67,7 +67,8 @@ void test_func(void) {
"pushq %[user_stack_top]\n\t"
"pushfq\n\t"
"pushq %[user_cs]\n\t"
- "pushq $user_mode\n\t"
+ "lea user_mode(%%rip), %%rax\n\t"
+ "pushq %%rax\n\t"
"iretq\n"
"user_mode:\n\t"
@@ -77,7 +78,8 @@ void test_func(void) {
[user_ds]"i"(USER_DS),
[user_cs]"i"(USER_CS),
[user_stack_top]"r"(user_stack +
- sizeof(user_stack)));
+ sizeof(user_stack))
+ : "rax");
}
#define SAVE_REGS() \
@@ -262,12 +262,13 @@ static void test_pop(void *mem)
asm volatile("mov %%rsp, %[tmp] \n\t"
"mov %[stack_top], %%rsp \n\t"
- "push $1f \n\t"
+ "lea 1f(%%rip), %%rax \n\t"
+ "push %%rax \n\t"
"ret \n\t"
"2: jmp 2b \n\t"
"1: mov %[tmp], %%rsp"
: [tmp]"=&r"(tmp) : [stack_top]"r"(stack_top)
- : "memory");
+ : "memory", "rax");
report_pass("ret");
stack_top[-1] = 0x778899;
@@ -155,9 +155,16 @@ asm("do_iret:"
"pushf"W" \n\t"
"mov %cs, %ecx \n\t"
"push"W" %"R "cx \n\t"
+#ifndef __x86_64__
"push"W" $2f \n\t"
"cmpb $0, no_test_device\n\t" // see if need to flush
+#else
+ "leaq 2f(%rip), %rbx \n\t" /* NOTE(review): clobbers callee-saved %rbx; confirm callers of do_iret declare it */
+ "pushq %rbx \n\t"
+
+ "cmpb $0, no_test_device(%rip)\n\t" // see if need to flush
+#endif
"jnz 1f\n\t"
"outl %eax, $0xe4 \n\t" // flush page
"1: \n\t"
@@ -159,12 +159,17 @@ int main(int ac, char **av)
init_test(i);
stac();
test = -1;
+#ifndef __x86_64__
+ #define TEST "test"
+#else
+ #define TEST "test(%rip)"
+#endif
asm("or $(" xstr(USER_BASE) "), %"R "sp \n"
"push $44 \n "
- "decl test\n"
+ "decl " TEST "\n"
"and $~(" xstr(USER_BASE) "), %"R "sp \n"
"pop %"R "ax\n"
- "movl %eax, test");
+ "movl %eax, " TEST "\n");
report(pf_count == 0 && test == 44,
"write to user stack with AC=1");
@@ -173,10 +178,10 @@ int main(int ac, char **av)
test = -1;
asm("or $(" xstr(USER_BASE) "), %"R "sp \n"
"push $45 \n "
- "decl test\n"
+ "decl " TEST "\n"
"and $~(" xstr(USER_BASE) "), %"R "sp \n"
"pop %"R "ax\n"
- "movl %eax, test");
+ "movl %eax, " TEST "\n");
report(pf_count == 1 && test == 45 && save == -1,
"write to user stack with AC=0");
@@ -20,10 +20,20 @@ static void gp_handler(struct ex_regs *regs)
}
}
+#ifndef __x86_64__
+#define GP_ASM_MOVE_TO_RIP \
+ "mov" W " $1f, %[expected_rip]\n\t"
+#else
+#define GP_ASM_MOVE_TO_RIP \
+ "pushq %%rax\n\t" \
+ "lea 1f(%%rip), %%rax\n\t" \
+ "mov %%rax, %[expected_rip]\n\t" \
+ "popq %%rax\n\t"
+#endif
#define GP_ASM(stmt, in, clobber) \
asm volatile ( \
- "mov" W " $1f, %[expected_rip]\n\t" \
+ GP_ASM_MOVE_TO_RIP \
"movl $2f-1f, %[skip_count]\n\t" \
"1: " stmt "\n\t" \
"2: " \
@@ -125,12 +135,18 @@ static noinline int do_ring3(void (*fn)(const char *), const char *arg)
"mov %%dx, %%fs\n\t"
"mov %%dx, %%gs\n\t"
"mov %%" R "sp, %[sp0]\n\t" /* kernel sp for exception handlers */
+ "mov %[sp0], %%" R "bx\n\t" /* ebx/rbx is callee-saved, so its value survives the call into user code */
"push" W " %%" R "dx \n\t"
"lea %[user_stack_top], %%" R "dx \n\t"
"push" W " %%" R "dx \n\t"
"pushf" W "\n\t"
"push" W " %[user_cs] \n\t"
+#ifndef __x86_64__
"push" W " $1f \n\t"
+#else
+ "lea 1f(%%rip), %%rdx \n\t"
+ "pushq %%rdx \n\t"
+#endif
"iret" W "\n"
"1: \n\t"
#ifndef __x86_64__
@@ -140,12 +156,16 @@ static noinline int do_ring3(void (*fn)(const char *), const char *arg)
#ifndef __x86_64__
"pop %%ecx\n\t"
#endif
+#ifndef __x86_64__
"mov $1f, %%" R "dx\n\t"
+#else
+ "lea 1f(%%" R "ip), %%" R "dx\n\t"
+#endif
"int %[kernel_entry_vector]\n\t"
".section .text.entry \n\t"
"kernel_entry: \n\t"
#ifdef __x86_64__
- "mov %[sp0], %%" R "sp\n\t"
+ "mov %%rbx, %%rsp\n\t"
#else
"add $(5 * " S "), %%esp\n\t"
#endif
@@ -171,7 +191,7 @@ static noinline int do_ring3(void (*fn)(const char *), const char *arg)
[arg]"D"(arg),
[kernel_ds]"i"(KERNEL_DS),
[kernel_entry_vector]"i"(0x20)
- : "rcx", "rdx");
+ : "rcx", "rdx", "rbx");
return ret;
}