@@ -194,41 +194,9 @@ struct regs get_regs(void)
// rax handled specially below
-#define SAVE_GPR_C \
- "xchg %%rbx, regs+0x8\n\t" \
- "xchg %%rcx, regs+0x10\n\t" \
- "xchg %%rdx, regs+0x18\n\t" \
- "xchg %%rbp, regs+0x28\n\t" \
- "xchg %%rsi, regs+0x30\n\t" \
- "xchg %%rdi, regs+0x38\n\t" \
- "xchg %%r8, regs+0x40\n\t" \
- "xchg %%r9, regs+0x48\n\t" \
- "xchg %%r10, regs+0x50\n\t" \
- "xchg %%r11, regs+0x58\n\t" \
- "xchg %%r12, regs+0x60\n\t" \
- "xchg %%r13, regs+0x68\n\t" \
- "xchg %%r14, regs+0x70\n\t" \
- "xchg %%r15, regs+0x78\n\t"
-
-#define LOAD_GPR_C SAVE_GPR_C
struct svm_test *v2_test;
-#define ASM_PRE_VMRUN_CMD \
- "vmload %%rax\n\t" \
- "mov regs+0x80, %%r15\n\t" \
- "mov %%r15, 0x170(%%rax)\n\t" \
- "mov regs, %%r15\n\t" \
- "mov %%r15, 0x1f8(%%rax)\n\t" \
- LOAD_GPR_C \
-
-#define ASM_POST_VMRUN_CMD \
- SAVE_GPR_C \
- "mov 0x170(%%rax), %%r15\n\t" \
- "mov %%r15, regs+0x80\n\t" \
- "mov 0x1f8(%%rax), %%r15\n\t" \
- "mov %%r15, regs\n\t" \
- "vmsave %%rax\n\t" \
u64 guest_stack[10000];
@@ -416,10 +416,57 @@ void vmcb_ident(struct vmcb *vmcb);
struct regs get_regs(void);
void vmmcall(void);
int __svm_vmrun(u64 rip);
+void __svm_bare_vmrun(void);
int svm_vmrun(void);
void test_set_guest(test_guest_func func);
extern struct vmcb *vmcb;
extern struct svm_test svm_tests[];
+
+#define SAVE_GPR_C \
+ "xchg %%rbx, regs+0x8\n\t" \
+ "xchg %%rcx, regs+0x10\n\t" \
+ "xchg %%rdx, regs+0x18\n\t" \
+ "xchg %%rbp, regs+0x28\n\t" \
+ "xchg %%rsi, regs+0x30\n\t" \
+ "xchg %%rdi, regs+0x38\n\t" \
+ "xchg %%r8, regs+0x40\n\t" \
+ "xchg %%r9, regs+0x48\n\t" \
+ "xchg %%r10, regs+0x50\n\t" \
+ "xchg %%r11, regs+0x58\n\t" \
+ "xchg %%r12, regs+0x60\n\t" \
+ "xchg %%r13, regs+0x68\n\t" \
+ "xchg %%r14, regs+0x70\n\t" \
+ "xchg %%r15, regs+0x78\n\t"
+
+#define LOAD_GPR_C SAVE_GPR_C
+
+#define ASM_PRE_VMRUN_CMD \
+ "vmload %%rax\n\t" \
+ "mov regs+0x80, %%r15\n\t" \
+ "mov %%r15, 0x170(%%rax)\n\t" \
+ "mov regs, %%r15\n\t" \
+ "mov %%r15, 0x1f8(%%rax)\n\t" \
+ LOAD_GPR_C \
+
+#define ASM_POST_VMRUN_CMD \
+ SAVE_GPR_C \
+ "mov 0x170(%%rax), %%r15\n\t" \
+ "mov %%r15, regs+0x80\n\t" \
+ "mov 0x1f8(%%rax), %%r15\n\t" \
+ "mov %%r15, regs\n\t" \
+ "vmsave %%rax\n\t" \
+
+
+
+#define SVM_BARE_VMRUN \
+ asm volatile ( \
+ ASM_PRE_VMRUN_CMD \
+ "vmrun %%rax\n\t" \
+ ASM_POST_VMRUN_CMD \
+ : \
+ : "a" (virt_to_phys(vmcb)) \
+ : "memory", "r15") \
+
#endif
This will be useful in nested LBR tests to ensure that no extra
branches are made in the guest entry.

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
---
 x86/svm.c | 32 --------------------------------
 x86/svm.h | 47 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 47 insertions(+), 32 deletions(-)
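
For illustration, a minimal sketch of how a test might invoke the new
macro, using only names already declared in x86/svm.h above. The guest
function and caller names are hypothetical, and any wider harness setup
(SVM enablement, VMCB allocation) is assumed to have happened elsewhere:

/*
 * Hypothetical caller of SVM_BARE_VMRUN. Unlike svm_vmrun(), the
 * macro expands inline at the call site, so no call/ret branches
 * surround the VMRUN itself -- the property the nested LBR tests
 * mentioned above rely on.
 */
static void lbr_quiet_guest(struct svm_test *test)
{
	vmmcall();			/* exit straight back to the host  */
}

static void run_guest_bare(void)
{
	vmcb_ident(vmcb);		/* reset the VMCB to a known state */
	test_set_guest(lbr_quiet_guest);	/* install the guest entry */
	SVM_BARE_VMRUN;			/* inline vmload + vmrun + vmsave  */
}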
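
An aside on the SAVE_GPR_C/LOAD_GPR_C pair moved above: a single xchg
both stores the current (host) register value into the regs array and
loads the value previously stored there, which is why LOAD_GPR_C can
simply alias SAVE_GPR_C. %rax is deliberately absent from the xchg list
("rax handled specially below"): it holds the VMCB address for VMRUN
and is handled by the mov sequence in ASM_PRE/POST_VMRUN_CMD instead.
Below is a standalone, compilable (x86-64, gcc/clang) sketch of that
swap property; the helper and the user-space demo are ours, not part of
the patch:

#include <stdint.h>
#include <stdio.h>

/* One xchg both saves the register's old value to memory and loads
 * the value previously stored there -- the same round-trip property
 * SAVE_GPR_C/LOAD_GPR_C rely on across VMRUN. */
static inline void xchg_reg_mem(uint64_t *reg, uint64_t *mem)
{
	asm volatile("xchg %0, %1" : "+r" (*reg), "+m" (*mem));
}

int main(void)
{
	uint64_t reg  = 0x1111;		/* stands in for a host GPR     */
	uint64_t slot = 0x2222;		/* stands in for a slot in regs */

	xchg_reg_mem(&reg, &slot);	/* "load": reg=0x2222, slot=0x1111 */
	xchg_reg_mem(&reg, &slot);	/* "save": swapped back again      */
	printf("reg=%#llx slot=%#llx\n",
	       (unsigned long long)reg, (unsigned long long)slot);
	return 0;
}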