@@ -52,6 +52,27 @@ SYM_CODE_START(entry_SYSCALL_64_pvm)
jmp entry_SYSCALL_64_after_hwframe
SYM_CODE_END(entry_SYSCALL_64_pvm)
+.pushsection .noinstr.text, "ax"
+SYM_FUNC_START(pvm_hypercall)
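+ /*
+  * SYSCALL clobbers %rcx (return RIP) and %r11 (saved RFLAGS), so
+  * preserve both and pass the %rcx argument in %r10 instead,
+  * mirroring the Linux SYSCALL ABI.
+  */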
+ pushq %r11
+ pushq %r10
+ movq %rcx, %r10
+ UNWIND_HINT_SAVE
+ syscall
+ UNWIND_HINT_RESTORE
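+ /* Restore the original %rcx value saved in %r10. */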
+ movq %r10, %rcx
+ popq %r10
+ popq %r11
+ RET
+SYM_FUNC_END(pvm_hypercall)
+.popsection
+
/*
* The new RIP value that PVM event delivery establishes is
* MSR_PVM_EVENT_ENTRY for vector events that occur in user mode.
@@ -87,6 +87,7 @@ static inline bool pvm_kernel_layout_relocate(void)
void entry_SYSCALL_64_pvm(void);
void pvm_user_event_entry(void);
+void pvm_hypercall(void);
void pvm_retu_rip(void);
void pvm_rets_rip(void);
#endif /* !__ASSEMBLY__ */
@@ -27,6 +27,60 @@ unsigned long pvm_range_end __initdata;
static bool early_traps_setup __initdata;
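+
+/*
+ * PVM hypercall wrappers: the hypercall number is passed in %rax and
+ * up to three arguments in %rbx, %rcx and %rdx, matching the KVM
+ * hypercall register convention; the return value comes back in %rax.
+ * No extra clobbers are needed since pvm_hypercall() preserves the
+ * registers that SYSCALL destroys.
+ */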
+static __always_inline long pvm_hypercall0(unsigned int nr)
+{
+ long ret;
+
+ asm volatile("call pvm_hypercall"
+ : "=a"(ret)
+ : "a"(nr)
+ : "memory");
+ return ret;
+}
+
+static __always_inline long pvm_hypercall1(unsigned int nr, unsigned long p1)
+{
+ long ret;
+
+ asm volatile("call pvm_hypercall"
+ : "=a"(ret)
+ : "a"(nr), "b"(p1)
+ : "memory");
+ return ret;
+}
+
+static __always_inline long pvm_hypercall2(unsigned int nr, unsigned long p1,
+ unsigned long p2)
+{
+ long ret;
+
+ asm volatile("call pvm_hypercall"
+ : "=a"(ret)
+ : "a"(nr), "b"(p1), "c"(p2)
+ : "memory");
+ return ret;
+}
+
+static __always_inline long pvm_hypercall3(unsigned int nr, unsigned long p1,
+ unsigned long p2, unsigned long p3)
+{
+ long ret;
+
+ asm volatile("call pvm_hypercall"
+ : "=a"(ret)
+ : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
+ : "memory");
+ return ret;
+}
+
void __init pvm_early_event(struct pt_regs *regs)
{
int vector = regs->orig_ax >> 32;