@@ -65,6 +65,28 @@ SYM_FUNC_START(pvm_hypercall)
popq %r11
RET
SYM_FUNC_END(pvm_hypercall)
+
+SYM_FUNC_START(pvm_save_fl)
+	movq PER_CPU_VAR(pvm_vcpu_struct + PVCS_event_flags), %rax	/* PV save_fl: return the shared per-CPU event_flags word; its X86_EFLAGS_IF bit is the guest's virtual IF */
+	RET
+SYM_FUNC_END(pvm_save_fl)
+
+SYM_FUNC_START(pvm_irq_disable)
+	btrq $X86_EFLAGS_IF_BIT, PER_CPU_VAR(pvm_vcpu_struct + PVCS_event_flags)	/* clear virtual IF in shared memory; no hypercall needed to disable */
+	RET
+SYM_FUNC_END(pvm_irq_disable)
+
+SYM_FUNC_START(pvm_irq_enable)
+	/* open the IRQ window: set X86_EFLAGS_IF in the shared per-CPU event_flags */
+	orq $X86_EFLAGS_IF, PER_CPU_VAR(pvm_vcpu_struct + PVCS_event_flags)
+	btq $PVM_EVENT_FLAGS_IP_BIT, PER_CPU_VAR(pvm_vcpu_struct + PVCS_event_flags)	/* CF = IP bit (presumably "interrupt pending", set by the hypervisor — confirm) */
+	jc .L_maybe_interrupt_pending
+	RET
+.L_maybe_interrupt_pending:
+	/* an event arrived while IF was clear: issue PVM_HC_IRQ_WIN so the hypervisor can deliver it; tail-jump reuses pvm_hypercall's return */
+	movq $PVM_HC_IRQ_WIN, %rax
+	jmp pvm_hypercall
+SYM_FUNC_END(pvm_irq_enable)
.popsection
/*
@@ -95,6 +95,9 @@ void pvm_user_event_entry(void);
void pvm_hypercall(void);
void pvm_retu_rip(void);
void pvm_rets_rip(void);
+void pvm_save_fl(void);
+void pvm_irq_disable(void);
+void pvm_irq_enable(void);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PVM_PARA_H */
@@ -148,6 +148,11 @@ static void pvm_load_tls(struct thread_struct *t, unsigned int cpu)
}
}
+static noinstr void pvm_safe_halt(void)
+{
+	pvm_hypercall0(PVM_HC_IRQ_HALT);	/* PV safe_halt: one hypercall presumably enables IRQs and halts atomically, like native "sti; hlt" — confirm against hypervisor side */
+}
+
void __init pvm_early_event(struct pt_regs *regs)
{
int vector = regs->orig_ax >> 32;
@@ -387,6 +392,11 @@ void __init pvm_early_setup(void)
pv_ops.cpu.write_msr_safe = pvm_write_msr_safe;
pv_ops.cpu.load_tls = pvm_load_tls;
+ pv_ops.irq.save_fl = __PV_IS_CALLEE_SAVE(pvm_save_fl);
+ pv_ops.irq.irq_disable = __PV_IS_CALLEE_SAVE(pvm_irq_disable);
+ pv_ops.irq.irq_enable = __PV_IS_CALLEE_SAVE(pvm_irq_enable);
+ pv_ops.irq.safe_halt = pvm_safe_halt;
+
wrmsrl(MSR_PVM_VCPU_STRUCT, __pa(this_cpu_ptr(&pvm_vcpu_struct)));
wrmsrl(MSR_PVM_EVENT_ENTRY, (unsigned long)(void *)pvm_early_kernel_event_entry - 256);
wrmsrl(MSR_PVM_SUPERVISOR_REDZONE, PVM_SUPERVISOR_REDZONE_SIZE);