@@ -42,6 +42,11 @@ void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp)
regs->pc = rsp->data.regs.arm.pc;
}
+void vm_event_monitor_next_interrupt(struct vcpu *v)
+{
+ /* Not supported on ARM. */
+}
+
/*
* Local variables:
* mode: C
@@ -469,6 +469,12 @@ void hvm_migrate_pirqs(struct vcpu *v)
spin_unlock(&d->event_lock);
}
+static bool hvm_get_pending_event(struct vcpu *v, struct x86_event *info)
+{
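+ /* CR2 is available generically; vector, type and error code come from the vendor hook. */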
+ info->cr2 = v->arch.hvm_vcpu.guest_cr[2];
+ return hvm_funcs.get_pending_event(v, info);
+}
+
void hvm_do_resume(struct vcpu *v)
{
check_wakeup_from_wait();
@@ -535,9 +541,23 @@ void hvm_do_resume(struct vcpu *v)
/* Inject pending hw/sw trap */
if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
{
- hvm_inject_event(&v->arch.hvm_vcpu.inject_trap);
+ if ( !hvm_event_pending(v) )
+ hvm_inject_event(&v->arch.hvm_vcpu.inject_trap);
+
v->arch.hvm_vcpu.inject_trap.vector = -1;
}
+
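+ /* One-shot request: report the first event pending for injection, then disarm. */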
+ if ( unlikely(v->arch.vm_event) && v->arch.monitor.next_interrupt_enabled )
+ {
+ struct x86_event info;
+
+ if ( hvm_get_pending_event(v, &info) )
+ {
+ hvm_monitor_interrupt(info.vector, info.type, info.error_code,
+ info.cr2);
+ v->arch.monitor.next_interrupt_enabled = false;
+ }
+ }
}
static int hvm_print_line(
@@ -150,6 +150,20 @@ int hvm_monitor_cpuid(unsigned long insn_length, unsigned int leaf,
return monitor_traps(curr, 1, &req);
}
+void hvm_monitor_interrupt(unsigned int vector, unsigned int type,
+ unsigned int err, uint64_t cr2)
+{
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_INTERRUPT,
+ .u.interrupt.x86.vector = vector,
+ .u.interrupt.x86.type = type,
+ .u.interrupt.x86.error_code = err,
+ .u.interrupt.x86.cr2 = cr2,
+ };
+
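+ /* Synchronous request: the vCPU stays paused until the monitor responds. */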
+ monitor_traps(current, 1, &req);
+}
+
/*
* Local variables:
* mode: C
@@ -2179,6 +2179,20 @@ static void svm_invlpg(struct vcpu *v, unsigned long vaddr)
svm_asid_g_invlpg(v, vaddr);
}
+static bool svm_get_pending_event(struct vcpu *v, struct x86_event *info)
+{
+ const struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
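+ /* No event is queued for injection at the next VM entry. */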
+ if ( !vmcb->eventinj.fields.v )
+ return false;
+
+ info->vector = vmcb->eventinj.fields.vector;
+ info->type = vmcb->eventinj.fields.type;
+ info->error_code = vmcb->eventinj.fields.errorcode;
+
+ return true;
+}
+
static struct hvm_function_table __initdata svm_function_table = {
.name = "SVM",
.cpu_up_prepare = svm_cpu_up_prepare,
@@ -2209,6 +2223,7 @@ static struct hvm_function_table __initdata svm_function_table = {
.inject_event = svm_inject_event,
.init_hypercall_page = svm_init_hypercall_page,
.event_pending = svm_event_pending,
+ .get_pending_event = svm_get_pending_event,
.invlpg = svm_invlpg,
.wbinvd_intercept = svm_wbinvd_intercept,
.fpu_dirty_intercept = svm_fpu_dirty_intercept,
@@ -2052,6 +2052,25 @@ static int vmx_set_mode(struct vcpu *v, int mode)
return 0;
}
+static bool vmx_get_pending_event(struct vcpu *v, struct x86_event *info)
+{
+ unsigned long intr_info, error_code;
+
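+ /* Read the event, if any, queued in the VM-entry injection fields. */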
+ vmx_vmcs_enter(v);
+ __vmread(VM_ENTRY_INTR_INFO, &intr_info);
+ __vmread(VM_ENTRY_EXCEPTION_ERROR_CODE, &error_code);
+ vmx_vmcs_exit(v);
+
+ if ( !(intr_info & INTR_INFO_VALID_MASK) )
+ return false;
+
+ info->vector = MASK_EXTR(intr_info, INTR_INFO_VECTOR_MASK);
+ info->type = MASK_EXTR(intr_info, INTR_INFO_INTR_TYPE_MASK);
+ info->error_code = error_code;
+
+ return true;
+}
+
static struct hvm_function_table __initdata vmx_function_table = {
.name = "VMX",
.cpu_up_prepare = vmx_cpu_up_prepare,
@@ -2081,6 +2100,7 @@ static struct hvm_function_table __initdata vmx_function_table = {
.inject_event = vmx_inject_event,
.init_hypercall_page = vmx_init_hypercall_page,
.event_pending = vmx_event_pending,
+ .get_pending_event = vmx_get_pending_event,
.invlpg = vmx_invlpg,
.cpu_up = vmx_cpu_up,
.cpu_down = vmx_cpu_down,
@@ -134,6 +134,11 @@ void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp)
v->arch.user_regs.eip = rsp->data.regs.x86.rip;
}
+void vm_event_monitor_next_interrupt(struct vcpu *v)
+{
+ v->arch.monitor.next_interrupt_enabled = true;
+}
+
void vm_event_fill_regs(vm_event_request_t *req)
{
const struct cpu_user_regs *regs = guest_cpu_user_regs();
@@ -433,6 +433,9 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
if ( rsp.flags & VM_EVENT_FLAG_SET_REGISTERS )
vm_event_set_registers(v, &rsp);
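+ /* Arm a one-shot report of the next interrupt pending for this vCPU. */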
+ if ( rsp.flags & VM_EVENT_FLAG_GET_NEXT_INTERRUPT )
+ vm_event_monitor_next_interrupt(v);
+
if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
vm_event_vcpu_unpause(v);
}
@@ -577,6 +577,10 @@ struct arch_vcpu
XEN_GUEST_HANDLE(vcpu_time_info_t) time_info_guest;
struct arch_vm_event *vm_event;
+
+ struct {
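+ /* Set when the monitor asked for a one-shot report of the next pending interrupt. */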
+ bool next_interrupt_enabled;
+ } monitor;
};
smap_check_policy_t smap_policy_change(struct vcpu *v,
@@ -149,6 +149,7 @@ struct hvm_function_table {
void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
int (*event_pending)(struct vcpu *v);
+ bool (*get_pending_event)(struct vcpu *v, struct x86_event *info);
void (*invlpg)(struct vcpu *v, unsigned long vaddr);
int (*cpu_up_prepare)(unsigned int cpu);
@@ -42,6 +42,8 @@ int hvm_monitor_debug(unsigned long rip, enum hvm_monitor_debug_type type,
unsigned long trap_type, unsigned long insn_length);
int hvm_monitor_cpuid(unsigned long insn_length, unsigned int leaf,
unsigned int subleaf);
+void hvm_monitor_interrupt(unsigned int vector, unsigned int type,
+ unsigned int err, uint64_t cr2);
#endif /* __ASM_X86_HVM_MONITOR_H__ */
@@ -76,7 +76,8 @@ static inline uint32_t arch_monitor_get_capabilities(struct domain *d)
(1U << XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT) |
(1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST) |
(1U << XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION) |
- (1U << XEN_DOMCTL_MONITOR_EVENT_CPUID);
+ (1U << XEN_DOMCTL_MONITOR_EVENT_CPUID) |
+ (1U << XEN_DOMCTL_MONITOR_EVENT_INTERRUPT);
/* Since we know this is on VMX, we can just call the hvm func */
if ( hvm_is_singlestep_supported() )
@@ -1086,6 +1086,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cmt_op_t);
#define XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION 5
#define XEN_DOMCTL_MONITOR_EVENT_CPUID 6
#define XEN_DOMCTL_MONITOR_EVENT_PRIVILEGED_CALL 7
+#define XEN_DOMCTL_MONITOR_EVENT_INTERRUPT 8
struct xen_domctl_monitor_op {
uint32_t op; /* XEN_DOMCTL_MONITOR_OP_* */
@@ -105,6 +105,11 @@
* if any of those flags are set, only those will be honored).
*/
#define VM_EVENT_FLAG_SET_EMUL_INSN_DATA (1 << 9)
+/*
+ * Have a one-shot VM_EVENT_REASON_INTERRUPT event sent for the first
+ * interrupt pending after resuming the VCPU.
+ */
+#define VM_EVENT_FLAG_GET_NEXT_INTERRUPT (1 << 10)
/*
* Reasons for the vm event request
@@ -139,6 +144,8 @@
* These kinds of events will be filtered out in future versions.
*/
#define VM_EVENT_REASON_PRIVILEGED_CALL 11
+/* An interrupt (or exception) is about to be delivered to the guest. */
+#define VM_EVENT_REASON_INTERRUPT 12
/* Supported values for the vm_event_write_ctrlreg index. */
#define VM_EVENT_X86_CR0 0
@@ -259,6 +266,14 @@ struct vm_event_cpuid {
uint32_t _pad;
};
+struct vm_event_interrupt_x86 {
+ uint32_t vector;
+ uint32_t type;
+ uint32_t error_code;
+ uint32_t _pad;
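+ /* CR2 at the time of the event (the faulting address for a page fault). */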
+ uint64_t cr2;
+};
+
#define MEM_PAGING_DROP_PAGE (1 << 0)
#define MEM_PAGING_EVICT_FAIL (1 << 1)
@@ -302,6 +317,9 @@ typedef struct vm_event_st {
struct vm_event_debug software_breakpoint;
struct vm_event_debug debug_exception;
struct vm_event_cpuid cpuid;
+ union {
+ struct vm_event_interrupt_x86 x86;
+ } interrupt;
} u;
union {
@@ -78,6 +78,8 @@ void vm_event_vcpu_unpause(struct vcpu *v);
void vm_event_fill_regs(vm_event_request_t *req);
void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp);
+void vm_event_monitor_next_interrupt(struct vcpu *v);
+
#endif /* __VM_EVENT_H__ */
/*