@@ -25,6 +25,39 @@
#include <asm/hvm/support.h>
#include <asm/vm_event.h>

+static void hvm_vm_event_set_registers(const struct vcpu *v)
+{
+ ASSERT(v == current);
+
+ if ( unlikely(v->arch.vm_event->set_gprs) )
+ {
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
+
+ regs->rax = v->arch.vm_event->gprs.rax;
+ regs->rbx = v->arch.vm_event->gprs.rbx;
+ regs->rcx = v->arch.vm_event->gprs.rcx;
+ regs->rdx = v->arch.vm_event->gprs.rdx;
+ regs->rsp = v->arch.vm_event->gprs.rsp;
+ regs->rbp = v->arch.vm_event->gprs.rbp;
+ regs->rsi = v->arch.vm_event->gprs.rsi;
+ regs->rdi = v->arch.vm_event->gprs.rdi;
+
+ regs->r8 = v->arch.vm_event->gprs.r8;
+ regs->r9 = v->arch.vm_event->gprs.r9;
+ regs->r10 = v->arch.vm_event->gprs.r10;
+ regs->r11 = v->arch.vm_event->gprs.r11;
+ regs->r12 = v->arch.vm_event->gprs.r12;
+ regs->r13 = v->arch.vm_event->gprs.r13;
+ regs->r14 = v->arch.vm_event->gprs.r14;
+ regs->r15 = v->arch.vm_event->gprs.r15;
+
+ regs->rflags = v->arch.vm_event->gprs.rflags;
+ regs->rip = v->arch.vm_event->gprs.rip;
+
+ v->arch.vm_event->set_gprs = false;
+ }
+}
+
void hvm_vm_event_do_resume(struct vcpu *v)
{
struct monitor_write_data *w;
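
The hunk above carries the gist of the fix: GPRs requested by the monitor are now applied from the resume path, on the vCPU that owns them (hence the ASSERT(v == current)), rather than being written into the vCPU's register state from a foreign context. A toy userspace sketch of the same "stage remotely, apply at a safe point locally" pattern, with a thread standing in for the consumer; none of the names below are Xen's:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Staged state, a stand-in for v->arch.vm_event->gprs / ->set_gprs. */
static long staged_rip;
static atomic_bool set_gprs;

/* Live state, a stand-in for guest_cpu_user_regs()->rip. */
static long live_rip = 0x1000;

/* "Consumer" thread: stages new state but never touches live_rip. */
static void *consumer(void *arg)
{
    (void)arg;
    staged_rip = 0x2000;
    atomic_store_explicit(&set_gprs, true, memory_order_release);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, consumer, NULL);
    pthread_join(t, NULL);

    /* "Resume path": only the owner applies staged state, at a safe point. */
    if ( atomic_load_explicit(&set_gprs, memory_order_acquire) )
    {
        live_rip = staged_rip;
        atomic_store_explicit(&set_gprs, false, memory_order_relaxed);
    }

    printf("live_rip = %#lx\n", live_rip);
    return 0;
}
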
@@ -32,6 +65,8 @@ void hvm_vm_event_do_resume(struct vcpu *v)
if ( likely(!v->arch.vm_event) )
return;

+ hvm_vm_event_set_registers(v);
+
w = &v->arch.vm_event->write_data;

if ( unlikely(v->arch.vm_event->emulate_flags) )
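
hvm_vm_event_do_resume() only ever runs on the vCPU it belongs to (per the ASSERT in the new helper), and the new call sits ahead of the emulation and write_data handling, so the staged registers are in place for everything else on the way back into the guest.
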
@@ -113,26 +113,8 @@ void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp)
{
ASSERT(atomic_read(&v->vm_event_pause_count));

- v->arch.user_regs.rax = rsp->data.regs.x86.rax;
- v->arch.user_regs.rbx = rsp->data.regs.x86.rbx;
- v->arch.user_regs.rcx = rsp->data.regs.x86.rcx;
- v->arch.user_regs.rdx = rsp->data.regs.x86.rdx;
- v->arch.user_regs.rsp = rsp->data.regs.x86.rsp;
- v->arch.user_regs.rbp = rsp->data.regs.x86.rbp;
- v->arch.user_regs.rsi = rsp->data.regs.x86.rsi;
- v->arch.user_regs.rdi = rsp->data.regs.x86.rdi;
-
- v->arch.user_regs.r8 = rsp->data.regs.x86.r8;
- v->arch.user_regs.r9 = rsp->data.regs.x86.r9;
- v->arch.user_regs.r10 = rsp->data.regs.x86.r10;
- v->arch.user_regs.r11 = rsp->data.regs.x86.r11;
- v->arch.user_regs.r12 = rsp->data.regs.x86.r12;
- v->arch.user_regs.r13 = rsp->data.regs.x86.r13;
- v->arch.user_regs.r14 = rsp->data.regs.x86.r14;
- v->arch.user_regs.r15 = rsp->data.regs.x86.r15;
-
- v->arch.user_regs.rflags = rsp->data.regs.x86.rflags;
- v->arch.user_regs.rip = rsp->data.regs.x86.rip;
+ v->arch.vm_event->gprs = rsp->data.regs.x86;
+ v->arch.vm_event->set_gprs = true;
}

void vm_event_monitor_next_interrupt(struct vcpu *v)
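
vm_event_set_registers() is reached from vm_event_resume() when a response carries VM_EVENT_FLAG_SET_REGISTERS; after this change it only stages the consumer's GPRs and defers the actual write. A hedged consumer-side sketch of what produces such a response, loosely modeled on the xen-access test tool — the helper name and new_rip parameter are illustrative, while the flags and types come from the public interface:

#include <stdint.h>
#include <string.h>
#include <xen/vm_event.h>    /* public vm_event ABI; include path may vary */

/* Hypothetical helper: build a response that asks Xen to load new GPRs
 * before the paused vCPU resumes. */
static void fill_set_registers_rsp(const vm_event_request_t *req,
                                   vm_event_response_t *rsp,
                                   uint64_t new_rip)
{
    memset(rsp, 0, sizeof(*rsp));
    rsp->version = VM_EVENT_INTERFACE_VERSION;
    rsp->vcpu_id = req->vcpu_id;
    rsp->reason  = req->reason;
    rsp->flags   = VM_EVENT_FLAG_VCPU_PAUSED | VM_EVENT_FLAG_SET_REGISTERS;

    /* Start from the state Xen reported, then patch what should change. */
    rsp->data.regs.x86 = req->data.regs.x86;
    rsp->data.regs.x86.rip = new_rip;
}
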
@@ -357,6 +357,16 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
{
vm_event_response_t rsp;

+ /*
+ * vm_event_resume() runs in either XEN_DOMCTL_VM_EVENT_OP_* or
+ * EVTCHN_send context from the introspection consumer. In both
+ * contexts the current vCPU is guaranteed not to be the subject of
+ * a vm_event response. While we could ASSERT(v != current) for each
+ * vCPU in d in the loop below, this single check covers all of them
+ * more succinctly.
+ */
+ ASSERT(d != current->domain);
+
/* Pull all responses off the ring. */
while ( vm_event_get_response(d, ved, &rsp) )
{
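
Of the two contexts named in the new comment, EVTCHN_send is the interesting one: the consumer queues its responses on the shared ring and then kicks the event-channel port, which is what lands Xen in vm_event_resume(). A minimal sketch with libxenevtchn — the helper is hypothetical, xenevtchn_notify() is the real call:

#include <stdio.h>
#include <xenevtchn.h>

/* Hypothetical helper: once the responses are on the shared ring,
 * kick the port so Xen runs vm_event_resume() in EVTCHN_send context. */
static void kick_vm_event_port(xenevtchn_handle *xce, evtchn_port_t port)
{
    if ( xenevtchn_notify(xce, port) < 0 )
        perror("xenevtchn_notify");
}
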
@@ -375,13 +385,6 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
v = d->vcpu[rsp.vcpu_id];

/*
- * Make sure the vCPU state has been synchronized for the custom
- * handlers.
- */
- if ( atomic_read(&v->vm_event_pause_count) )
- sync_vcpu_execstate(v);
-
- /*
* In some cases the response type needs extra handling, so here
* we call the appropriate handlers.
*/
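
With the register writes gone from this path, nothing in vm_event_resume() touches the vCPU's live register state any more: VM_EVENT_FLAG_SET_REGISTERS responses only fill v->arch.vm_event->gprs, so forcing a lazy state sync via sync_vcpu_execstate() from a foreign context has nothing left to protect.
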
@@ -32,6 +32,8 @@ struct arch_vm_event {
struct vm_event_emul_insn_data insn;
} emul;
struct monitor_write_data write_data;
+ struct vm_event_regs_x86 gprs;
+ bool set_gprs;
};

int vm_event_init_domain(struct domain *d);
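
The two new fields close the loop: vm_event_set_registers() fills gprs and raises set_gprs from the consumer's context, and hvm_vm_event_set_registers() applies them and clears the flag the next time the vCPU itself passes through hvm_vm_event_do_resume().
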