Perform sanity checking when shutting vm_event down to determine whether
it is safe to do so. Error out with -EAGAIN if pending operations are
found for the domain.

Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
---
 xen/arch/x86/vm_event.c        | 23 +++++++++++++++++++++++
 xen/common/vm_event.c          | 17 ++++++++++++++---
 xen/include/asm-arm/vm_event.h |  7 +++++++
 xen/include/asm-x86/vm_event.h |  2 ++
 4 files changed, 46 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -297,6 +297,29 @@ void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
 };
 }
 
+bool vm_event_check_pending_op(struct vcpu *v)
+{
+    struct monitor_write_data *w = &v->arch.vm_event->write_data;
+
+    if ( !v->arch.vm_event->sync_event )
+        return false;
+
+    if ( w->do_write.cr0 )
+        return true;
+    if ( w->do_write.cr3 )
+        return true;
+    if ( w->do_write.cr4 )
+        return true;
+    if ( w->do_write.msr )
+        return true;
+    if ( v->arch.vm_event->set_gprs )
+        return true;
+    if ( v->arch.vm_event->emulate_flags )
+        return true;
+
+    return false;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -183,6 +183,7 @@ static int vm_event_disable(struct domain *d, struct vm_event_domain **p_ved)
     if ( vm_event_check_ring(ved) )
     {
         struct vcpu *v;
+        bool pending_op = false;
 
         spin_lock(&ved->lock);
 
@@ -192,9 +193,6 @@ static int vm_event_disable(struct domain *d, struct vm_event_domain **p_ved)
             return -EBUSY;
         }
 
-        /* Free domU's event channel and leave the other one unbound */
-        free_xen_event_channel(d, ved->xen_port);
-
         /* Unblock all vCPUs */
         for_each_vcpu ( d, v )
         {
@@ -203,8 +201,21 @@ static int vm_event_disable(struct domain *d, struct vm_event_domain **p_ved)
             if ( atomic_read(&v->vm_event_pause_count) )
             {
                 vcpu_unpause(v);
                 ved->blocked--;
             }
+
+            if ( vm_event_check_pending_op(v) )
+                pending_op = true;
         }
+
+        /* vm_event ops are still pending until vCPUs get scheduled */
+        if ( pending_op )
+        {
+            spin_unlock(&ved->lock);
+            return -EAGAIN;
+        }
+
+        /* Free domU's event channel and leave the other one unbound */
+        free_xen_event_channel(d, ved->xen_port);
+
         destroy_ring_for_helper(&ved->ring_page, ved->ring_pg_struct);
         vm_event_cleanup_domain(d);
diff --git a/xen/include/asm-arm/vm_event.h b/xen/include/asm-arm/vm_event.h
--- a/xen/include/asm-arm/vm_event.h
+++ b/xen/include/asm-arm/vm_event.h
@@ -58,4 +58,11 @@ void vm_event_sync_event(struct vcpu *v, bool value)
     /* Not supported on ARM. */
 }
 
+static inline
+bool vm_event_check_pending_op(struct vcpu *v)
+{
+    /* Not supported on ARM. */
+    return false;
+}
+
 #endif /* __ASM_ARM_VM_EVENT_H__ */
diff --git a/xen/include/asm-x86/vm_event.h b/xen/include/asm-x86/vm_event.h
--- a/xen/include/asm-x86/vm_event.h
+++ b/xen/include/asm-x86/vm_event.h
@@ -54,4 +54,6 @@
 void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp);
 void vm_event_sync_event(struct vcpu *v, bool value);
 
+bool vm_event_check_pending_op(struct vcpu *v);
+
 #endif /* __ASM_X86_VM_EVENT_H__ */
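
For reference, the new x86 predicate is equivalent to the following more
compact form (purely illustrative, the behaviour is identical to the hunk
above):

    bool vm_event_check_pending_op(struct vcpu *v)
    {
        const struct monitor_write_data *w = &v->arch.vm_event->write_data;

        return v->arch.vm_event->sync_event &&
               (w->do_write.cr0 || w->do_write.cr3 || w->do_write.cr4 ||
                w->do_write.msr || v->arch.vm_event->set_gprs ||
                v->arch.vm_event->emulate_flags);
    }

Either form behaves the same; the patch keeps the open-coded if-chain so
each pending condition stays individually visible.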
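
The deferred register writes tracked here are only applied once the
affected vCPU is next scheduled (in the vCPU resume path), which is why
teardown cannot complete them itself and instead reports -EAGAIN. A
toolstack caller of xc_monitor_disable() therefore needs to be prepared
to retry. A minimal sketch, assuming libxc surfaces the failure in the
usual way (negative return with errno set to EAGAIN); teardown_monitor()
is a hypothetical helper, not part of this patch:

    #include <errno.h>
    #include <unistd.h>
    #include <xenctrl.h>

    /* Hypothetical helper: retry teardown while the hypervisor reports
     * that vm_event work is still pending on some vCPU. */
    static int teardown_monitor(xc_interface *xch, uint32_t domain_id)
    {
        int rc;

        while ( (rc = xc_monitor_disable(xch, domain_id)) < 0 &&
                errno == EAGAIN )
            usleep(1000); /* give the vCPUs a chance to get scheduled */

        return rc;
    }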