@@ -300,6 +300,13 @@ void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
     };
 }
 
+void vm_event_reset_vmtrace(struct vcpu *v)
+{
+#ifdef CONFIG_HVM
+    hvm_vmtrace_reset(v);
+#endif
+}
+
 /*
  * Local variables:
  * mode: C
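
For orientation: hvm_vmtrace_reset() is the HVM-layer hook this new helper delegates to, which is why the call sits behind #ifdef CONFIG_HVM (the hook is not available in non-HVM builds). The sketch below shows the usual hvm_funcs dispatch shape such wrappers take in Xen; the exact guard and return convention are assumptions for illustration, not a copy of the real header.

    /* Illustrative sketch only: assumed shape of the HVM-layer dispatch. */
    static inline int hvm_vmtrace_reset(struct vcpu *v)
    {
        /* Hand off to the vendor implementation (e.g. VMX/IPT) if present. */
        if ( hvm_funcs.vmtrace_reset )
            return hvm_funcs.vmtrace_reset(v);

        return -EOPNOTSUPP;
    }
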
@@ -424,6 +424,9 @@ static int vm_event_resume(struct domain *d, struct vm_event_domain *ved)
             if ( rsp.flags & VM_EVENT_FLAG_GET_NEXT_INTERRUPT )
                 vm_event_monitor_next_interrupt(v);
 
+            if ( rsp.flags & VM_EVENT_FLAG_RESET_VMTRACE )
+                vm_event_reset_vmtrace(v);
+
             if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
                 vm_event_vcpu_unpause(v);
         }
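
On the consumer side, a monitoring application opts into the reset by setting the new flag in the vm_event response it queues back to Xen. A minimal sketch, modeled loosely on the response handling in tools/tests/xen-access (the ring plumbing and event-channel notification are omitted, and the helper name is made up for illustration):

    #include <string.h>
    #include <xen/vm_event.h>

    /* Acknowledge an event and ask Xen to reset this vCPU's vmtrace buffer. */
    static void ack_with_vmtrace_reset(const vm_event_request_t *req,
                                       vm_event_response_t *rsp)
    {
        memset(rsp, 0, sizeof(*rsp));
        rsp->version = VM_EVENT_INTERFACE_VERSION;
        rsp->vcpu_id = req->vcpu_id;
        rsp->reason  = req->reason;

        /* Preserve the pause handshake and request the vmtrace reset. */
        rsp->flags = (req->flags & VM_EVENT_FLAG_VCPU_PAUSED) |
                     VM_EVENT_FLAG_RESET_VMTRACE;

        /* Caller pushes *rsp onto the vm_event ring and kicks the channel. */
    }

As the hunk above shows, the flag is acted on from vm_event_resume(), i.e. when the queued response is processed, immediately before the VM_EVENT_FLAG_VCPU_PAUSED unpause check.
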
@@ -58,4 +58,10 @@ void vm_event_sync_event(struct vcpu *v, bool value)
     /* Not supported on ARM. */
 }
 
+static inline
+void vm_event_reset_vmtrace(struct vcpu *v)
+{
+    /* Not supported on ARM. */
+}
+
 #endif /* __ASM_ARM_VM_EVENT_H__ */
@@ -54,4 +54,6 @@ void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp);
 
 void vm_event_sync_event(struct vcpu *v, bool value);
 
+void vm_event_reset_vmtrace(struct vcpu *v);
+
 #endif /* __ASM_X86_VM_EVENT_H__ */
@@ -123,6 +123,10 @@
  * Set if the event comes from a nested VM and thus npt_base is valid.
  */
 #define VM_EVENT_FLAG_NESTED_P2M (1 << 12)
+/*
+ * Reset the vmtrace buffer (if vmtrace is enabled).
+ */
+#define VM_EVENT_FLAG_RESET_VMTRACE (1 << 13)
 /*
  * Reasons for the vm event request