@@ -76,10 +76,10 @@ static int set_context_data(void *buffer, unsigned int size)
if ( unlikely(monitor_domain_initialised(curr->domain)) )
{
- unsigned int safe_size =
- min(size, curr->arch.vm_event->emul_read_data.size);
+ struct arch_vm_event_monitor *vem = curr->arch.vm_event->monitor;
+ unsigned int safe_size = min(size, vem->emul_read_data.size);
- memcpy(buffer, curr->arch.vm_event->emul_read_data.data, safe_size);
+ memcpy(buffer, vem->emul_read_data.data, safe_size);
memset(buffer + safe_size, 0, size - safe_size);
return X86EMUL_OKAY;
}
@@ -524,7 +524,7 @@ static int hvmemul_virtual_to_linear(
* vm_event being triggered for repeated writes to a whole page.
*/
if ( unlikely(current->domain->arch.mem_access_emulate_each_rep) &&
- current->arch.vm_event->emulate_flags != 0 )
+ current->arch.vm_event->monitor->emulate_flags != 0 )
max_reps = 1;
/*
@@ -65,7 +65,6 @@
#include <asm/altp2m.h>
#include <asm/mtrr.h>
#include <asm/apic.h>
-#include <asm/vm_event.h>
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <public/version.h>
@@ -473,21 +472,21 @@ void hvm_do_resume(struct vcpu *v)
if ( unlikely(monitor_domain_initialised(v->domain)) )
{
- if ( unlikely(v->arch.vm_event->emulate_flags) )
+ struct arch_vm_event_monitor *vem = v->arch.vm_event->monitor;
+
+ if ( unlikely(vem->emulate_flags) )
{
enum emul_kind kind = EMUL_KIND_NORMAL;
- if ( v->arch.vm_event->emulate_flags &
- VM_EVENT_FLAG_SET_EMUL_READ_DATA )
+ if ( vem->emulate_flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA )
kind = EMUL_KIND_SET_CONTEXT;
- else if ( v->arch.vm_event->emulate_flags &
- VM_EVENT_FLAG_EMULATE_NOWRITE )
+ else if ( vem->emulate_flags & VM_EVENT_FLAG_EMULATE_NOWRITE )
kind = EMUL_KIND_NOWRITE;
hvm_mem_access_emulate_one(kind, TRAP_invalid_op,
HVM_DELIVER_NO_ERROR_CODE);
- v->arch.vm_event->emulate_flags = 0;
+ vem->emulate_flags = 0;
}
monitor_ctrlreg_write_data(v);
@@ -2184,8 +2183,8 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
* The actual write will occur in monitor_ctrlreg_write_data(), if
* permitted.
*/
- v->arch.vm_event->write_data.do_write.cr0 = 1;
- v->arch.vm_event->write_data.cr0 = value;
+ v->arch.vm_event->monitor->write_data.do_write.cr0 = 1;
+ v->arch.vm_event->monitor->write_data.cr0 = value;
return X86EMUL_OKAY;
}
@@ -2289,8 +2288,8 @@ int hvm_set_cr3(unsigned long value, bool_t may_defer)
* The actual write will occur in monitor_ctrlreg_write_data(), if
* permitted.
*/
- v->arch.vm_event->write_data.do_write.cr3 = 1;
- v->arch.vm_event->write_data.cr3 = value;
+ v->arch.vm_event->monitor->write_data.do_write.cr3 = 1;
+ v->arch.vm_event->monitor->write_data.cr3 = value;
return X86EMUL_OKAY;
}
@@ -2372,8 +2371,8 @@ int hvm_set_cr4(unsigned long value, bool_t may_defer)
* The actual write will occur in monitor_ctrlreg_write_data(), if
* permitted.
*/
- v->arch.vm_event->write_data.do_write.cr4 = 1;
- v->arch.vm_event->write_data.cr4 = value;
+ v->arch.vm_event->monitor->write_data.do_write.cr4 = 1;
+ v->arch.vm_event->monitor->write_data.cr4 = value;
return X86EMUL_OKAY;
}
@@ -3754,9 +3753,9 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
* The actual write will occur in monitor_ctrlreg_write_data(), if
* permitted.
*/
- v->arch.vm_event->write_data.do_write.msr = 1;
- v->arch.vm_event->write_data.msr = msr;
- v->arch.vm_event->write_data.value = msr_content;
+ v->arch.vm_event->monitor->write_data.do_write.msr = 1;
+ v->arch.vm_event->monitor->write_data.msr = msr;
+ v->arch.vm_event->monitor->write_data.value = msr_content;
hvm_monitor_msr(msr, msr_content);
return X86EMUL_OKAY;
@@ -1591,12 +1591,15 @@ void p2m_mem_paging_resume(struct domain *d, vm_event_response_t *rsp)
void p2m_mem_access_emulate_check(struct vcpu *v,
const vm_event_response_t *rsp)
{
+ ASSERT(monitor_domain_initialised(v->domain));
+
/* Mark vcpu for skipping one instruction upon rescheduling. */
if ( rsp->flags & VM_EVENT_FLAG_EMULATE )
{
xenmem_access_t access;
bool_t violation = 1;
const struct vm_event_mem_access *data = &rsp->u.mem_access;
+ struct arch_vm_event_monitor *vem = v->arch.vm_event->monitor;
if ( p2m_get_mem_access(v->domain, _gfn(data->gfn), &access) == 0 )
{
@@ -1639,10 +1642,10 @@ void p2m_mem_access_emulate_check(struct vcpu *v,
}
}
- v->arch.vm_event->emulate_flags = violation ? rsp->flags : 0;
+ vem->emulate_flags = violation ? rsp->flags : 0;
if ( (rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA) )
- v->arch.vm_event->emul_read_data = rsp->data.emul_read_data;
+ vem->emul_read_data = rsp->data.emul_read_data;
}
}
@@ -21,7 +21,6 @@
#include <asm/hvm/vmx/vmx.h>
#include <asm/monitor.h>
-#include <asm/vm_event.h>
#include <public/vm_event.h>
static inline
@@ -55,16 +54,43 @@ static inline void monitor_ctrlreg_disable_traps(struct domain *d)
/* Implicitly serialized by the domctl lock. */
int monitor_init_domain(struct domain *d)
{
- if ( likely(!d->arch.monitor.msr_bitmap) )
+ int rc = 0;
+ struct vcpu *v;
+
+ /* Already initialised? */
+ if ( unlikely(monitor_domain_initialised(d)) )
+ return 0;
+
+ /* Per-vcpu initializations. */
+ for_each_vcpu(d, v)
+ {
+ ASSERT(v->arch.vm_event);
+ ASSERT(!v->arch.vm_event->monitor);
+
+ v->arch.vm_event->monitor = xzalloc(struct arch_vm_event_monitor);
+ if ( unlikely(!v->arch.vm_event->monitor) )
+ {
+ rc = -ENOMEM;
+ goto err;
+ }
+ }
+
+ ASSERT(!d->arch.monitor.msr_bitmap);
+ d->arch.monitor.msr_bitmap = xzalloc(struct monitor_msr_bitmap);
+ if ( unlikely(!d->arch.monitor.msr_bitmap) )
{
- d->arch.monitor.msr_bitmap = xzalloc(struct monitor_msr_bitmap);
- if ( unlikely(!d->arch.monitor.msr_bitmap) )
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err;
}
d->monitor.initialised = 1;
- return 0;
+err:
+ if ( unlikely(rc) )
+ /* On fail cleanup whatever resources have been partially allocated. */
+ monitor_cleanup_domain(d);
+
+ return rc;
}
/*
@@ -73,9 +99,20 @@ int monitor_init_domain(struct domain *d)
*/
void monitor_cleanup_domain(struct domain *d)
{
+ struct vcpu *v;
+
xfree(d->arch.monitor.msr_bitmap);
d->arch.monitor.msr_bitmap = NULL;
+ for_each_vcpu(d, v)
+ {
+ if ( unlikely(!v->arch.vm_event || !v->arch.vm_event->monitor) )
+ continue;
+
+ xfree(v->arch.vm_event->monitor);
+ v->arch.vm_event->monitor = NULL;
+ }
+
monitor_ctrlreg_disable_traps(d);
memset(&d->arch.monitor, 0, sizeof(d->arch.monitor));
@@ -88,6 +125,8 @@ void monitor_cleanup_domain(struct domain *d)
void monitor_ctrlreg_write_resume(struct vcpu *v, vm_event_response_t *rsp)
{
+ ASSERT(monitor_domain_initialised(v->domain));
+
if ( rsp->flags & VM_EVENT_FLAG_DENY )
{
struct monitor_write_data *w;
@@ -98,7 +137,7 @@ void monitor_ctrlreg_write_resume(struct vcpu *v, vm_event_response_t *rsp)
if ( !atomic_read(&v->vm_event_pause_count) )
return;
- w = &v->arch.vm_event->write_data;
+ w = &v->arch.vm_event->monitor->write_data;
switch ( rsp->reason )
{
@@ -125,7 +164,7 @@ void monitor_ctrlreg_write_resume(struct vcpu *v, vm_event_response_t *rsp)
void monitor_ctrlreg_write_data(struct vcpu *v)
{
- struct monitor_write_data *w = &v->arch.vm_event->write_data;
+ struct monitor_write_data *w = &v->arch.vm_event->monitor->write_data;
if ( likely(!w->writes_pending) )
return;
@@ -65,9 +65,22 @@ void vm_event_cleanup_domain(struct domain *d, struct vm_event_domain *ved)
if ( &d->vm_event->monitor == ved )
monitor_cleanup_domain(d);
- /* Per-vcpu uninitializations. */
+ /*
+ * Per-vCPU: if all resources of all vm-event subsystems were freed,
+ * also free v->arch.vm_event.
+ */
for_each_vcpu ( d, v )
- vm_event_cleanup_vcpu_destroy(v);
+ {
+ if ( unlikely(!v->arch.vm_event) )
+ continue;
+
+ /* Are there resources of vm-event subsystems left unfreed? */
+ if ( unlikely(v->arch.vm_event->monitor) )
+ continue;
+
+ xfree(v->arch.vm_event);
+ v->arch.vm_event = NULL;
+ }
}
void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
@@ -22,6 +22,7 @@
#include <xen/sched.h>
#include <xen/event.h>
+#include <xen/monitor.h>
#include <xen/wait.h>
#include <xen/vm_event.h>
#include <xen/mem_access.h>
@@ -397,11 +398,27 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
case VM_EVENT_REASON_MOV_TO_MSR:
#endif
case VM_EVENT_REASON_WRITE_CTRLREG:
+ if ( unlikely(!monitor_domain_initialised(d)) )
+ {
printk(XENLOG_G_WARNING "Vm-event monitor subsystem not "
       "initialized, cr-write ring response "
       "ignored (hint: have you called "
       "xc_monitor_enable()?).\n");
+ continue;
+ }
monitor_ctrlreg_write_resume(v, &rsp);
break;
#ifdef CONFIG_HAS_MEM_ACCESS
case VM_EVENT_REASON_MEM_ACCESS:
+ if ( unlikely(!monitor_domain_initialised(d)) )
+ {
printk(XENLOG_G_WARNING "Vm-event monitor subsystem not "
       "initialized, mem-access ring response "
       "ignored (hint: have you called "
       "xc_monitor_enable()?).\n");
+ continue;
+ }
mem_access_resume(v, &rsp);
break;
#endif
@@ -9,6 +9,7 @@
#include <asm/e820.h>
#include <asm/mce.h>
#include <public/vcpu.h>
+#include <public/vm_event.h>
#include <public/hvm/hvm_info_table.h>
#define has_32bit_shinfo(d) ((d)->arch.has_32bit_shinfo)
@@ -503,6 +504,20 @@ typedef enum __packed {
SMAP_CHECK_DISABLED, /* disable the check */
} smap_check_policy_t;
+/*
+ * Should we emulate the next matching instruction on VCPU resume
+ * after a vm_event?
+ */
+struct arch_vm_event_monitor {
+ uint32_t emulate_flags;
+ struct vm_event_emul_read_data emul_read_data;
+ struct monitor_write_data write_data;
+};
+
+struct arch_vm_event {
+ struct arch_vm_event_monitor *monitor;
+};
+
struct arch_vcpu
{
/*
@@ -90,6 +90,13 @@ int monitor_init_domain(struct domain *d);
void monitor_cleanup_domain(struct domain *d);
+/* Called on vCPU destroy. */
+static inline void monitor_cleanup_vcpu_destroy(struct vcpu *v)
+{
+ ASSERT(v->arch.vm_event);
+ xfree(v->arch.vm_event->monitor);
+}
+
void monitor_ctrlreg_write_resume(struct vcpu *v, vm_event_response_t *rsp);
void monitor_ctrlreg_write_data(struct vcpu *v);
@@ -20,16 +20,7 @@
#define __ASM_X86_VM_EVENT_H__
#include <xen/sched.h>
-
-/*
- * Should we emulate the next matching instruction on VCPU resume
- * after a vm_event?
- */
-struct arch_vm_event {
- uint32_t emulate_flags;
- struct vm_event_emul_read_data emul_read_data;
- struct monitor_write_data write_data;
-};
+#include <asm/monitor.h>
int vm_event_init_domain(struct domain *d, struct vm_event_domain *ved);
@@ -41,6 +32,8 @@ static inline void vm_event_cleanup_vcpu_destroy(struct vcpu *v)
if ( likely(!v->arch.vm_event) )
return;
+ monitor_cleanup_vcpu_destroy(v);
+
xfree(v->arch.vm_event);
v->arch.vm_event = NULL;
}
Move fields in arch_vm_event in a structure called arch_vm_event_monitor and
refactor arch_vm_event to only hold a pointer to an instance of the
aforementioned. Done for a number of reasons:

* to make the separation of monitor resources from other vm-event resources
  clearly visible
* to be able to selectively allocate/free monitor-only resources in monitor
  init & cleanup stubs
* did _not_ do a v->arch.vm_event -to- v->arch.monitor transformation because
  we want the guarantee of not occupying more than 1 pointer in arch_vcpu,
  i.e. if in the future e.g. paging vm-events would need per-vcpu resources,
  one would make an arch_vm_event_paging structure and add a pointer to an
  instance of it as an arch_vm_event.paging field, in which case we will still
  have only 1 pointer (arch_vm_event) consuming memory in arch_vcpu for
  vm-event resources

Note also that this breaks the old implication 'vm-event initialized ->
monitor resources (which were in arch_vm_event) initialized', so 2 extra
checks on monitor_domain_initialised() had to be added:
i) in vm_event_resume() before calling monitor_ctrlreg_write_resume(), also
   an ASSERT in monitor_ctrlreg_write_resume()
ii) in vm_event_resume() before calling mem_access_resume(), also an ASSERT
    on that code-path in p2m_mem_access_emulate_check()

Finally, also note that the definition of arch_vm_event had to be moved to
asm-x86/domain.h due to a cyclic header dependency it caused between
asm-x86/vm_event.h and asm-x86/monitor.h.
Signed-off-by: Corneliu ZUZU <czuzu@bitdefender.com>
---
 xen/arch/x86/hvm/emulate.c     |  8 +++---
 xen/arch/x86/hvm/hvm.c         | 31 ++++++++++++------------
 xen/arch/x86/mm/p2m.c          |  7 ++++--
 xen/arch/x86/monitor.c         | 55 ++++++++++++++++++++++++++++++++++++------
 xen/arch/x86/vm_event.c        | 17 +++++++++++--
 xen/common/vm_event.c          | 17 +++++++++++++
 xen/include/asm-x86/domain.h   | 15 ++++++++++++
 xen/include/asm-x86/monitor.h  |  7 ++++++
 xen/include/asm-x86/vm_event.h | 13 +++-------
 9 files changed, 128 insertions(+), 42 deletions(-)