@@ -22,6 +22,7 @@
#include <xen/errno.h>
#include <xen/guest_access.h>
#include <xen/sched.h>
+#include <xen/vm_event.h>
#include <xsm/xsm.h>
@@ -72,6 +73,13 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
break;
}
+ case HVMOP_guest_request_vm_event:
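+ /* The guest must pass a null handle; any other argument is rejected. */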
+ if ( guest_handle_is_null(arg) )
+ vm_event_monitor_guest_request();
+ else
+ rc = -EINVAL;
+ break;
+
default:
{
gdprintk(XENLOG_DEBUG, "HVMOP op=%lu: not implemented\n", op);
@@ -6,6 +6,7 @@
* Copyright (c) 2004, Intel Corporation.
* Copyright (c) 2005, International Business Machines Corporation.
* Copyright (c) 2008, Citrix Systems, Inc.
+ * Copyright (c) 2016, Bitdefender S.R.L.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -21,71 +22,32 @@
*/
#include <xen/vm_event.h>
-#include <xen/paging.h>
#include <asm/hvm/event.h>
#include <asm/monitor.h>
-#include <asm/altp2m.h>
#include <asm/vm_event.h>
#include <public/vm_event.h>
-static int hvm_event_traps(uint8_t sync, vm_event_request_t *req)
-{
- int rc;
- struct vcpu *curr = current;
- struct domain *currd = curr->domain;
-
- rc = vm_event_claim_slot(currd, &currd->vm_event->monitor);
- switch ( rc )
- {
- case 0:
- break;
- case -ENOSYS:
- /*
- * If there was no ring to handle the event, then
- * simply continue executing normally.
- */
- return 1;
- default:
- return rc;
- };
-
- if ( sync )
- {
- req->flags |= VM_EVENT_FLAG_VCPU_PAUSED;
- vm_event_vcpu_pause(curr);
- }
-
- if ( altp2m_active(currd) )
- {
- req->flags |= VM_EVENT_FLAG_ALTERNATE_P2M;
- req->altp2m_idx = vcpu_altp2m(curr).p2midx;
- }
-
- vm_event_fill_regs(req);
- vm_event_put_request(currd, &currd->vm_event->monitor, req);
-
- return 1;
-}
-
bool_t hvm_event_cr(unsigned int index, unsigned long value, unsigned long old)
{
- struct arch_domain *currad = &current->domain->arch;
+ struct vcpu *curr = current;
+ struct arch_domain *ad = &curr->domain->arch;
unsigned int ctrlreg_bitmask = monitor_ctrlreg_bitmask(index);
- if ( (currad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask) &&
- (!(currad->monitor.write_ctrlreg_onchangeonly & ctrlreg_bitmask) ||
+ if ( (ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask) &&
+ (!(ad->monitor.write_ctrlreg_onchangeonly & ctrlreg_bitmask) ||
value != old) )
{
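+ /* Deliver synchronously (pausing the vCPU) if configured for this register. */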
+ bool_t sync = !!(ad->monitor.write_ctrlreg_sync & ctrlreg_bitmask);
+
vm_event_request_t req = {
.reason = VM_EVENT_REASON_WRITE_CTRLREG,
- .vcpu_id = current->vcpu_id,
+ .vcpu_id = curr->vcpu_id,
.u.write_ctrlreg.index = index,
.u.write_ctrlreg.new_value = value,
.u.write_ctrlreg.old_value = old
};
- hvm_event_traps(currad->monitor.write_ctrlreg_sync & ctrlreg_bitmask,
- &req);
+ vm_event_monitor_traps(curr, sync, &req);
return 1;
}
@@ -95,30 +57,18 @@ bool_t hvm_event_cr(unsigned int index, unsigned long value, unsigned long old)
void hvm_event_msr(unsigned int msr, uint64_t value)
{
struct vcpu *curr = current;
- vm_event_request_t req = {
- .reason = VM_EVENT_REASON_MOV_TO_MSR,
- .vcpu_id = curr->vcpu_id,
- .u.mov_to_msr.msr = msr,
- .u.mov_to_msr.value = value,
- };
-
- if ( curr->domain->arch.monitor.mov_to_msr_enabled )
- hvm_event_traps(1, &req);
-}
-
-void hvm_event_guest_request(void)
-{
- struct vcpu *curr = current;
- struct arch_domain *currad = &curr->domain->arch;
+ struct arch_domain *ad = &curr->domain->arch;
- if ( currad->monitor.guest_request_enabled )
+ if ( ad->monitor.mov_to_msr_enabled )
{
vm_event_request_t req = {
- .reason = VM_EVENT_REASON_GUEST_REQUEST,
+ .reason = VM_EVENT_REASON_MOV_TO_MSR,
.vcpu_id = curr->vcpu_id,
+ .u.mov_to_msr.msr = msr,
+ .u.mov_to_msr.value = value,
};
- hvm_event_traps(currad->monitor.guest_request_sync, &req);
+ vm_event_monitor_traps(curr, 1, &req);
}
}
@@ -166,7 +116,7 @@ int hvm_event_breakpoint(unsigned long rip,
req.vcpu_id = curr->vcpu_id;
- return hvm_event_traps(1, &req);
+ return vm_event_monitor_traps(curr, 1, &req);
}
/*
@@ -36,6 +36,7 @@
#include <xen/wait.h>
#include <xen/mem_access.h>
#include <xen/rangeset.h>
+#include <xen/vm_event.h>
#include <asm/shadow.h>
#include <asm/hap.h>
#include <asm/current.h>
@@ -6909,7 +6910,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
case HVMOP_guest_request_vm_event:
if ( guest_handle_is_null(arg) )
- hvm_event_guest_request();
+ vm_event_monitor_guest_request();
else
rc = -EINVAL;
break;
@@ -124,24 +124,10 @@ int arch_monitor_domctl_event(struct domain *d,
break;
}
- case XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
- {
- bool_t old_status = ad->monitor.guest_request_enabled;
-
- if ( unlikely(old_status == requested_status) )
- return -EEXIST;
-
- domain_pause(d);
- ad->monitor.guest_request_sync = mop->u.guest_request.sync;
- ad->monitor.guest_request_enabled = requested_status;
- domain_unpause(d);
- break;
- }
-
default:
/*
- * Should not be reached unless arch_monitor_get_capabilities() is not
- * properly implemented.
+ * Should not be reached unless vm_event_monitor_get_capabilities() is
+ * not properly implemented.
*/
ASSERT_UNREACHABLE();
return -EOPNOTSUPP;
@@ -57,6 +57,7 @@ void vm_event_cleanup_domain(struct domain *d)
d->arch.mem_access_emulate_each_rep = 0;
memset(&d->arch.monitor, 0, sizeof(d->arch.monitor));
+ memset(&d->monitor, 0, sizeof(d->monitor));
}
void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
@@ -24,10 +24,12 @@
#include <xsm/xsm.h>
#include <public/domctl.h>
#include <asm/monitor.h>
+#include <asm/vm_event.h>
int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop)
{
int rc;
+ bool_t requested_status = 0;
if ( unlikely(current->domain == d) ) /* no domain_pause() */
return -EPERM;
@@ -39,24 +41,48 @@ int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop)
switch ( mop->op )
{
case XEN_DOMCTL_MONITOR_OP_ENABLE:
+ requested_status = 1;
+ /* fallthrough */
case XEN_DOMCTL_MONITOR_OP_DISABLE:
- /* Check if event type is available. */
/* sanity check: avoid left-shift undefined behavior */
if ( unlikely(mop->event > 31) )
return -EINVAL;
- if ( unlikely(!(arch_monitor_get_capabilities(d) & (1U << mop->event))) )
+ /* Check if event type is available. */
+ if ( unlikely(!(vm_event_monitor_get_capabilities(d) & (1U << mop->event))) )
return -EOPNOTSUPP;
- /* Arch-side handles enable/disable ops. */
- return arch_monitor_domctl_event(d, mop);
+ break;
case XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES:
- mop->event = arch_monitor_get_capabilities(d);
+ mop->event = vm_event_monitor_get_capabilities(d);
return 0;
default:
/* The monitor op is probably handled on the arch side. */
return arch_monitor_domctl_op(d, mop);
}
+
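+ /* Handle common-side monitor events here; defer the rest to the arch side. */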
+ switch ( mop->event )
+ {
+ case XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
+ {
+ bool_t old_status = d->monitor.guest_request_enabled;
+
+ if ( unlikely(old_status == requested_status) )
+ return -EEXIST;
+
+ domain_pause(d);
+ d->monitor.guest_request_sync = mop->u.guest_request.sync;
+ d->monitor.guest_request_enabled = requested_status;
+ domain_unpause(d);
+ break;
+ }
+
+ default:
+ /* Give the arch side a chance to handle this event. */
+ return arch_monitor_domctl_event(d, mop);
+ }
+
+ return 0;
}
/*
@@ -26,6 +26,7 @@
#include <xen/vm_event.h>
#include <xen/mem_access.h>
#include <asm/p2m.h>
+#include <asm/altp2m.h>
#include <asm/vm_event.h>
#include <xsm/xsm.h>
@@ -781,6 +782,65 @@ void vm_event_vcpu_unpause(struct vcpu *v)
}
/*
+ * Monitor vm-events
+ */
+
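+/*
+ * Returns 1 if the request was queued (or if no monitor ring exists, in
+ * which case the guest continues normally), or a negative errno value
+ * when claiming a ring slot fails.
+ */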
+int vm_event_monitor_traps(struct vcpu *v, uint8_t sync,
+ vm_event_request_t *req)
+{
+ int rc;
+ struct domain *d = v->domain;
+
+ rc = vm_event_claim_slot(d, &d->vm_event->monitor);
+ switch ( rc )
+ {
+ case 0:
+ break;
+ case -ENOSYS:
+ /*
+ * If there was no ring to handle the event, then
+ * simply continue executing normally.
+ */
+ return 1;
+ default:
+ return rc;
+ }
+
+ if ( sync )
+ {
+ req->flags |= VM_EVENT_FLAG_VCPU_PAUSED;
+ vm_event_vcpu_pause(v);
+ }
+
+ if ( altp2m_active(d) )
+ {
+ req->flags |= VM_EVENT_FLAG_ALTERNATE_P2M;
+ req->altp2m_idx = altp2m_vcpu_idx(v);
+ }
+
+ vm_event_fill_regs(req);
+ vm_event_put_request(d, &d->vm_event->monitor, req);
+
+ return 1;
+}
+
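+/* Handles HVMOP_guest_request_vm_event, issued by the guest itself. */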
+void vm_event_monitor_guest_request(void)
+{
+ struct vcpu *curr = current;
+ struct domain *d = curr->domain;
+
+ if ( d->monitor.guest_request_enabled )
+ {
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_GUEST_REQUEST,
+ .vcpu_id = curr->vcpu_id,
+ };
+
+ vm_event_monitor_traps(curr, d->monitor.guest_request_sync, &req);
+ }
+}
+
+/*
* Local variables:
* mode: C
* c-file-style: "BSD"
new file mode 100644
@@ -0,0 +1,39 @@
+/*
+ * Alternate p2m
+ *
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ARM_ALTP2M_H
+#define __ASM_ARM_ALTP2M_H
+
+#include <xen/sched.h>
+
+/* Alternate p2m on/off per domain */
+static inline bool_t altp2m_active(const struct domain *d)
+{
+ /* Not implemented on ARM. */
+ return 0;
+}
+
+/* Alternate p2m VCPU */
+static inline uint16_t altp2m_vcpu_idx(const struct vcpu *v)
+{
+ /* Not implemented on ARM, should not be reached. */
+ BUG();
+ return 0;
+}
+
+#endif /* __ASM_ARM_ALTP2M_H */
@@ -25,12 +25,6 @@
#include <xen/sched.h>
#include <public/domctl.h>
-static inline uint32_t arch_monitor_get_capabilities(struct domain *d)
-{
- /* No monitor vm-events implemented on ARM. */
- return 0;
-}
-
static inline
int arch_monitor_domctl_op(struct domain *d, struct xen_domctl_monitor_op *mop)
{
@@ -45,7 +39,7 @@ int arch_monitor_domctl_event(struct domain *d,
/*
* No arch-specific monitor vm-events on ARM.
*
- * Should not be reached unless arch_monitor_get_capabilities() is not
+ * Should not be reached unless vm_event_monitor_get_capabilities() is not
* properly implemented.
*/
ASSERT_UNREACHABLE();
@@ -21,18 +21,19 @@
#include <xen/sched.h>
#include <xen/vm_event.h>
+#include <public/domctl.h>
static inline
int vm_event_init_domain(struct domain *d)
{
- /* Not supported on ARM. */
+ /* Nothing to do. */
return 0;
}
static inline
void vm_event_cleanup_domain(struct domain *d)
{
- /* Not supported on ARM. */
+ memset(&d->monitor, 0, sizeof(d->monitor));
}
static inline
@@ -53,4 +54,18 @@ void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp)
/* Not supported on ARM. */
}
+static inline void vm_event_fill_regs(vm_event_request_t *req)
+{
+ /* Not supported on ARM. */
+}
+
+static inline uint32_t vm_event_monitor_get_capabilities(struct domain *d)
+{
+ /* Guest-request is the only monitor vm-event available on ARM. */
+ return 1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST;
+}
+
#endif /* __ASM_ARM_VM_EVENT_H__ */
@@ -15,8 +15,8 @@
* this program; If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _X86_ALTP2M_H
-#define _X86_ALTP2M_H
+#ifndef __ASM_X86_ALTP2M_H
+#define __ASM_X86_ALTP2M_H
#include <xen/types.h>
#include <xen/sched.h> /* for struct vcpu, struct domain */
@@ -33,5 +33,9 @@ void altp2m_vcpu_initialise(struct vcpu *v);
void altp2m_vcpu_destroy(struct vcpu *v);
void altp2m_vcpu_reset(struct vcpu *v);
-#endif /* _X86_ALTP2M_H */
+static inline uint16_t altp2m_vcpu_idx(const struct vcpu *v)
+{
+ return vcpu_altp2m(v).p2midx;
+}
+#endif /* __ASM_X86_ALTP2M_H */
@@ -391,7 +391,7 @@ struct arch_domain
unsigned long *pirq_eoi_map;
unsigned long pirq_eoi_map_mfn;
- /* Monitor options */
+ /* Arch-specific monitor options */
struct {
unsigned int write_ctrlreg_enabled : 4;
unsigned int write_ctrlreg_sync : 4;
@@ -400,8 +400,6 @@ struct arch_domain
unsigned int mov_to_msr_extended : 1;
unsigned int singlestep_enabled : 1;
unsigned int software_breakpoint_enabled : 1;
- unsigned int guest_request_enabled : 1;
- unsigned int guest_request_sync : 1;
} monitor;
/* Mem_access emulation control */
@@ -1,5 +1,7 @@
/*
- * event.h: Hardware virtual machine assist events.
+ * include/asm-x86/hvm/event.h
+ *
+ * Arch-specific hardware virtual machine event abstractions.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -17,6 +19,10 @@
#ifndef __ASM_X86_HVM_EVENT_H__
#define __ASM_X86_HVM_EVENT_H__
+#include <xen/sched.h>
+#include <xen/paging.h>
+#include <public/vm_event.h>
+
enum hvm_event_breakpoint_type
{
HVM_EVENT_SOFTWARE_BREAKPOINT,
@@ -30,12 +36,11 @@ enum hvm_event_breakpoint_type
*/
bool_t hvm_event_cr(unsigned int index, unsigned long value,
unsigned long old);
-#define hvm_event_crX(what, new, old) \
- hvm_event_cr(VM_EVENT_X86_##what, new, old)
+#define hvm_event_crX(cr, new, old) \
+ hvm_event_cr(VM_EVENT_X86_##cr, new, old)
void hvm_event_msr(unsigned int msr, uint64_t value);
int hvm_event_breakpoint(unsigned long rip,
enum hvm_event_breakpoint_type type);
-void hvm_event_guest_request(void);
#endif /* __ASM_X86_HVM_EVENT_H__ */
@@ -29,29 +29,6 @@
#define monitor_ctrlreg_bitmask(ctrlreg_index) (1U << (ctrlreg_index))
-static inline uint32_t arch_monitor_get_capabilities(struct domain *d)
-{
- uint32_t capabilities = 0;
-
- /*
- * At the moment only Intel HVM domains are supported. However, event
- * delivery could be extended to AMD and PV domains.
- */
- if ( !is_hvm_domain(d) || !cpu_has_vmx )
- return capabilities;
-
- capabilities = (1U << XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG) |
- (1U << XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR) |
- (1U << XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT) |
- (1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST);
-
- /* Since we know this is on VMX, we can just call the hvm func */
- if ( hvm_is_singlestep_supported() )
- capabilities |= (1U << XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP);
-
- return capabilities;
-}
-
static inline
int arch_monitor_domctl_op(struct domain *d, struct xen_domctl_monitor_op *mop)
{
@@ -44,4 +44,27 @@ void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp);
void vm_event_fill_regs(vm_event_request_t *req);
+static inline uint32_t vm_event_monitor_get_capabilities(struct domain *d)
+{
+ uint32_t capabilities = 0;
+
+ /*
+ * At the moment only Intel HVM domains are supported. However, event
+ * delivery could be extended to AMD and PV domains.
+ */
+ if ( !is_hvm_domain(d) || !cpu_has_vmx )
+ return capabilities;
+
+ capabilities = (1U << XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG) |
+ (1U << XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR) |
+ (1U << XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT) |
+ (1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST);
+
+ /* Since we know this is on VMX, we can just call the hvm func */
+ if ( hvm_is_singlestep_supported() )
+ capabilities |= (1U << XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP);
+
+ return capabilities;
+}
+
#endif /* __ASM_X86_VM_EVENT_H__ */
@@ -464,6 +464,12 @@ struct domain
/* vNUMA topology accesses are protected by rwlock. */
rwlock_t vnuma_rwlock;
struct vnuma_info *vnuma;
+
+ /* Common monitor options */
+ struct {
+ unsigned int guest_request_enabled : 1;
+ unsigned int guest_request_sync : 1;
+ } monitor;
};
/* Protect updates/reads (resp.) of domain_list and domain_hash. */
@@ -24,6 +24,7 @@
#define __VM_EVENT_H__
#include <xen/sched.h>
+#include <public/vm_event.h>
/* Clean up on domain destruction */
void vm_event_cleanup(struct domain *d);
@@ -74,6 +75,14 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
void vm_event_vcpu_pause(struct vcpu *v);
void vm_event_vcpu_unpause(struct vcpu *v);
+/*
+ * Monitor vm-events
+ */
+int vm_event_monitor_traps(struct vcpu *v, uint8_t sync,
+ vm_event_request_t *req);
+
+void vm_event_monitor_guest_request(void);
+
#endif /* __VM_EVENT_H__ */