@@ -3,7 +3,7 @@ subdir-y += vmx
obj-y += asid.o
obj-y += emulate.o
-obj-y += event.o
+obj-y += event_x86.o
obj-y += hpet.o
obj-y += hvm.o
obj-y += i8254.o
new file mode 100644
@@ -0,0 +1,51 @@
+/*
+ * arch/x86/hvm/event_x86.c
+ *
+ * Arch-specific hardware virtual machine event abstractions.
+ *
+ * Copyright (c) 2004, Intel Corporation.
+ * Copyright (c) 2005, International Business Machines Corporation.
+ * Copyright (c) 2008, Citrix Systems, Inc.
+ * Copyright (c) 2016, Bitdefender S.R.L.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <xen/hvm/event.h>
+
+void hvm_event_msr(unsigned int msr, uint64_t value)
+{
+ struct vcpu *curr = current;
+
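+ /* Only raise the event if mov-to-MSR monitoring is enabled for the domain. */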
+ if ( curr->domain->arch.monitor.mov_to_msr_enabled )
+ {
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_MOV_TO_MSR,
+ .vcpu_id = curr->vcpu_id,
+ .u.mov_to_msr.msr = msr,
+ .u.mov_to_msr.value = value,
+ };
+
+ hvm_event_traps(curr, 1, &req);
+ }
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
@@ -33,6 +33,7 @@
#include <xen/event.h>
#include <xen/paging.h>
#include <xen/monitor.h>
+#include <xen/hvm/event.h>
#include <xen/cpu.h>
#include <xen/wait.h>
#include <xen/mem_access.h>
@@ -58,7 +59,7 @@
#include <asm/hvm/cacheattr.h>
#include <asm/hvm/trace.h>
#include <asm/hvm/nestedhvm.h>
-#include <asm/hvm/event.h>
+#include <asm/hvm/event_arch.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/svm/svm.h> /* for cpu_has_tsc_ratio */
#include <asm/altp2m.h>
@@ -26,6 +26,7 @@
#include <xen/hypercall.h>
#include <xen/perfc.h>
#include <xen/monitor.h>
+#include <xen/hvm/event.h>
#include <asm/current.h>
#include <asm/io.h>
#include <asm/iocap.h>
@@ -51,7 +52,6 @@
#include <asm/hvm/vpt.h>
#include <public/hvm/save.h>
#include <asm/hvm/trace.h>
-#include <asm/hvm/event.h>
#include <asm/xenoprof.h>
#include <asm/debugger.h>
#include <asm/apic.h>
@@ -67,7 +67,7 @@ obj-$(xenoprof) += xenoprof.o
obj-$(CONFIG_COMPAT) += $(addprefix compat/,domain.o kernel.o memory.o multicall.o tmem_xen.o xlat.o)
-subdir-$(CONFIG_X86) += hvm
+subdir-y += hvm
subdir-$(coverage) += gcov
@@ -1 +1,2 @@
-obj-y += save.o
+obj-$(CONFIG_X86) += save.o
+obj-y += event.o
similarity index 44%
rename from xen/arch/x86/hvm/event.c
rename to xen/common/hvm/event.c
@@ -1,7 +1,7 @@
/*
-* arch/x86/hvm/event.c
+* xen/common/hvm/event.c
*
-* Arch-specific hardware virtual machine event abstractions.
+* Common hardware virtual machine event abstractions.
*
* Copyright (c) 2004, Intel Corporation.
* Copyright (c) 2005, International Business Machines Corporation.
@@ -21,52 +21,22 @@
* this program; If not, see <http://www.gnu.org/licenses/>.
*/
-#include <xen/vm_event.h>
-#include <xen/paging.h>
+#include <xen/hvm/event.h>
+#include <xen/vm_event.h> /* for vm_event_# calls */
#include <xen/monitor.h>
-#include <asm/hvm/event.h>
+#include <asm/hvm/event_arch.h> /* for arch_hvm_event_# calls */
+#ifdef CONFIG_X86
#include <asm/altp2m.h>
-#include <public/vm_event.h>
+#endif
-static void hvm_event_fill_regs(vm_event_request_t *req)
-{
- const struct cpu_user_regs *regs = guest_cpu_user_regs();
- const struct vcpu *curr = current;
-
- req->data.regs.x86.rax = regs->eax;
- req->data.regs.x86.rcx = regs->ecx;
- req->data.regs.x86.rdx = regs->edx;
- req->data.regs.x86.rbx = regs->ebx;
- req->data.regs.x86.rsp = regs->esp;
- req->data.regs.x86.rbp = regs->ebp;
- req->data.regs.x86.rsi = regs->esi;
- req->data.regs.x86.rdi = regs->edi;
-
- req->data.regs.x86.r8 = regs->r8;
- req->data.regs.x86.r9 = regs->r9;
- req->data.regs.x86.r10 = regs->r10;
- req->data.regs.x86.r11 = regs->r11;
- req->data.regs.x86.r12 = regs->r12;
- req->data.regs.x86.r13 = regs->r13;
- req->data.regs.x86.r14 = regs->r14;
- req->data.regs.x86.r15 = regs->r15;
-
- req->data.regs.x86.rflags = regs->eflags;
- req->data.regs.x86.rip = regs->eip;
-
- req->data.regs.x86.msr_efer = curr->arch.hvm_vcpu.guest_efer;
- req->data.regs.x86.cr0 = curr->arch.hvm_vcpu.guest_cr[0];
- req->data.regs.x86.cr3 = curr->arch.hvm_vcpu.guest_cr[3];
- req->data.regs.x86.cr4 = curr->arch.hvm_vcpu.guest_cr[4];
-}
-
-static int hvm_event_traps(uint8_t sync, vm_event_request_t *req)
+int hvm_event_traps(struct vcpu *v,
+ uint8_t sync,
+ vm_event_request_t *req)
{
int rc;
- struct vcpu *curr = current;
- struct domain *currd = curr->domain;
+ struct domain *d = v->domain;
- rc = vm_event_claim_slot(currd, &currd->vm_event->monitor);
+ rc = vm_event_claim_slot(d, &d->vm_event->monitor);
switch ( rc )
{
case 0:
@@ -84,93 +54,75 @@ static int hvm_event_traps(uint8_t sync, vm_event_request_t *req)
if ( sync )
{
req->flags |= VM_EVENT_FLAG_VCPU_PAUSED;
- vm_event_vcpu_pause(curr);
+ vm_event_vcpu_pause(v);
}
- if ( altp2m_active(currd) )
+#ifdef CONFIG_X86
+ if ( altp2m_active(d) )
{
req->flags |= VM_EVENT_FLAG_ALTERNATE_P2M;
- req->altp2m_idx = vcpu_altp2m(curr).p2midx;
+ req->altp2m_idx = vcpu_altp2m(v).p2midx;
}
+#endif
+
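+ /* Register state is filled in arch-side; a no-op on ARM. */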
+ arch_hvm_event_fill_regs(req);
- hvm_event_fill_regs(req);
- vm_event_put_request(currd, &currd->vm_event->monitor, req);
+ vm_event_put_request(d, &d->vm_event->monitor, req);
return 1;
}
-bool_t hvm_event_cr(unsigned int index, unsigned long value, unsigned long old)
+#ifdef CONFIG_HAS_VM_EVENT_WRITE_CTRLREG
+bool_t hvm_event_cr(unsigned int index,
+ unsigned long value,
+ unsigned long old)
{
- struct arch_domain *currad = &current->domain->arch;
+ struct vcpu *curr = current;
+ struct arch_domain *ad = &curr->domain->arch;
unsigned int ctrlreg_bitmask = monitor_ctrlreg_bitmask(index);
- if ( (currad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask) &&
- (!(currad->monitor.write_ctrlreg_onchangeonly & ctrlreg_bitmask) ||
+ if ( (ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask) &&
+ (!(ad->monitor.write_ctrlreg_onchangeonly & ctrlreg_bitmask) ||
value != old) )
{
+ bool_t sync = !!(ad->monitor.write_ctrlreg_sync & ctrlreg_bitmask);
+
vm_event_request_t req = {
.reason = VM_EVENT_REASON_WRITE_CTRLREG,
- .vcpu_id = current->vcpu_id,
+ .vcpu_id = curr->vcpu_id,
.u.write_ctrlreg.index = index,
.u.write_ctrlreg.new_value = value,
.u.write_ctrlreg.old_value = old
};
- hvm_event_traps(currad->monitor.write_ctrlreg_sync & ctrlreg_bitmask,
- &req);
+ hvm_event_traps(curr, sync, &req);
return 1;
}
return 0;
}
+#endif /* HAS_VM_EVENT_WRITE_CTRLREG */
-void hvm_event_msr(unsigned int msr, uint64_t value)
-{
- struct vcpu *curr = current;
- vm_event_request_t req = {
- .reason = VM_EVENT_REASON_MOV_TO_MSR,
- .vcpu_id = curr->vcpu_id,
- .u.mov_to_msr.msr = msr,
- .u.mov_to_msr.value = value,
- };
-
- if ( curr->domain->arch.monitor.mov_to_msr_enabled )
- hvm_event_traps(1, &req);
-}
-
+#ifdef CONFIG_HAS_VM_EVENT_GUEST_REQUEST
void hvm_event_guest_request(void)
{
struct vcpu *curr = current;
- struct arch_domain *currad = &curr->domain->arch;
+ struct arch_domain *ad = &curr->domain->arch;
- if ( currad->monitor.guest_request_enabled )
+ if ( ad->monitor.guest_request_enabled )
{
vm_event_request_t req = {
.reason = VM_EVENT_REASON_GUEST_REQUEST,
.vcpu_id = curr->vcpu_id,
};
- hvm_event_traps(currad->monitor.guest_request_sync, &req);
+ hvm_event_traps(curr, ad->monitor.guest_request_sync, &req);
}
}
+#endif /* HAS_VM_EVENT_GUEST_REQUEST */
-static inline
-uint64_t gfn_of_rip(unsigned long rip)
-{
- struct vcpu *curr = current;
- struct segment_register sreg;
- uint32_t pfec = PFEC_page_present | PFEC_insn_fetch;
-
- hvm_get_segment_register(curr, x86_seg_ss, &sreg);
- if ( sreg.attr.fields.dpl == 3 )
- pfec |= PFEC_user_mode;
-
- hvm_get_segment_register(curr, x86_seg_cs, &sreg);
-
- return (uint64_t) paging_gva_to_gfn(curr, sreg.base + rip, &pfec);
-}
-
-int hvm_event_breakpoint(unsigned long rip,
+#if defined(CONFIG_HAS_VM_EVENT_SINGLESTEP) || \
+ defined(CONFIG_HAS_VM_EVENT_SOFTWARE_BREAKPOINT)
+int hvm_event_breakpoint(unsigned long ip,
enum hvm_event_breakpoint_type type)
{
struct vcpu *curr = current;
@@ -179,19 +131,23 @@ int hvm_event_breakpoint(unsigned long rip,
switch ( type )
{
+#ifdef CONFIG_HAS_VM_EVENT_SOFTWARE_BREAKPOINT
case HVM_EVENT_SOFTWARE_BREAKPOINT:
if ( !ad->monitor.software_breakpoint_enabled )
return 0;
req.reason = VM_EVENT_REASON_SOFTWARE_BREAKPOINT;
- req.u.software_breakpoint.gfn = gfn_of_rip(rip);
+ req.u.software_breakpoint.gfn = arch_hvm_event_gfn_of_ip(ip);
break;
+#endif
+#ifdef CONFIG_HAS_VM_EVENT_SINGLESTEP
case HVM_EVENT_SINGLESTEP_BREAKPOINT:
if ( !ad->monitor.singlestep_enabled )
return 0;
req.reason = VM_EVENT_REASON_SINGLESTEP;
- req.u.singlestep.gfn = gfn_of_rip(rip);
+ req.u.singlestep.gfn = arch_hvm_event_gfn_of_ip(ip);
break;
+#endif
default:
return -EOPNOTSUPP;
@@ -199,8 +155,9 @@ int hvm_event_breakpoint(unsigned long rip,
req.vcpu_id = curr->vcpu_id;
- return hvm_event_traps(1, &req);
+ return hvm_event_traps(curr, 1, &req);
}
+#endif /* HAS_VM_EVENT_SINGLESTEP || HAS_VM_EVENT_SOFTWARE_BREAKPOINT */
/*
* Local variables:
new file mode 100644
@@ -0,0 +1,40 @@
+/*
+ * include/asm-arm/hvm/event_arch.h
+ *
+ * Arch-specific hardware virtual machine event abstractions.
+ *
+ * Copyright (c) 2016, Bitdefender S.R.L.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ARM_HVM_EVENT_ARCH_H__
+#define __ASM_ARM_HVM_EVENT_ARCH_H__
+
+#include <public/vm_event.h> /* for vm_event_request_t */
+
+static inline
+void arch_hvm_event_fill_regs(vm_event_request_t *req)
+{
+ /* Not supported on ARM. */
+}
+
+#endif /* __ASM_ARM_HVM_EVENT_ARCH_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
new file mode 100644
@@ -0,0 +1,93 @@
+/*
+ * include/asm-x86/hvm/event_arch.h
+ *
+ * Arch-specific hardware virtual machine event abstractions.
+ *
+ * Copyright (c) 2016, Bitdefender S.R.L.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_X86_HVM_EVENT_ARCH_H__
+#define __ASM_X86_HVM_EVENT_ARCH_H__
+
+#include <xen/sched.h>
+#include <xen/paging.h>
+#include <public/vm_event.h>
+
+static inline
+void arch_hvm_event_fill_regs(vm_event_request_t *req)
+{
+ const struct cpu_user_regs *regs = guest_cpu_user_regs();
+ const struct vcpu *curr = current;
+
+ req->data.regs.x86.rax = regs->eax;
+ req->data.regs.x86.rcx = regs->ecx;
+ req->data.regs.x86.rdx = regs->edx;
+ req->data.regs.x86.rbx = regs->ebx;
+ req->data.regs.x86.rsp = regs->esp;
+ req->data.regs.x86.rbp = regs->ebp;
+ req->data.regs.x86.rsi = regs->esi;
+ req->data.regs.x86.rdi = regs->edi;
+
+ req->data.regs.x86.r8 = regs->r8;
+ req->data.regs.x86.r9 = regs->r9;
+ req->data.regs.x86.r10 = regs->r10;
+ req->data.regs.x86.r11 = regs->r11;
+ req->data.regs.x86.r12 = regs->r12;
+ req->data.regs.x86.r13 = regs->r13;
+ req->data.regs.x86.r14 = regs->r14;
+ req->data.regs.x86.r15 = regs->r15;
+
+ req->data.regs.x86.rflags = regs->eflags;
+ req->data.regs.x86.rip = regs->eip;
+
+ req->data.regs.x86.msr_efer = curr->arch.hvm_vcpu.guest_efer;
+ req->data.regs.x86.cr0 = curr->arch.hvm_vcpu.guest_cr[0];
+ req->data.regs.x86.cr3 = curr->arch.hvm_vcpu.guest_cr[3];
+ req->data.regs.x86.cr4 = curr->arch.hvm_vcpu.guest_cr[4];
+}
+
+/*
+ * Returns the GFN of the given instruction pointer.
+ * Needed by hvm_event_software_breakpoint.
+ */
+static inline
+uint64_t arch_hvm_event_gfn_of_ip(unsigned long ip)
+{
+ struct vcpu *curr = current;
+ struct segment_register sreg;
+ uint32_t pfec = PFEC_page_present | PFEC_insn_fetch;
+
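+ /* CPL comes from SS.DPL; user-mode fetches must also set PFEC_user_mode. */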
+ hvm_get_segment_register(curr, x86_seg_ss, &sreg);
+ if ( sreg.attr.fields.dpl == 3 )
+ pfec |= PFEC_user_mode;
+
+ hvm_get_segment_register(curr, x86_seg_cs, &sreg);
+
+ return (uint64_t) paging_gva_to_gfn(curr, sreg.base + ip, &pfec);
+}
+
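+/* Implemented arch-side, in x86/hvm/event_x86.c (raises MOV_TO_MSR events). */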
+void hvm_event_msr(unsigned int msr, uint64_t value);
+
+#endif /* __ASM_X86_HVM_EVENT_ARCH_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
similarity index 59%
rename from xen/include/asm-x86/hvm/event.h
rename to xen/include/xen/hvm/event.h
@@ -1,5 +1,9 @@
/*
- * event.h: Hardware virtual machine assist events.
+ * include/xen/hvm/event.h
+ *
+ * Common hardware virtual machine event abstractions.
+ *
+ * Copyright (c) 2016, Bitdefender S.R.L.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -14,30 +18,48 @@
* this program; If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __ASM_X86_HVM_EVENT_H__
-#define __ASM_X86_HVM_EVENT_H__
+#ifndef __HVM_EVENT_H__
+#define __HVM_EVENT_H__
-enum hvm_event_breakpoint_type
-{
- HVM_EVENT_SOFTWARE_BREAKPOINT,
- HVM_EVENT_SINGLESTEP_BREAKPOINT,
-};
+#include <xen/sched.h> /* for struct vcpu */
+#include <public/vm_event.h> /* for vm_event_request_t */
+#ifdef CONFIG_HAS_VM_EVENT_WRITE_CTRLREG
/*
* Called for current VCPU on crX/MSR changes by guest.
* The event might not fire if the client has subscribed to it in onchangeonly
* mode, hence the bool_t return type for control register write events.
*/
-bool_t hvm_event_cr(unsigned int index, unsigned long value,
+bool_t hvm_event_cr(unsigned int index,
+ unsigned long value,
unsigned long old);
+
+#ifdef CONFIG_X86
#define hvm_event_crX(what, new, old) \
hvm_event_cr(VM_EVENT_X86_##what, new, old)
-void hvm_event_msr(unsigned int msr, uint64_t value);
-int hvm_event_breakpoint(unsigned long rip,
+#endif
+#endif /* HAS_VM_EVENT_WRITE_CTRLREG */
+
+#if defined(CONFIG_HAS_VM_EVENT_SINGLESTEP) || \
+ defined(CONFIG_HAS_VM_EVENT_SOFTWARE_BREAKPOINT)
+enum hvm_event_breakpoint_type
+{
+ HVM_EVENT_SOFTWARE_BREAKPOINT,
+ HVM_EVENT_SINGLESTEP_BREAKPOINT,
+};
+
+int hvm_event_breakpoint(unsigned long ip,
enum hvm_event_breakpoint_type type);
+#endif /* HAS_VM_EVENT_SINGLESTEP || HAS_VM_EVENT_SOFTWARE_BREAKPOINT */
+
+#ifdef CONFIG_HAS_VM_EVENT_GUEST_REQUEST
void hvm_event_guest_request(void);
+#endif /* HAS_VM_EVENT_GUEST_REQUEST */
+
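+/*
+ * Common delivery path: fills in register state, claims a slot on the
+ * domain's monitor ring and puts @req there; returns 1 once the request
+ * has been put on the ring (see xen/common/hvm/event.c).
+ */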
+int hvm_event_traps(struct vcpu *v,
+ uint8_t sync,
+ vm_event_request_t *req);
-#endif /* __ASM_X86_HVM_EVENT_H__ */
+#endif /* __HVM_EVENT_H__ */
/*
* Local variables:
1. Moved hvm_event_traps, hvm_event_cr, hvm_event_guest_request and
hvm_event_breakpoint from arch-side to common-side.

1.1. Moved arch/x86/hvm/event.c to common/hvm/event.c
# see files: arch/x86/hvm/Makefile, xen/common/Makefile,
#   xen/common/hvm/Makefile, xen/common/hvm/event.c
# changes:
- moved hvm_event_fill_regs to arch-side (arch_hvm_event_fill_regs)
- added vcpu parameter to hvm_event_traps
- surrounded common hvm_event_* implementations w/ CONFIG_HAS_VM_EVENT_*
- moved hvm_event_msr to arch-side (see x86/hvm/event_x86.c)
- moved rip->gfn code in hvm_event_breakpoint to arch-side (see
  arch_hvm_event_gfn_of_ip) and renamed the rip parameter to ip (i.e. the
  parameter now signifies a generic instruction pointer, rather than the
  x86-64 RIP register)

1.2. Moved asm-x86/hvm/event.h to xen/hvm/event.h
# see files: arch/x86/hvm/hvm.c, arch/x86/hvm/vmx/vmx.c

2. Added x86/hvm/event_x86.c; it will be renamed to event.c in the next
commit (not done in this commit so that git does not mistake it for a
modification of the old event.c, which would have rendered an
unnecessarily bulky diff).
# see files: arch/x86/hvm/Makefile
# implements the x86-specific hvm_event_msr

3. Added asm-x86/hvm/event_arch.h and asm-arm/hvm/event_arch.h (renamed
to event.h in the next commit, for the same reason as in (2.)).
# define/implement: arch_hvm_event_fill_regs, arch_hvm_event_gfn_of_ip
# and hvm_event_msr (x86 side only)

Signed-off-by: Corneliu ZUZU <czuzu@bitdefender.com>
---
 xen/arch/x86/hvm/Makefile                |   2 +-
 xen/arch/x86/hvm/event_x86.c             |  51 ++++++++++++
 xen/arch/x86/hvm/hvm.c                   |   3 +-
 xen/arch/x86/hvm/vmx/vmx.c               |   2 +-
 xen/common/Makefile                      |   2 +-
 xen/common/hvm/Makefile                  |   3 +-
 xen/{arch/x86 => common}/hvm/event.c     | 139 +++++++++++--------------------
 xen/include/asm-arm/hvm/event_arch.h     |  40 +++++++++
 xen/include/asm-x86/hvm/event_arch.h     |  93 +++++++++++++++++++++
 xen/include/{asm-x86 => xen}/hvm/event.h |  46 +++++++---
 10 files changed, 273 insertions(+), 108 deletions(-)
 create mode 100644 xen/arch/x86/hvm/event_x86.c
 rename xen/{arch/x86 => common}/hvm/event.c (44%)
 create mode 100644 xen/include/asm-arm/hvm/event_arch.h
 create mode 100644 xen/include/asm-x86/hvm/event_arch.h
 rename xen/include/{asm-x86 => xen}/hvm/event.h (59%)
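
P.S. for reviewers: a minimal caller sketch (hypothetical, not part of this
patch) of how arch code raises a CR write event through the now-common
helper; vmx_cr0_write_example is an invented name for illustration only:

    #include <xen/hvm/event.h>

    static void vmx_cr0_write_example(unsigned long value, unsigned long old)
    {
        /*
         * hvm_event_crX(CR0, ...) expands to
         * hvm_event_cr(VM_EVENT_X86_CR0, value, old). It returns 1 when a
         * request was put on the monitor ring (pausing the vCPU first if
         * the subscriber asked for a synchronous event), 0 when monitoring
         * is disabled or onchangeonly filtered out an unchanged value.
         */
        if ( hvm_event_crX(CR0, value, old) )
            return; /* a monitor subscriber was notified */

        /* No subscriber: continue handling the write as usual. */
    }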