@@ -39,6 +39,7 @@
#include <asm/vm_event.h>
#include "vmx.h"
+#include "vvmx.h"
/*
* A few notes on virtual NMI and INTR delivery, and interactions with
@@ -34,7 +34,6 @@
#include <asm/hvm/io.h>
#include <asm/hvm/nestedhvm.h>
#include <asm/hvm/vmx/vmx.h>
-#include <asm/hvm/vmx/vvmx.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/flushtlb.h>
#include <asm/monitor.h>
@@ -44,6 +43,7 @@
#include <asm/apic.h>
#include "vmx.h"
+#include "vvmx.h"
static bool_t __read_mostly opt_vpid_enabled = 1;
boolean_param("vpid", opt_vpid_enabled);
@@ -58,6 +58,7 @@
#include "pi.h"
#include "vmx.h"
+#include "vvmx.h"
static bool_t __initdata opt_force_ept;
boolean_param("force-ept", opt_force_ept);
@@ -30,6 +30,7 @@
#include <asm/hvm/nestedhvm.h>
#include "vmx.h"
+#include "vvmx.h"
static DEFINE_PER_CPU(u64 *, vvmcs_buf);
new file mode 100644
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * vvmx.h: Support virtual VMX for nested virtualization.
+ *
+ * Copyright (c) 2010, Intel Corporation.
+ * Author: Qing He <qing.he@intel.com>
+ * Eddie Dong <eddie.dong@intel.com>
+ */
+
+#ifndef __X86_HVM_VMX_VVMX_PRIV_H__
+#define __X86_HVM_VMX_VVMX_PRIV_H__
+
+#include <xen/list.h>
+#include <xen/sched.h>
+#include <xen/types.h>
+
+#include <asm/hvm/vcpu.h>
+#include <asm/hvm/hvm.h>
+#include <asm/hvm/vmx/vmcs.h>
+
+struct vvmcs_list {
+ unsigned long vvmcs_mfn;
+ struct list_head node;
+};
+
+#define vcpu_2_nvmx(v) (vcpu_nestedhvm(v).u.nvmx)
+
+/* bit 1, 2, 4 must be 1 */
+#define VMX_PINBASED_CTLS_DEFAULT1 0x16
+/* bit 1, 4-6,8,13-16,26 must be 1 */
+#define VMX_PROCBASED_CTLS_DEFAULT1 0x401e172
+/* bit 0-8, 10,11,13,14,16,17 must be 1 */
+#define VMX_EXIT_CTLS_DEFAULT1 0x36dff
+/* bit 0-8, and 12 must be 1 */
+#define VMX_ENTRY_CTLS_DEFAULT1 0x11ff
+
+union vmx_inst_info {
+ struct {
+ unsigned int scaling :2; /* bit 0-1 */
+ unsigned int __rsvd0 :1; /* bit 2 */
+ unsigned int reg1 :4; /* bit 3-6 */
+ unsigned int addr_size :3; /* bit 7-9 */
+ unsigned int memreg :1; /* bit 10 */
+ unsigned int __rsvd1 :4; /* bit 11-14 */
+ unsigned int segment :3; /* bit 15-17 */
+ unsigned int index_reg :4; /* bit 18-21 */
+ unsigned int index_reg_invalid :1; /* bit 22 */
+ unsigned int base_reg :4; /* bit 23-26 */
+ unsigned int base_reg_invalid :1; /* bit 27 */
+ unsigned int reg2 :4; /* bit 28-31 */
+ } fields;
+ uint32_t word;
+};
+
+/*
+ * Virtual VMCS layout
+ *
+ * Since physical VMCS layout is unknown, a custom layout is used
+ * for virtual VMCS seen by guest. It occupies a 4k page, and the
+ * field is offset by a 9-bit offset into u64[]. The offset is as
+ * follows, which means every <width, type> pair has a max of 32
+ * fields available.
+ *
+ * 9 7 5 0
+ * --------------------------------
+ * offset: | width | type | index |
+ * --------------------------------
+ *
+ * Also, since the lower range <width=0, type={0,1}> has only one
+ * field: VPID, it is moved to a higher offset (63), and leaves the
+ * lower range to non-indexed field like VMCS revision.
+ *
+ */
+
+struct vvmcs_header {
+ uint32_t revision;
+ uint32_t abort;
+};
+
+union vmcs_encoding {
+ struct {
+ uint32_t access_type : 1;
+ uint32_t index : 9;
+ uint32_t type : 2;
+ uint32_t rsv1 : 1;
+ uint32_t width : 2;
+ uint32_t rsv2 : 17;
+ };
+ uint32_t word;
+};
+
+enum vvmcs_encoding_width {
+ VVMCS_WIDTH_16 = 0,
+ VVMCS_WIDTH_64,
+ VVMCS_WIDTH_32,
+ VVMCS_WIDTH_NATURAL,
+};
+
+enum vvmcs_encoding_type {
+ VVMCS_TYPE_CONTROL = 0,
+ VVMCS_TYPE_RO,
+ VVMCS_TYPE_GSTATE,
+ VVMCS_TYPE_HSTATE,
+};
+
+int cf_check nvmx_vcpu_initialise(struct vcpu *v);
+void cf_check nvmx_vcpu_destroy(struct vcpu *v);
+int cf_check nvmx_vcpu_reset(struct vcpu *v);
+uint64_t cf_check nvmx_vcpu_eptp_base(struct vcpu *v);
+enum hvm_intblk cf_check nvmx_intr_blocked(struct vcpu *v);
+bool cf_check nvmx_intercepts_exception(struct vcpu *v, unsigned int vector,
+ int error_code);
+void cf_check nvmx_domain_relinquish_resources(struct domain *d);
+
+bool cf_check nvmx_ept_enabled(struct vcpu *v);
+
+int cf_check nvmx_hap_walk_L1_p2m(
+ struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
+ uint8_t *p2m_acc, struct npfec npfec);
+
+uint64_t get_vvmcs_virtual(void *vvmcs, uint32_t encoding);
+uint64_t get_vvmcs_real(const struct vcpu *, uint32_t encoding);
+void set_vvmcs_virtual(void *vvmcs, uint32_t encoding, uint64_t val);
+void set_vvmcs_real(const struct vcpu *, uint32_t encoding, uint64_t val);
+enum vmx_insn_errno get_vvmcs_virtual_safe(void *vvmcs, uint32_t encoding,
+ uint64_t *val);
+enum vmx_insn_errno get_vvmcs_real_safe(const struct vcpu *, uint32_t encoding,
+ uint64_t *val);
+enum vmx_insn_errno set_vvmcs_virtual_safe(void *vvmcs, uint32_t encoding,
+ uint64_t val);
+enum vmx_insn_errno set_vvmcs_real_safe(const struct vcpu *, uint32_t encoding,
+ uint64_t val);
+
+#define get_vvmcs(vcpu, encoding) \
+ (cpu_has_vmx_vmcs_shadowing ? \
+ get_vvmcs_real(vcpu, encoding) : \
+ get_vvmcs_virtual(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding))
+
+#define set_vvmcs(vcpu, encoding, val) \
+ (cpu_has_vmx_vmcs_shadowing ? \
+ set_vvmcs_real(vcpu, encoding, val) : \
+ set_vvmcs_virtual(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val))
+
+#define get_vvmcs_safe(vcpu, encoding, val) \
+ (cpu_has_vmx_vmcs_shadowing ? \
+ get_vvmcs_real_safe(vcpu, encoding, val) : \
+ get_vvmcs_virtual_safe(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val))
+
+#define set_vvmcs_safe(vcpu, encoding, val) \
+ (cpu_has_vmx_vmcs_shadowing ? \
+ set_vvmcs_real_safe(vcpu, encoding, val) : \
+ set_vvmcs_virtual_safe(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val))
+
+void nvmx_destroy_vmcs(struct vcpu *v);
+int nvmx_handle_vmx_insn(struct cpu_user_regs *regs, unsigned int exit_reason);
+int nvmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
+
+void nvmx_update_exec_control(struct vcpu *v, uint32_t value);
+void nvmx_update_secondary_exec_control(struct vcpu *v, unsigned long value);
+void nvmx_update_exception_bitmap(struct vcpu *v, unsigned long value);
+void nvmx_switch_guest(void);
+void nvmx_idtv_handling(void);
+uint64_t nvmx_get_tsc_offset(struct vcpu *v);
+int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
+ unsigned int exit_reason);
+void nvmx_set_cr_read_shadow(struct vcpu *v, unsigned int cr);
+
+uint64_t nept_get_ept_vpid_cap(void);
+
+int nept_translate_l2ga(struct vcpu *v, paddr_t l2ga,
+ unsigned int *page_order, uint32_t rwx_acc,
+ unsigned long *l1gfn, uint8_t *p2m_acc,
+ uint64_t *exit_qual, uint32_t *exit_reason);
+int nvmx_cpu_up_prepare(unsigned int cpu);
+void nvmx_cpu_dead(unsigned int cpu);
+
+#endif /* __X86_HVM_VMX_VVMX_PRIV_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
@@ -22,10 +22,10 @@
#ifndef __ASM_X86_HVM_VVMX_H__
#define __ASM_X86_HVM_VVMX_H__
-struct vvmcs_list {
- unsigned long vvmcs_mfn;
- struct list_head node;
-};
+#include <xen/list.h>
+#include <xen/types.h>
+
+#include <asm/hvm/vmx/vmcs.h>
struct nestedvmx {
/*
@@ -42,11 +42,11 @@ struct nestedvmx {
/* deferred nested interrupt */
struct {
unsigned long intr_info;
- u32 error_code;
- u8 source;
+ uint32_t error_code;
+ uint8_t source;
} intr;
struct {
- bool_t enabled;
+ bool enabled;
uint32_t exit_reason;
uint32_t exit_qual;
} ept;
@@ -54,161 +54,10 @@ struct nestedvmx {
struct list_head launched_list;
};
-#define vcpu_2_nvmx(v) (vcpu_nestedhvm(v).u.nvmx)
-
-/* bit 1, 2, 4 must be 1 */
-#define VMX_PINBASED_CTLS_DEFAULT1 0x16
-/* bit 1, 4-6,8,13-16,26 must be 1 */
-#define VMX_PROCBASED_CTLS_DEFAULT1 0x401e172
-/* bit 0-8, 10,11,13,14,16,17 must be 1 */
-#define VMX_EXIT_CTLS_DEFAULT1 0x36dff
-/* bit 0-8, and 12 must be 1 */
-#define VMX_ENTRY_CTLS_DEFAULT1 0x11ff
-
-
-union vmx_inst_info {
- struct {
- unsigned int scaling :2; /* bit 0-1 */
- unsigned int __rsvd0 :1; /* bit 2 */
- unsigned int reg1 :4; /* bit 3-6 */
- unsigned int addr_size :3; /* bit 7-9 */
- unsigned int memreg :1; /* bit 10 */
- unsigned int __rsvd1 :4; /* bit 11-14 */
- unsigned int segment :3; /* bit 15-17 */
- unsigned int index_reg :4; /* bit 18-21 */
- unsigned int index_reg_invalid :1; /* bit 22 */
- unsigned int base_reg :4; /* bit 23-26 */
- unsigned int base_reg_invalid :1; /* bit 27 */
- unsigned int reg2 :4; /* bit 28-31 */
- } fields;
- u32 word;
-};
-
-int cf_check nvmx_vcpu_initialise(struct vcpu *v);
-void cf_check nvmx_vcpu_destroy(struct vcpu *v);
-int cf_check nvmx_vcpu_reset(struct vcpu *v);
-uint64_t cf_check nvmx_vcpu_eptp_base(struct vcpu *v);
-enum hvm_intblk cf_check nvmx_intr_blocked(struct vcpu *v);
-bool cf_check nvmx_intercepts_exception(
- struct vcpu *v, unsigned int vector, int error_code);
-void cf_check nvmx_domain_relinquish_resources(struct domain *d);
-
-bool cf_check nvmx_ept_enabled(struct vcpu *v);
-
#define EPT_TRANSLATE_SUCCEED 0
#define EPT_TRANSLATE_VIOLATION 1
#define EPT_TRANSLATE_MISCONFIG 2
#define EPT_TRANSLATE_RETRY 3
-int cf_check nvmx_hap_walk_L1_p2m(
- struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
- uint8_t *p2m_acc, struct npfec npfec);
-
-/*
- * Virtual VMCS layout
- *
- * Since physical VMCS layout is unknown, a custom layout is used
- * for virtual VMCS seen by guest. It occupies a 4k page, and the
- * field is offset by an 9-bit offset into u64[], The offset is as
- * follow, which means every <width, type> pair has a max of 32
- * fields available.
- *
- * 9 7 5 0
- * --------------------------------
- * offset: | width | type | index |
- * --------------------------------
- *
- * Also, since the lower range <width=0, type={0,1}> has only one
- * field: VPID, it is moved to a higher offset (63), and leaves the
- * lower range to non-indexed field like VMCS revision.
- *
- */
-
-struct vvmcs_header {
- u32 revision;
- u32 abort;
-};
-
-union vmcs_encoding {
- struct {
- u32 access_type : 1;
- u32 index : 9;
- u32 type : 2;
- u32 rsv1 : 1;
- u32 width : 2;
- u32 rsv2 : 17;
- };
- u32 word;
-};
-
-enum vvmcs_encoding_width {
- VVMCS_WIDTH_16 = 0,
- VVMCS_WIDTH_64,
- VVMCS_WIDTH_32,
- VVMCS_WIDTH_NATURAL,
-};
-
-enum vvmcs_encoding_type {
- VVMCS_TYPE_CONTROL = 0,
- VVMCS_TYPE_RO,
- VVMCS_TYPE_GSTATE,
- VVMCS_TYPE_HSTATE,
-};
-
-u64 get_vvmcs_virtual(void *vvmcs, u32 encoding);
-u64 get_vvmcs_real(const struct vcpu *, u32 encoding);
-void set_vvmcs_virtual(void *vvmcs, u32 encoding, u64 val);
-void set_vvmcs_real(const struct vcpu *, u32 encoding, u64 val);
-enum vmx_insn_errno get_vvmcs_virtual_safe(void *vvmcs, u32 encoding, u64 *val);
-enum vmx_insn_errno get_vvmcs_real_safe(const struct vcpu *, u32 encoding,
- u64 *val);
-enum vmx_insn_errno set_vvmcs_virtual_safe(void *vvmcs, u32 encoding, u64 val);
-enum vmx_insn_errno set_vvmcs_real_safe(const struct vcpu *, u32 encoding,
- u64 val);
-
-#define get_vvmcs(vcpu, encoding) \
- (cpu_has_vmx_vmcs_shadowing ? \
- get_vvmcs_real(vcpu, encoding) : \
- get_vvmcs_virtual(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding))
-
-#define set_vvmcs(vcpu, encoding, val) \
- (cpu_has_vmx_vmcs_shadowing ? \
- set_vvmcs_real(vcpu, encoding, val) : \
- set_vvmcs_virtual(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val))
-
-#define get_vvmcs_safe(vcpu, encoding, val) \
- (cpu_has_vmx_vmcs_shadowing ? \
- get_vvmcs_real_safe(vcpu, encoding, val) : \
- get_vvmcs_virtual_safe(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val))
-
-#define set_vvmcs_safe(vcpu, encoding, val) \
- (cpu_has_vmx_vmcs_shadowing ? \
- set_vvmcs_real_safe(vcpu, encoding, val) : \
- set_vvmcs_virtual_safe(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val))
-
-void nvmx_destroy_vmcs(struct vcpu *v);
-int nvmx_handle_vmx_insn(struct cpu_user_regs *regs, unsigned int exit_reason);
-int nvmx_msr_read_intercept(unsigned int msr,
- u64 *msr_content);
-
-void nvmx_update_exec_control(struct vcpu *v, u32 value);
-void nvmx_update_secondary_exec_control(struct vcpu *v,
- unsigned long value);
-void nvmx_update_exception_bitmap(struct vcpu *v, unsigned long value);
-void nvmx_switch_guest(void);
-void nvmx_idtv_handling(void);
-u64 nvmx_get_tsc_offset(struct vcpu *v);
-int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
- unsigned int exit_reason);
-void nvmx_set_cr_read_shadow(struct vcpu *v, unsigned int cr);
-
-uint64_t nept_get_ept_vpid_cap(void);
-
-int nept_translate_l2ga(struct vcpu *v, paddr_t l2ga,
- unsigned int *page_order, uint32_t rwx_acc,
- unsigned long *l1gfn, uint8_t *p2m_acc,
- uint64_t *exit_qual, uint32_t *exit_reason);
-int nvmx_cpu_up_prepare(unsigned int cpu);
-void nvmx_cpu_dead(unsigned int cpu);
#endif /* __ASM_X86_HVM_VVMX_H__ */
Create a new private header in arch/x86/hvm/vmx called vvmx.h and move there all definitions and declarations that are used only by vmx code and don't need to reside in an external header. Take the opportunity to replace u* with uint*_t, bool_t with bool and to re-arrange the header as follows: all structures first, then all variable declarations, all function declarations, and finally all inline functions. No functional change intended. Signed-off-by: Xenia Ragiadakou <burzalodowa@gmail.com> --- Changes in v3: - new patch xen/arch/x86/hvm/vmx/intr.c | 1 + xen/arch/x86/hvm/vmx/vmcs.c | 2 +- xen/arch/x86/hvm/vmx/vmx.c | 1 + xen/arch/x86/hvm/vmx/vvmx.c | 1 + xen/arch/x86/hvm/vmx/vvmx.h | 187 ++++++++++++++++++++++++ xen/arch/x86/include/asm/hvm/vmx/vvmx.h | 165 +-------------------- 6 files changed, 198 insertions(+), 159 deletions(-) create mode 100644 xen/arch/x86/hvm/vmx/vvmx.h