@@ -4471,6 +4471,173 @@ static int handle_vmclear(struct kvm_vcp
return 1;
}
+enum vmcs_field_type {
+ VMCS_FIELD_TYPE_U16 = 0,
+ VMCS_FIELD_TYPE_U64 = 1,
+ VMCS_FIELD_TYPE_U32 = 2,
+ VMCS_FIELD_TYPE_ULONG = 3
+};
+
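+/*
+ * Decode a VMCS field encoding (see the Intel SDM): bits 14:13 give the
+ * field width, matching the enum above; bit 0 set selects the high 32
+ * bits of a 64-bit field; and bits 11:10 give the field type, where
+ * type 1 marks the read-only VM-exit information fields.
+ */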
+static inline int vmcs_field_type(unsigned long field)
+{
+ if (0x1 & field) /* one of the *_HIGH fields, all are 32 bit */
+ return VMCS_FIELD_TYPE_U32;
+ return (field >> 13) & 0x3;
+}
+
+static inline int vmcs_field_readonly(unsigned long field)
+{
+ return (((field >> 10) & 0x3) == 1);
+}
+
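+/*
+ * Read any vmcs12 field, zero-extended to 64 bits, into *ret. Returns
+ * false if the field is not recognized (has no vmcs12 offset).
+ */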
+static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
+ unsigned long field, u64 *ret)
+{
+ short offset = vmcs_field_to_offset(field);
+ char *p;
+
+ if (offset < 0)
+ return 0;
+
+ p = ((char *)(get_vmcs12_fields(vcpu))) + offset;
+
+ switch (vmcs_field_type(field)) {
+ case VMCS_FIELD_TYPE_ULONG:
+ *ret = *((unsigned long *)p);
+ return 1;
+ case VMCS_FIELD_TYPE_U16:
+ *ret = *((u16 *)p);
+ return 1;
+ case VMCS_FIELD_TYPE_U32:
+ *ret = *((u32 *)p);
+ return 1;
+ case VMCS_FIELD_TYPE_U64:
+ *ret = *((u64 *)p);
+ return 1;
+ default:
+ return 0; /* can never happen. */
+ }
+}
+
+static int handle_vmread(struct kvm_vcpu *vcpu)
+{
+ unsigned long field;
+ u64 field_value;
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ gva_t gva = 0;
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ /* decode instruction info and find the field to read */
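+ /* per the SDM, bits 31:28 give the register holding the field encoding */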
+ field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+ if (!vmcs12_read_any(vcpu, field, &field_value)) {
+ nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
+ /*
+ * Now check whether the request is to put the value in a register or
+ * in memory. Note that the number of bits actually written is 32 or
+ * 64 depending on the mode, not on the given field's length.
+ */
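+ /* bit 10 of the instruction info: 1 = register (bits 6:3), 0 = memory */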
+ if (vmx_instruction_info & (1u << 10)) {
+ kvm_register_write(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
+ field_value);
+ } else {
+ if (get_vmx_mem_address(vcpu, exit_qualification,
+ vmx_instruction_info, &gva))
+ return 1;
+ /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
+ kvm_write_guest_virt_system(gva, &field_value,
+ (is_long_mode(vcpu) ? 8 : 4), vcpu, NULL);
+ }
+
+ nested_vmx_succeed(vcpu);
+ skip_emulated_instruction(vcpu);
+ return 1;
+}
+
+static int handle_vmwrite(struct kvm_vcpu *vcpu)
+{
+ unsigned long field;
+ u64 field_value = 0;
+ gva_t gva;
+ int field_type;
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ char *p;
+ short offset;
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
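+ /* bit 10 of the instruction info: 1 = register source (bits 6:3) */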
+ if (vmx_instruction_info & (1u << 10))
+ field_value = kvm_register_read(vcpu,
+ (((vmx_instruction_info) >> 3) & 0xf));
+ else {
+ if (get_vmx_mem_address(vcpu, exit_qualification,
+ vmx_instruction_info, &gva))
+ return 1;
+ if (kvm_read_guest_virt(gva, &field_value,
+ (is_long_mode(vcpu) ? 8 : 4), vcpu, NULL)) {
+ kvm_queue_exception(vcpu, PF_VECTOR);
+ return 1;
+ }
+ }
+
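+ /* bits 31:28 give the register holding the field encoding */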
+ field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+
+ if (vmcs_field_readonly(field)) {
+ nested_vmx_failValid(vcpu,
+ VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
+ field_type = vmcs_field_type(field);
+
+ offset = vmcs_field_to_offset(field);
+ if (offset < 0) {
+ nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+ p = ((char *) get_vmcs12_fields(vcpu)) + offset;
+
+ switch (field_type) {
+ case VMCS_FIELD_TYPE_U16:
+ *(u16 *)p = field_value;
+ break;
+ case VMCS_FIELD_TYPE_U32:
+ *(u32 *)p = field_value;
+ break;
+ case VMCS_FIELD_TYPE_U64:
+ *(u64 *)p = field_value;
+ break;
+ case VMCS_FIELD_TYPE_ULONG:
+ *(unsigned long *)p = field_value;
+ break;
+ default:
+ nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
+ nested_vmx_succeed(vcpu);
+ skip_emulated_instruction(vcpu);
+ return 1;
+}
+
/* Emulate the VMPTRLD instruction */
static int handle_vmptrld(struct kvm_vcpu *vcpu)
{
@@ -4580,9 +4747,9 @@ static int (*kvm_vmx_exit_handlers[])(st
[EXIT_REASON_VMLAUNCH] = handle_vmx_insn,
[EXIT_REASON_VMPTRLD] = handle_vmptrld,
[EXIT_REASON_VMPTRST] = handle_vmptrst,
- [EXIT_REASON_VMREAD] = handle_vmx_insn,
+ [EXIT_REASON_VMREAD] = handle_vmread,
[EXIT_REASON_VMRESUME] = handle_vmx_insn,
- [EXIT_REASON_VMWRITE] = handle_vmx_insn,
+ [EXIT_REASON_VMWRITE] = handle_vmwrite,
[EXIT_REASON_VMOFF] = handle_vmoff,
[EXIT_REASON_VMON] = handle_vmon,
[EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,