@@ -4180,6 +4180,184 @@ static int handle_vmclear(struct kvm_vcp
return 1;
}
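+
+/*
+ * A VMCS field's encoding describes the field itself: bits 14:13 give
+ * its width and bit 0, when set, selects the high 32 bits of a 64-bit
+ * field. E.g., GUEST_RIP (0x681e) is natural width, IO_BITMAP_A
+ * (0x2000) is 64 bits, and IO_BITMAP_A_HIGH (0x2001) is its upper half.
+ */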
+enum vmcs_field_type {
+ VMCS_FIELD_TYPE_U16 = 0,
+ VMCS_FIELD_TYPE_U64 = 1,
+ VMCS_FIELD_TYPE_U32 = 2,
+ VMCS_FIELD_TYPE_ULONG = 3
+};
+
+static inline int vmcs_field_type(unsigned long field)
+{
+ if (0x1 & field) /* one of the *_HIGH fields, all are 32 bit */
+ return VMCS_FIELD_TYPE_U32;
+ return (field >> 13) & 0x3;
+}
+
+static inline int vmcs_field_size(int field_type, struct kvm_vcpu *vcpu)
+{
+ switch (field_type) {
+ case VMCS_FIELD_TYPE_U16:
+ return 2;
+ case VMCS_FIELD_TYPE_U32:
+ return 4;
+ case VMCS_FIELD_TYPE_U64:
+ return 8;
+ case VMCS_FIELD_TYPE_ULONG:
+ return is_long_mode(vcpu) ? 8 : 4;
+ }
+ BUG(); /* can never happen */
+}
+
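+/*
+ * Bits 11:10 of a field's encoding give the field's type; type 1 is
+ * the VM-exit information fields, which the SDM defines as read-only.
+ */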
+static inline int vmcs_field_readonly(unsigned long field)
+{
+ return (((field >> 10) & 0x3) == 1);
+}
+
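+/*
+ * Read a field from the vmcs12 software image of L1's current VMCS.
+ * Returns true and stores the zero-extended value in *ret on success,
+ * or false if the field has no vmcs12 offset.
+ */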
+static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
+ unsigned long field, u64 *ret)
+{
+ short offset = vmcs_field_to_offset(field);
+ char *p;
+
+ if (offset < 0)
+ return false;
+
+ p = ((char *)(get_vmcs12_fields(vcpu))) + offset;
+
+ switch (vmcs_field_type(field)) {
+ case VMCS_FIELD_TYPE_ULONG:
+ *ret = *((unsigned long *)p);
+ return true;
+ case VMCS_FIELD_TYPE_U16:
+ *ret = *((u16 *)p);
+ return true;
+ case VMCS_FIELD_TYPE_U32:
+ *ret = *((u32 *)p);
+ return true;
+ case VMCS_FIELD_TYPE_U64:
+ *ret = *((u64 *)p);
+ return true;
+ default:
+ return false; /* can never happen. */
+ }
+}
+
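+/*
+ * Emulate the VMREAD instruction that caused L1 to exit: fetch the
+ * requested field from the software vmcs12 and store it into the
+ * register or memory operand. The VM-instruction information field
+ * tells us which register holds the field encoding (bits 31:28),
+ * whether the destination is a register (bit 10 set) or memory, and
+ * which register it is (bits 6:3). For example, L1 lands here after
+ * executing something like (a sketch, not taken from this patch):
+ *
+ *	asm volatile("vmread %1, %0" : "=rm"(value) : "r"(field) : "cc");
+ */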
+static int handle_vmread(struct kvm_vcpu *vcpu)
+{
+ unsigned long field;
+ u64 field_value;
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ gva_t gva = 0;
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ /* decode instruction info and find the field to read */
+ field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+ if (!vmcs12_read_any(vcpu, field, &field_value)) {
+ nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
+ /*
+ * Now check whether to put the value in a register or in memory.
+ * Note that the number of bits actually written is 32 or 64,
+ * depending on the mode, not on the given field's length.
+ */
+ if (vmx_instruction_info & (1u << 10)) {
+ kvm_register_write(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
+ field_value);
+ } else {
+ if (get_vmx_mem_address(vcpu, exit_qualification,
+ vmx_instruction_info, &gva))
+ return 1;
+ /* ok to use *_system, because nested_vmx_check_permission verified cpl=0 */
+ kvm_write_guest_virt_system(gva, &field_value,
+ (is_long_mode(vcpu) ? 8 : 4), vcpu, NULL);
+ }
+
+ nested_vmx_succeed(vcpu);
+ skip_emulated_instruction(vcpu);
+ return 1;
+}
+
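+/*
+ * Emulate the VMWRITE instruction that caused L1 to exit: read the new
+ * value from the register or memory operand and store it into the
+ * corresponding vmcs12 field. Operand decoding mirrors handle_vmread()
+ * above.
+ */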
+static int handle_vmwrite(struct kvm_vcpu *vcpu)
+{
+ unsigned long field;
+ u64 field_value = 0;
+ gva_t gva;
+ int field_type;
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ char *p;
+ short offset;
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+
+ if (vmcs_field_readonly(field)) {
+ nested_vmx_failValid(vcpu,
+ VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
+ field_type = vmcs_field_type(field);
+
+ offset = vmcs_field_to_offset(field);
+ if (offset < 0) {
+ nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+ p = ((char *) get_vmcs12_fields(vcpu)) + offset;
+
+ if (vmx_instruction_info & (1u << 10)) {
+ field_value = kvm_register_read(vcpu,
+ (((vmx_instruction_info) >> 3) & 0xf));
+ } else {
+ if (get_vmx_mem_address(vcpu, exit_qualification,
+ vmx_instruction_info, &gva))
+ return 1;
+ kvm_read_guest_virt(gva, &field_value,
+ vmcs_field_size(field_type, vcpu), vcpu, NULL);
+ }
+
+ switch (field_type) {
+ case VMCS_FIELD_TYPE_U16:
+ *(u16 *)p = field_value;
+ break;
+ case VMCS_FIELD_TYPE_U32:
+ *(u32 *)p = field_value;
+ break;
+ case VMCS_FIELD_TYPE_U64:
+ *(u64 *)p = field_value;
+ break;
+ case VMCS_FIELD_TYPE_ULONG:
+ *(unsigned long *)p = field_value;
+ break;
+ default:
+ nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
+ nested_vmx_succeed(vcpu);
+ skip_emulated_instruction(vcpu);
+ return 1;
+}
+
static bool verify_vmcs12_revision(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
if (vmcs12->revision_id == VMCS12_REVISION)
@@ -4546,9 +4724,9 @@ static int (*kvm_vmx_exit_handlers[])(st
[EXIT_REASON_VMLAUNCH] = handle_vmx_insn,
[EXIT_REASON_VMPTRLD] = handle_vmptrld,
[EXIT_REASON_VMPTRST] = handle_vmptrst,
- [EXIT_REASON_VMREAD] = handle_vmx_insn,
+ [EXIT_REASON_VMREAD] = handle_vmread,
[EXIT_REASON_VMRESUME] = handle_vmx_insn,
- [EXIT_REASON_VMWRITE] = handle_vmx_insn,
+ [EXIT_REASON_VMWRITE] = handle_vmwrite,
[EXIT_REASON_VMOFF] = handle_vmoff,
[EXIT_REASON_VMON] = handle_vmon,
[EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,