From patchwork Wed Aug 4 16:09:07 2010
X-Patchwork-Submitter: Nadav Har'El
X-Patchwork-Id: 117079
Date: Wed, 4 Aug 2010 19:09:07 +0300
From: "Nadav Har'El" <nyh@math.technion.ac.il>
To: Avi Kivity
Cc: kvm@vger.kernel.org
Subject: Re: [PATCH 13/24] Implement VMREAD and VMWRITE
Message-ID: <20100804160907.GC15156@fermat.math.technion.ac.il>
References: <1276431753-nyh@il.ibm.com>
 <201006131229.o5DCTDhk013030@rice.haifa.ibm.com>
 <4C15F802.7060106@redhat.com>
In-Reply-To: <4C15F802.7060106@redhat.com>

--- .before/arch/x86/kvm/vmx.c	2010-08-04 19:07:21.000000000 +0300
+++ .after/arch/x86/kvm/vmx.c	2010-08-04 19:07:21.000000000 +0300
@@ -4180,6 +4180,184 @@ static int handle_vmclear(struct kvm_vcp
 	return 1;
 }
 
+enum vmcs_field_type {
+	VMCS_FIELD_TYPE_U16 = 0,
+	VMCS_FIELD_TYPE_U64 = 1,
+	VMCS_FIELD_TYPE_U32 = 2,
+	VMCS_FIELD_TYPE_ULONG = 3
+};
+
+static inline int vmcs_field_type(unsigned long field)
+{
+	if (0x1 & field)	/* one of the *_HIGH fields, all are 32 bit */
+		return VMCS_FIELD_TYPE_U32;
+	return (field >> 13) & 0x3;
+}
+
+static inline int vmcs_field_size(int field_type, struct kvm_vcpu *vcpu)
+{
+	switch (field_type) {
+	case VMCS_FIELD_TYPE_U16:
+		return 2;
+	case VMCS_FIELD_TYPE_U32:
+		return 4;
+	case VMCS_FIELD_TYPE_U64:
+		return 8;
+	case VMCS_FIELD_TYPE_ULONG:
+		return is_long_mode(vcpu) ? 8 : 4;
+	}
+	BUG(); /* can never happen */
+}
+
+static inline int vmcs_field_readonly(unsigned long field)
+{
+	return (((field >> 10) & 0x3) == 1);
+}
+
+static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
+				   unsigned long field, u64 *ret)
+{
+	short offset = vmcs_field_to_offset(field);
+	char *p;
+
+	if (offset < 0)
+		return 0;
+
+	p = ((char *)(get_vmcs12_fields(vcpu))) + offset;
+
+	switch (vmcs_field_type(field)) {
+	case VMCS_FIELD_TYPE_ULONG:
+		*ret = *((unsigned long *)p);
+		return 1;
+	case VMCS_FIELD_TYPE_U16:
+		*ret = *((u16 *)p);
+		return 1;
+	case VMCS_FIELD_TYPE_U32:
+		*ret = *((u32 *)p);
+		return 1;
+	case VMCS_FIELD_TYPE_U64:
+		*ret = *((u64 *)p);
+		return 1;
+	default:
+		return 0; /* can never happen. */
+	}
+}
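A side note on the magic numbers above (commentary, not part of the patch): they
follow the VMCS component-encoding layout from the Intel SDM. Bit 0 of a field
encoding selects the high half of a 64-bit field, bits 14:13 give the field
width (what vmcs_field_type() above returns, despite its name), and bits 11:10
give what the SDM calls the field type, where type 1 is the read-only VM-exit
information group (hence vmcs_field_readonly()). A self-contained userspace
sketch of the same decoding, checked against a few well-known encodings from
arch/x86/include/asm/vmx.h:

/* Standalone sketch, not kernel code: decode VMCS field encodings. */
#include <assert.h>
#include <stdio.h>

#define FIELD_WIDTH(f)    (((f) >> 13) & 0x3) /* 0=u16 1=u64 2=u32 3=natural */
#define FIELD_ACC_TYPE(f) (((f) >> 10) & 0x3) /* 1 = VM-exit info, read-only */
#define FIELD_IS_HIGH(f)  ((f) & 0x1)         /* high half of a 64-bit field */

int main(void)
{
	assert(FIELD_WIDTH(0x681e) == 3);    /* GUEST_RIP: natural width    */
	assert(FIELD_ACC_TYPE(0x681e) == 2); /* ... a guest-state field     */
	assert(FIELD_WIDTH(0x4402) == 2);    /* VM_EXIT_REASON: 32 bits     */
	assert(FIELD_ACC_TYPE(0x4402) == 1); /* ... and read-only           */
	assert(FIELD_IS_HIGH(0x2011));       /* TSC_OFFSET_HIGH: odd, so    */
	                                     /* always accessed as 32 bits  */
	printf("encodings decode as expected\n");
	return 0;
}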
+
+static int handle_vmread(struct kvm_vcpu *vcpu)
+{
+	unsigned long field;
+	u64 field_value;
+	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+	gva_t gva = 0;
+
+	if (!nested_vmx_check_permission(vcpu))
+		return 1;
+
+	/* Decode instruction info and find the field to read */
+	field = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
+	if (!vmcs12_read_any(vcpu, field, &field_value)) {
+		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+		skip_emulated_instruction(vcpu);
+		return 1;
+	}
+
+	/*
+	 * Now check whether the request is to put the value in a register
+	 * or in memory. Note that the number of bits actually written is
+	 * 32 or 64 depending on the mode, not on the given field's length.
+	 */
+	if (vmx_instruction_info & (1u << 10)) {
+		kvm_register_write(vcpu, (vmx_instruction_info >> 3) & 0xf,
+				   field_value);
+	} else {
+		if (get_vmx_mem_address(vcpu, exit_qualification,
+					vmx_instruction_info, &gva))
+			return 1;
+		/* ok to use *_system: nested_vmx_check_permission() verified cpl == 0 */
+		kvm_write_guest_virt_system(gva, &field_value,
+					    (is_long_mode(vcpu) ? 8 : 4),
+					    vcpu, NULL);
+	}
+
+	nested_vmx_succeed(vcpu);
+	skip_emulated_instruction(vcpu);
+	return 1;
+}
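For readers decoding the shifts (again commentary, not patch content): the
VMX_INSTRUCTION_INFO value is laid out per the SDM's VM-exit
instruction-information table for VMREAD/VMWRITE. Bits 31:28 name the register
holding the field encoding, bit 10 distinguishes the register form from the
memory form, and bits 6:3 name the value register in the register form. A small
sketch making that layout explicit, with names chosen here for illustration:

/* Sketch: the instruction-info bits used by handle_vmread/handle_vmwrite. */
#include <stdbool.h>
#include <stdint.h>

struct vmx_insn_info {
	unsigned int reg1;     /* bits 6:3   - value register (register form) */
	bool         reg_form; /* bit 10     - 1 = register, 0 = memory       */
	unsigned int reg2;     /* bits 31:28 - register with field encoding   */
};

static struct vmx_insn_info decode_vmx_insn_info(uint32_t info)
{
	struct vmx_insn_info d = {
		.reg1     = (info >> 3) & 0xf,
		.reg_form = info & (1u << 10),
		.reg2     = (info >> 28) & 0xf,
	};
	return d;
}

handle_vmwrite below reads the same three pieces, with reg1 as the source of
the value instead of the destination.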
+
+static int handle_vmwrite(struct kvm_vcpu *vcpu)
+{
+	unsigned long field;
+	u64 field_value = 0;
+	gva_t gva;
+	int field_type;
+	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+	char *p;
+	short offset;
+
+	if (!nested_vmx_check_permission(vcpu))
+		return 1;
+
+	field = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
+
+	if (vmcs_field_readonly(field)) {
+		nested_vmx_failValid(vcpu,
+			VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
+		skip_emulated_instruction(vcpu);
+		return 1;
+	}
+
+	field_type = vmcs_field_type(field);
+
+	offset = vmcs_field_to_offset(field);
+	if (offset < 0) {
+		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+		skip_emulated_instruction(vcpu);
+		return 1;
+	}
+	p = ((char *)get_vmcs12_fields(vcpu)) + offset;
+
+	if (vmx_instruction_info & (1u << 10))
+		field_value = kvm_register_read(vcpu,
+			(vmx_instruction_info >> 3) & 0xf);
+	else {
+		if (get_vmx_mem_address(vcpu, exit_qualification,
+					vmx_instruction_info, &gva))
+			return 1;
+		kvm_read_guest_virt(gva, &field_value,
+			vmcs_field_size(field_type, vcpu), vcpu, NULL);
+	}
+
+	switch (field_type) {
+	case VMCS_FIELD_TYPE_U16:
+		*(u16 *)p = field_value;
+		break;
+	case VMCS_FIELD_TYPE_U32:
+		*(u32 *)p = field_value;
+		break;
+	case VMCS_FIELD_TYPE_U64:
+		*(u64 *)p = field_value;
+		break;
+	case VMCS_FIELD_TYPE_ULONG:
+		*(unsigned long *)p = field_value;
+		break;
+	default:
+		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+		skip_emulated_instruction(vcpu);
+		return 1;
+	}
+
+	nested_vmx_succeed(vcpu);
+	skip_emulated_instruction(vcpu);
+	return 1;
+}
+
 static bool verify_vmcs12_revision(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 {
 	if (vmcs12->revision_id == VMCS12_REVISION)
@@ -4546,9 +4724,9 @@ static int (*kvm_vmx_exit_handlers[])(st
 	[EXIT_REASON_VMLAUNCH]                = handle_vmx_insn,
 	[EXIT_REASON_VMPTRLD]                 = handle_vmptrld,
 	[EXIT_REASON_VMPTRST]                 = handle_vmptrst,
-	[EXIT_REASON_VMREAD]                  = handle_vmx_insn,
+	[EXIT_REASON_VMREAD]                  = handle_vmread,
 	[EXIT_REASON_VMRESUME]                = handle_vmx_insn,
-	[EXIT_REASON_VMWRITE]                 = handle_vmx_insn,
+	[EXIT_REASON_VMWRITE]                 = handle_vmwrite,
 	[EXIT_REASON_VMOFF]                   = handle_vmoff,
 	[EXIT_REASON_VMON]                    = handle_vmon,
 	[EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
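To close the loop (commentary, not patch content): this is roughly what an L1
hypervisor executes to reach the new handlers. The CPU exits to L0 with
EXIT_REASON_VMREAD, and the table change above routes the exit to
handle_vmread(), which answers from the cached vmcs12 fields rather than the
hardware VMCS. A minimal sketch assuming a 64-bit L1 with a current VMCS
loaded; error reporting via CF/ZF is omitted for brevity:

/* Sketch of the L1 guest side; L0 emulates this via handle_vmread(). */
static inline unsigned long l1_vmread(unsigned long field)
{
	unsigned long value;

	/*
	 * Register form: bit 10 of the instruction info will be set, so
	 * handle_vmread() takes the kvm_register_write() branch. AT&T
	 * syntax: the source register holds the field encoding.
	 */
	asm volatile("vmread %1, %0" : "=r"(value) : "r"(field) : "cc");
	return value;
}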