
[1/4] nested vmx: clean up for vmcs12 read and write

Message ID 1353488677-29937-2-git-send-email-dongxiao.xu@intel.com (mailing list archive)
State New, archived

Commit Message

Xu, Dongxiao Nov. 21, 2012, 9:04 a.m. UTC
abstract vmcs12_read and vmcs12_write functions to do the vmcs12
read/write operations.

Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
---
 arch/x86/kvm/vmx.c |   86 +++++++++++++++++++++++++++-------------------------
 1 files changed, 45 insertions(+), 41 deletions(-)

Comments

Gleb Natapov Nov. 21, 2012, 1:04 p.m. UTC | #1
On Wed, Nov 21, 2012 at 05:04:34PM +0800, Dongxiao Xu wrote:
> abstract vmcs12_read and vmcs12_write functions to do the vmcs12
> read/write operations.
> 
> Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
> ---
>  arch/x86/kvm/vmx.c |   86 +++++++++++++++++++++++++++-------------------------
>  1 files changed, 45 insertions(+), 41 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index f858159..d8670e4 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -5407,32 +5407,67 @@ static inline int vmcs_field_readonly(unsigned long field)
>   * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
>   * 64-bit fields are to be returned).
>   */
> -static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
> -					unsigned long field, u64 *ret)
> +static inline u64 vmcs12_read(struct kvm_vcpu *vcpu, unsigned long field)
>  {
>  	short offset = vmcs_field_to_offset(field);
>  	char *p;
>  
> -	if (offset < 0)
> +	if (offset < 0) {
> +		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> +		skip_emulated_instruction(vcpu);
>  		return 0;
> +	}
>  
>  	p = ((char *)(get_vmcs12(vcpu))) + offset;
>  
>  	switch (vmcs_field_type(field)) {
>  	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
> -		*ret = *((natural_width *)p);
> +		return *((natural_width *)p);
> +	case VMCS_FIELD_TYPE_U16:
> +		return *((u16 *)p);
> +	case VMCS_FIELD_TYPE_U32:
> +		return *((u32 *)p);
> +	case VMCS_FIELD_TYPE_U64:
> +		return *((u64 *)p);
> +	default:
> +		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> +		skip_emulated_instruction(vcpu);
> +		return 0; /* can never happen. */
> +	}
> +}
> +
> +static inline int vmcs12_write(struct kvm_vcpu *vcpu,
> +			unsigned long field,
> +			u64 value)
> +{
> +	short offset = vmcs_field_to_offset(field);
> +	char *p;
> +
> +	if (offset < 0) {
> +		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> +		skip_emulated_instruction(vcpu);
> +		return 0;
> +	}
> +
Shouldn't the vmcs_field_readonly() check be in vmcs12_write() instead of
in the caller?
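
For illustration, the check being suggested would sit at the top of
vmcs12_write(), before the offset lookup. This is a hypothetical sketch
only, not part of this patch (the error code below is the one the
existing handle_vmwrite() path uses for the read-only case, as far as I
can tell):

	if (vmcs_field_readonly(field)) {
		nested_vmx_failValid(vcpu,
			VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
		skip_emulated_instruction(vcpu);
		return 0;
	}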

> +	p = ((char *)(get_vmcs12(vcpu))) + offset;
> +
> +	switch (vmcs_field_type(field)) {
> +	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
> +		*(natural_width *)p = value;
>  		return 1;
>  	case VMCS_FIELD_TYPE_U16:
> -		*ret = *((u16 *)p);
> +		*(u16 *)p = value;
>  		return 1;
>  	case VMCS_FIELD_TYPE_U32:
> -		*ret = *((u32 *)p);
> +		*(u32 *)p = value;
>  		return 1;
>  	case VMCS_FIELD_TYPE_U64:
> -		*ret = *((u64 *)p);
> +		*(u64 *)p = value;
>  		return 1;
>  	default:
> -		return 0; /* can never happen. */
> +		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> +		skip_emulated_instruction(vcpu);
> +		return 0;
>  	}
>  }
>  
> @@ -5466,11 +5501,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
>  	/* Decode instruction info and find the field to read */
>  	field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
>  	/* Read the field, zero-extended to a u64 field_value */
> -	if (!vmcs12_read_any(vcpu, field, &field_value)) {
> -		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> -		skip_emulated_instruction(vcpu);
> -		return 1;
> -	}
> +	field_value = vmcs12_read(vcpu, field);
>  	/*
>  	 * Now copy part of this value to register or memory, as requested.
>  	 * Note that the number of bits actually copied is 32 or 64 depending
> @@ -5500,8 +5531,6 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
>  	gva_t gva;
>  	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
>  	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
> -	char *p;
> -	short offset;
>  	/* The value to write might be 32 or 64 bits, depending on L1's long
>  	 * mode, and eventually we need to write that into a field of several
>  	 * possible lengths. The code below first zero-extends the value to 64
> @@ -5537,33 +5566,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
>  		skip_emulated_instruction(vcpu);
>  		return 1;
>  	}
> -
> -	offset = vmcs_field_to_offset(field);
> -	if (offset < 0) {
> -		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> -		skip_emulated_instruction(vcpu);
> -		return 1;
> -	}
> -	p = ((char *) get_vmcs12(vcpu)) + offset;
> -
> -	switch (vmcs_field_type(field)) {
> -	case VMCS_FIELD_TYPE_U16:
> -		*(u16 *)p = field_value;
> -		break;
> -	case VMCS_FIELD_TYPE_U32:
> -		*(u32 *)p = field_value;
> -		break;
> -	case VMCS_FIELD_TYPE_U64:
> -		*(u64 *)p = field_value;
> -		break;
> -	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
> -		*(natural_width *)p = field_value;
> -		break;
> -	default:
> -		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> -		skip_emulated_instruction(vcpu);
> +	if (!vmcs12_write(vcpu, field, field_value))
>  		return 1;
> -	}
>  
>  	nested_vmx_succeed(vcpu);
>  	skip_emulated_instruction(vcpu);
> -- 
> 1.7.1

--
			Gleb.
Gleb Natapov Nov. 21, 2012, 1:27 p.m. UTC | #2
On Wed, Nov 21, 2012 at 05:04:34PM +0800, Dongxiao Xu wrote:
> abstract vmcs12_read and vmcs12_write functions to do the vmcs12
> read/write operations.
> 
> Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
> ---
>  arch/x86/kvm/vmx.c |   86 +++++++++++++++++++++++++++-------------------------
>  1 files changed, 45 insertions(+), 41 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index f858159..d8670e4 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -5407,32 +5407,67 @@ static inline int vmcs_field_readonly(unsigned long field)
>   * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
>   * 64-bit fields are to be returned).
>   */
> -static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
> -					unsigned long field, u64 *ret)
> +static inline u64 vmcs12_read(struct kvm_vcpu *vcpu, unsigned long field)
>  {
>  	short offset = vmcs_field_to_offset(field);
>  	char *p;
>  
> -	if (offset < 0)
> +	if (offset < 0) {
> +		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> +		skip_emulated_instruction(vcpu);
>  		return 0;
> +	}
>  
>  	p = ((char *)(get_vmcs12(vcpu))) + offset;
>  
>  	switch (vmcs_field_type(field)) {
>  	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
> -		*ret = *((natural_width *)p);
> +		return *((natural_width *)p);
> +	case VMCS_FIELD_TYPE_U16:
> +		return *((u16 *)p);
> +	case VMCS_FIELD_TYPE_U32:
> +		return *((u32 *)p);
> +	case VMCS_FIELD_TYPE_U64:
> +		return *((u64 *)p);
> +	default:
> +		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> +		skip_emulated_instruction(vcpu);
> +		return 0; /* can never happen. */
> +	}
> +}
> +
> +static inline int vmcs12_write(struct kvm_vcpu *vcpu,
> +			unsigned long field,
> +			u64 value)
> +{
> +	short offset = vmcs_field_to_offset(field);
> +	char *p;
> +
> +	if (offset < 0) {
> +		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> +		skip_emulated_instruction(vcpu);
> +		return 0;
> +	}
> +
> +	p = ((char *)(get_vmcs12(vcpu))) + offset;
> +
> +	switch (vmcs_field_type(field)) {
> +	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
> +		*(natural_width *)p = value;
>  		return 1;
>  	case VMCS_FIELD_TYPE_U16:
> -		*ret = *((u16 *)p);
> +		*(u16 *)p = value;
>  		return 1;
>  	case VMCS_FIELD_TYPE_U32:
> -		*ret = *((u32 *)p);
> +		*(u32 *)p = value;
>  		return 1;
>  	case VMCS_FIELD_TYPE_U64:
> -		*ret = *((u64 *)p);
> +		*(u64 *)p = value;
>  		return 1;
>  	default:
> -		return 0; /* can never happen. */
> +		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> +		skip_emulated_instruction(vcpu);
> +		return 0;
>  	}
>  }
>  
> @@ -5466,11 +5501,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
>  	/* Decode instruction info and find the field to read */
>  	field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
>  	/* Read the field, zero-extended to a u64 field_value */
> -	if (!vmcs12_read_any(vcpu, field, &field_value)) {
> -		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> -		skip_emulated_instruction(vcpu);
> -		return 1;
> -	}
> +	field_value = vmcs12_read(vcpu, field);
You do not handle failure here and always write back field_value even if
vmcs12_read() failed. Actually, it is now impossible to detect a failure:
the call to nested_vmx_failValid() in vmcs12_read() will be overwritten by
the call to nested_vmx_succeed() at the end of handle_vmread(), and
skip_emulated_instruction() will be called twice.

> +             skip_emulated_instruction(vcpu);

>  	/*
>  	 * Now copy part of this value to register or memory, as requested.
>  	 * Note that the number of bits actually copied is 32 or 64 depending
> @@ -5500,8 +5531,6 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
>  	gva_t gva;
>  	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
>  	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
> -	char *p;
> -	short offset;
>  	/* The value to write might be 32 or 64 bits, depending on L1's long
>  	 * mode, and eventually we need to write that into a field of several
>  	 * possible lengths. The code below first zero-extends the value to 64
> @@ -5537,33 +5566,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
>  		skip_emulated_instruction(vcpu);
>  		return 1;
>  	}
> -
> -	offset = vmcs_field_to_offset(field);
> -	if (offset < 0) {
> -		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> -		skip_emulated_instruction(vcpu);
> -		return 1;
> -	}
> -	p = ((char *) get_vmcs12(vcpu)) + offset;
> -
> -	switch (vmcs_field_type(field)) {
> -	case VMCS_FIELD_TYPE_U16:
> -		*(u16 *)p = field_value;
> -		break;
> -	case VMCS_FIELD_TYPE_U32:
> -		*(u32 *)p = field_value;
> -		break;
> -	case VMCS_FIELD_TYPE_U64:
> -		*(u64 *)p = field_value;
> -		break;
> -	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
> -		*(natural_width *)p = field_value;
> -		break;
> -	default:
> -		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> -		skip_emulated_instruction(vcpu);
> +	if (!vmcs12_write(vcpu, field, field_value))
>  		return 1;
> -	}
>  
>  	nested_vmx_succeed(vcpu);
>  	skip_emulated_instruction(vcpu);
> -- 
> 1.7.1

--
			Gleb.
Orit Wasserman Nov. 21, 2012, 1:38 p.m. UTC | #3
On 11/21/2012 11:04 AM, Dongxiao Xu wrote:
> abstract vmcs12_read and vmcs12_write functions to do the vmcs12
> read/write operations.
> 
> Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
> ---
>  arch/x86/kvm/vmx.c |   86 +++++++++++++++++++++++++++-------------------------
>  1 files changed, 45 insertions(+), 41 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index f858159..d8670e4 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -5407,32 +5407,67 @@ static inline int vmcs_field_readonly(unsigned long field)
>   * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
>   * 64-bit fields are to be returned).
>   */
> -static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
> -					unsigned long field, u64 *ret)
> +static inline u64 vmcs12_read(struct kvm_vcpu *vcpu, unsigned long field)
>  {
>  	short offset = vmcs_field_to_offset(field);
>  	char *p;
>  
> -	if (offset < 0)
> +	if (offset < 0) {
> +		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> +		skip_emulated_instruction(vcpu);
>  		return 0;
> +	}
>  
>  	p = ((char *)(get_vmcs12(vcpu))) + offset;
>  
>  	switch (vmcs_field_type(field)) {
>  	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
> -		*ret = *((natural_width *)p);
> +		return *((natural_width *)p);
> +	case VMCS_FIELD_TYPE_U16:
> +		return *((u16 *)p);
> +	case VMCS_FIELD_TYPE_U32:
> +		return *((u32 *)p);
> +	case VMCS_FIELD_TYPE_U64:
> +		return *((u64 *)p);
> +	default:
> +		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> +		skip_emulated_instruction(vcpu);
> +		return 0; /* can never happen. */
> +	}
> +}
> +
> +static inline int vmcs12_write(struct kvm_vcpu *vcpu,
> +			unsigned long field,
> +			u64 value)
> +{
> +	short offset = vmcs_field_to_offset(field);
> +	char *p;
> +
> +	if (offset < 0) {
> +		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> +		skip_emulated_instruction(vcpu);
> +		return 0;
> +	}
> +
> +	p = ((char *)(get_vmcs12(vcpu))) + offset;
> +
> +	switch (vmcs_field_type(field)) {
> +	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
> +		*(natural_width *)p = value;
>  		return 1;
>  	case VMCS_FIELD_TYPE_U16:
> -		*ret = *((u16 *)p);
> +		*(u16 *)p = value;
>  		return 1;
>  	case VMCS_FIELD_TYPE_U32:
> -		*ret = *((u32 *)p);
> +		*(u32 *)p = value;
>  		return 1;
>  	case VMCS_FIELD_TYPE_U64:
> -		*ret = *((u64 *)p);
> +		*(u64 *)p = value;
>  		return 1;
>  	default:
> -		return 0; /* can never happen. */
> +		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> +		skip_emulated_instruction(vcpu);
> +		return 0;
>  	}
>  }
>  
> @@ -5466,11 +5501,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
>  	/* Decode instruction info and find the field to read */
>  	field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
>  	/* Read the field, zero-extended to a u64 field_value */
> -	if (!vmcs12_read_any(vcpu, field, &field_value)) {
> -		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> -		skip_emulated_instruction(vcpu);
> -		return 1;
> -	}
> +	field_value = vmcs12_read(vcpu, field);
If you had an error here, you would still continue running the function,
override the failure (by calling nested_vmx_succeed()), and skip the
emulated instruction twice. You need to detect that there was an error
and return (as in vmwrite).
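
For illustration only, one way handle_vmread() could see the failure
again, assuming vmcs12_read() were given a success flag (the "ok"
out-parameter below is hypothetical, not something in this patch):

	bool ok;

	field_value = vmcs12_read(vcpu, field, &ok);
	if (!ok) {
		/*
		 * vmcs12_read() already called nested_vmx_failValid() and
		 * skip_emulated_instruction(), so return here instead of
		 * falling through to nested_vmx_succeed() and a second
		 * skip_emulated_instruction().
		 */
		return 1;
	}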

Orit
>  	/*
>  	 * Now copy part of this value to register or memory, as requested.
>  	 * Note that the number of bits actually copied is 32 or 64 depending
> @@ -5500,8 +5531,6 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
>  	gva_t gva;
>  	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
>  	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
> -	char *p;
> -	short offset;
>  	/* The value to write might be 32 or 64 bits, depending on L1's long
>  	 * mode, and eventually we need to write that into a field of several
>  	 * possible lengths. The code below first zero-extends the value to 64
> @@ -5537,33 +5566,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
>  		skip_emulated_instruction(vcpu);
>  		return 1;
>  	}
> -
> -	offset = vmcs_field_to_offset(field);
> -	if (offset < 0) {
> -		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> -		skip_emulated_instruction(vcpu);
> -		return 1;
> -	}
> -	p = ((char *) get_vmcs12(vcpu)) + offset;
> -
> -	switch (vmcs_field_type(field)) {
> -	case VMCS_FIELD_TYPE_U16:
> -		*(u16 *)p = field_value;
> -		break;
> -	case VMCS_FIELD_TYPE_U32:
> -		*(u32 *)p = field_value;
> -		break;
> -	case VMCS_FIELD_TYPE_U64:
> -		*(u64 *)p = field_value;
> -		break;
> -	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
> -		*(natural_width *)p = field_value;
> -		break;
> -	default:
> -		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> -		skip_emulated_instruction(vcpu);
> +	if (!vmcs12_write(vcpu, field, field_value))
>  		return 1;
> -	}
>  
>  	nested_vmx_succeed(vcpu);
>  	skip_emulated_instruction(vcpu);
> 

Xu, Dongxiao Nov. 22, 2012, 1:07 a.m. UTC | #4
> -----Original Message-----
> From: Gleb Natapov [mailto:gleb@redhat.com]
> Sent: Wednesday, November 21, 2012 9:04 PM
> To: Xu, Dongxiao
> Cc: kvm@vger.kernel.org; mtosatti@redhat.com
> Subject: Re: [PATCH 1/4] nested vmx: clean up for vmcs12 read and write
> 
> On Wed, Nov 21, 2012 at 05:04:34PM +0800, Dongxiao Xu wrote:
> > abstract vmcs12_read and vmcs12_write functions to do the vmcs12
> > read/write operations.
> >
> > Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
> > ---
> >  arch/x86/kvm/vmx.c |   86
> +++++++++++++++++++++++++++-------------------------
> >  1 files changed, 45 insertions(+), 41 deletions(-)
> >
> > diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index
> > f858159..d8670e4 100644
> > --- a/arch/x86/kvm/vmx.c
> > +++ b/arch/x86/kvm/vmx.c
> > @@ -5407,32 +5407,67 @@ static inline int vmcs_field_readonly(unsigned
> long field)
> >   * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
> >   * 64-bit fields are to be returned).
> >   */
> > -static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
> > -					unsigned long field, u64 *ret)
> > +static inline u64 vmcs12_read(struct kvm_vcpu *vcpu, unsigned long
> > +field)
> >  {
> >  	short offset = vmcs_field_to_offset(field);
> >  	char *p;
> >
> > -	if (offset < 0)
> > +	if (offset < 0) {
> > +		nested_vmx_failValid(vcpu,
> VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > +		skip_emulated_instruction(vcpu);
> >  		return 0;
> > +	}
> >
> >  	p = ((char *)(get_vmcs12(vcpu))) + offset;
> >
> >  	switch (vmcs_field_type(field)) {
> >  	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
> > -		*ret = *((natural_width *)p);
> > +		return *((natural_width *)p);
> > +	case VMCS_FIELD_TYPE_U16:
> > +		return *((u16 *)p);
> > +	case VMCS_FIELD_TYPE_U32:
> > +		return *((u32 *)p);
> > +	case VMCS_FIELD_TYPE_U64:
> > +		return *((u64 *)p);
> > +	default:
> > +		nested_vmx_failValid(vcpu,
> VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > +		skip_emulated_instruction(vcpu);
> > +		return 0; /* can never happen. */
> > +	}
> > +}
> > +
> > +static inline int vmcs12_write(struct kvm_vcpu *vcpu,
> > +			unsigned long field,
> > +			u64 value)
> > +{
> > +	short offset = vmcs_field_to_offset(field);
> > +	char *p;
> > +
> > +	if (offset < 0) {
> > +		nested_vmx_failValid(vcpu,
> VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > +		skip_emulated_instruction(vcpu);
> > +		return 0;
> > +	}
> > +
> Shouldn't vmcs_field_readonly() check be in vmcs12_write() instead of a caller?

Well, as you can see from a later patch, we construct vmcs12 through the
vmcs12_write() function. Some fields, such as the exit reason and the
exit qualification, need to be written into the vmcs12 area. Therefore we
cannot put the read-only check into the vmcs12_write() function itself.
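
For example (a hypothetical sketch of that internal use; the exact fields
and call sites in PATCH 3/4 may differ), a nested vmexit path has to store
fields that are read-only from the guest's point of view:

	/*
	 * KVM itself filling guest-read-only fields while building vmcs12;
	 * a read-only check inside vmcs12_write() would reject these stores.
	 */
	vmcs12_write(vcpu, VM_EXIT_REASON, vmcs_read32(VM_EXIT_REASON));
	vmcs12_write(vcpu, EXIT_QUALIFICATION, vmcs_readl(EXIT_QUALIFICATION));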

Thanks,
Dongxiao

> 
> > +	p = ((char *)(get_vmcs12(vcpu))) + offset;
> > +
> > +	switch (vmcs_field_type(field)) {
> > +	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
> > +		*(natural_width *)p = value;
> >  		return 1;
> >  	case VMCS_FIELD_TYPE_U16:
> > -		*ret = *((u16 *)p);
> > +		*(u16 *)p = value;
> >  		return 1;
> >  	case VMCS_FIELD_TYPE_U32:
> > -		*ret = *((u32 *)p);
> > +		*(u32 *)p = value;
> >  		return 1;
> >  	case VMCS_FIELD_TYPE_U64:
> > -		*ret = *((u64 *)p);
> > +		*(u64 *)p = value;
> >  		return 1;
> >  	default:
> > -		return 0; /* can never happen. */
> > +		nested_vmx_failValid(vcpu,
> VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > +		skip_emulated_instruction(vcpu);
> > +		return 0;
> >  	}
> >  }
> >
> > @@ -5466,11 +5501,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
> >  	/* Decode instruction info and find the field to read */
> >  	field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
> >  	/* Read the field, zero-extended to a u64 field_value */
> > -	if (!vmcs12_read_any(vcpu, field, &field_value)) {
> > -		nested_vmx_failValid(vcpu,
> VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > -		skip_emulated_instruction(vcpu);
> > -		return 1;
> > -	}
> > +	field_value = vmcs12_read(vcpu, field);
> >  	/*
> >  	 * Now copy part of this value to register or memory, as requested.
> >  	 * Note that the number of bits actually copied is 32 or 64
> > depending @@ -5500,8 +5531,6 @@ static int handle_vmwrite(struct
> kvm_vcpu *vcpu)
> >  	gva_t gva;
> >  	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
> >  	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
> > -	char *p;
> > -	short offset;
> >  	/* The value to write might be 32 or 64 bits, depending on L1's long
> >  	 * mode, and eventually we need to write that into a field of several
> >  	 * possible lengths. The code below first zero-extends the value to
> > 64 @@ -5537,33 +5566,8 @@ static int handle_vmwrite(struct kvm_vcpu
> *vcpu)
> >  		skip_emulated_instruction(vcpu);
> >  		return 1;
> >  	}
> > -
> > -	offset = vmcs_field_to_offset(field);
> > -	if (offset < 0) {
> > -		nested_vmx_failValid(vcpu,
> VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > -		skip_emulated_instruction(vcpu);
> > -		return 1;
> > -	}
> > -	p = ((char *) get_vmcs12(vcpu)) + offset;
> > -
> > -	switch (vmcs_field_type(field)) {
> > -	case VMCS_FIELD_TYPE_U16:
> > -		*(u16 *)p = field_value;
> > -		break;
> > -	case VMCS_FIELD_TYPE_U32:
> > -		*(u32 *)p = field_value;
> > -		break;
> > -	case VMCS_FIELD_TYPE_U64:
> > -		*(u64 *)p = field_value;
> > -		break;
> > -	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
> > -		*(natural_width *)p = field_value;
> > -		break;
> > -	default:
> > -		nested_vmx_failValid(vcpu,
> VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > -		skip_emulated_instruction(vcpu);
> > +	if (!vmcs12_write(vcpu, field, field_value))
> >  		return 1;
> > -	}
> >
> >  	nested_vmx_succeed(vcpu);
> >  	skip_emulated_instruction(vcpu);
> > --
> > 1.7.1
> 
> --
> 			Gleb.
Xu, Dongxiao Nov. 22, 2012, 3:16 a.m. UTC | #5
> -----Original Message-----
> From: Gleb Natapov [mailto:gleb@redhat.com]
> Sent: Wednesday, November 21, 2012 9:27 PM
> To: Xu, Dongxiao
> Cc: kvm@vger.kernel.org; mtosatti@redhat.com
> Subject: Re: [PATCH 1/4] nested vmx: clean up for vmcs12 read and write
> 
> On Wed, Nov 21, 2012 at 05:04:34PM +0800, Dongxiao Xu wrote:
> > abstract vmcs12_read and vmcs12_write functions to do the vmcs12
> > read/write operations.
> >
> > Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
> > ---
> >  arch/x86/kvm/vmx.c |   86
> +++++++++++++++++++++++++++-------------------------
> >  1 files changed, 45 insertions(+), 41 deletions(-)
> >
> > diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index
> > f858159..d8670e4 100644
> > --- a/arch/x86/kvm/vmx.c
> > +++ b/arch/x86/kvm/vmx.c
> > @@ -5407,32 +5407,67 @@ static inline int vmcs_field_readonly(unsigned
> long field)
> >   * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
> >   * 64-bit fields are to be returned).
> >   */
> > -static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
> > -					unsigned long field, u64 *ret)
> > +static inline u64 vmcs12_read(struct kvm_vcpu *vcpu, unsigned long
> > +field)
> >  {
> >  	short offset = vmcs_field_to_offset(field);
> >  	char *p;
> >
> > -	if (offset < 0)
> > +	if (offset < 0) {
> > +		nested_vmx_failValid(vcpu,
> VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > +		skip_emulated_instruction(vcpu);
> >  		return 0;
> > +	}
> >
> >  	p = ((char *)(get_vmcs12(vcpu))) + offset;
> >
> >  	switch (vmcs_field_type(field)) {
> >  	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
> > -		*ret = *((natural_width *)p);
> > +		return *((natural_width *)p);
> > +	case VMCS_FIELD_TYPE_U16:
> > +		return *((u16 *)p);
> > +	case VMCS_FIELD_TYPE_U32:
> > +		return *((u32 *)p);
> > +	case VMCS_FIELD_TYPE_U64:
> > +		return *((u64 *)p);
> > +	default:
> > +		nested_vmx_failValid(vcpu,
> VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > +		skip_emulated_instruction(vcpu);
> > +		return 0; /* can never happen. */
> > +	}
> > +}
> > +
> > +static inline int vmcs12_write(struct kvm_vcpu *vcpu,
> > +			unsigned long field,
> > +			u64 value)
> > +{
> > +	short offset = vmcs_field_to_offset(field);
> > +	char *p;
> > +
> > +	if (offset < 0) {
> > +		nested_vmx_failValid(vcpu,
> VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > +		skip_emulated_instruction(vcpu);
> > +		return 0;
> > +	}
> > +
> > +	p = ((char *)(get_vmcs12(vcpu))) + offset;
> > +
> > +	switch (vmcs_field_type(field)) {
> > +	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
> > +		*(natural_width *)p = value;
> >  		return 1;
> >  	case VMCS_FIELD_TYPE_U16:
> > -		*ret = *((u16 *)p);
> > +		*(u16 *)p = value;
> >  		return 1;
> >  	case VMCS_FIELD_TYPE_U32:
> > -		*ret = *((u32 *)p);
> > +		*(u32 *)p = value;
> >  		return 1;
> >  	case VMCS_FIELD_TYPE_U64:
> > -		*ret = *((u64 *)p);
> > +		*(u64 *)p = value;
> >  		return 1;
> >  	default:
> > -		return 0; /* can never happen. */
> > +		nested_vmx_failValid(vcpu,
> VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > +		skip_emulated_instruction(vcpu);
> > +		return 0;
> >  	}
> >  }
> >
> > @@ -5466,11 +5501,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
> >  	/* Decode instruction info and find the field to read */
> >  	field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
> >  	/* Read the field, zero-extended to a u64 field_value */
> > -	if (!vmcs12_read_any(vcpu, field, &field_value)) {
> > -		nested_vmx_failValid(vcpu,
> VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > -		skip_emulated_instruction(vcpu);
> > -		return 1;
> > -	}
> > +	field_value = vmcs12_read(vcpu, field);
> You do not handle failure here and always write back field_value even if
> vmcs12_read() failed. Actually now it is impossible to detect a failure. Call to
> nested_vmx_failValid() in vmcs12_read() will be overwritten by call to
> nested_vmx_succeed() at the end of
> handle_vmread() and skip_emulated_instruction() will be called twice.

Thanks to Gleb and Orit for raising this issue.

What about moving the offset check out of the vmcs12_read() and
vmcs12_write() functions and putting it directly in handle_vmread() and
handle_vmwrite()?
I think we only need to do the offset error check in handle_vmread() and
handle_vmwrite(), since they emulate the correct behavior for the guest
VMM. For example, if the guest VMM reads a field that is not valid or
writes a field that is read-only, the emulation code in handle_vmread()
and handle_vmwrite() needs to raise an error to the guest VMM.
For other callers of vmcs12_read() and vmcs12_write() in the KVM
hypervisor (see PATCH 3/4), the caller needs to ensure that the field is
valid; otherwise it is a bug in the KVM hypervisor itself.
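
A rough sketch of that direction (illustrative only, not the revised
patch): handle_vmread() and handle_vmwrite() validate the field and
emulate the failure themselves, while vmcs12_read() and vmcs12_write()
simply trust that the field is valid:

	/* in handle_vmread()/handle_vmwrite(), before touching vmcs12 */
	if (vmcs_field_to_offset(field) < 0) {
		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
		skip_emulated_instruction(vcpu);
		return 1;
	}

	field_value = vmcs12_read(vcpu, field);	/* assumes a valid field */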
Does that make sense? If so, I will revise this patch accordingly.

Thanks,
Dongxiao

> 
> > +             skip_emulated_instruction(vcpu);
> 
> >  	/*
> >  	 * Now copy part of this value to register or memory, as requested.
> >  	 * Note that the number of bits actually copied is 32 or 64
> > depending @@ -5500,8 +5531,6 @@ static int handle_vmwrite(struct
> kvm_vcpu *vcpu)
> >  	gva_t gva;
> >  	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
> >  	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
> > -	char *p;
> > -	short offset;
> >  	/* The value to write might be 32 or 64 bits, depending on L1's long
> >  	 * mode, and eventually we need to write that into a field of several
> >  	 * possible lengths. The code below first zero-extends the value to
> > 64 @@ -5537,33 +5566,8 @@ static int handle_vmwrite(struct kvm_vcpu
> *vcpu)
> >  		skip_emulated_instruction(vcpu);
> >  		return 1;
> >  	}
> > -
> > -	offset = vmcs_field_to_offset(field);
> > -	if (offset < 0) {
> > -		nested_vmx_failValid(vcpu,
> VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > -		skip_emulated_instruction(vcpu);
> > -		return 1;
> > -	}
> > -	p = ((char *) get_vmcs12(vcpu)) + offset;
> > -
> > -	switch (vmcs_field_type(field)) {
> > -	case VMCS_FIELD_TYPE_U16:
> > -		*(u16 *)p = field_value;
> > -		break;
> > -	case VMCS_FIELD_TYPE_U32:
> > -		*(u32 *)p = field_value;
> > -		break;
> > -	case VMCS_FIELD_TYPE_U64:
> > -		*(u64 *)p = field_value;
> > -		break;
> > -	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
> > -		*(natural_width *)p = field_value;
> > -		break;
> > -	default:
> > -		nested_vmx_failValid(vcpu,
> VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > -		skip_emulated_instruction(vcpu);
> > +	if (!vmcs12_write(vcpu, field, field_value))
> >  		return 1;
> > -	}
> >
> >  	nested_vmx_succeed(vcpu);
> >  	skip_emulated_instruction(vcpu);
> > --
> > 1.7.1
> 
> --
> 			Gleb.
Gleb Natapov Nov. 22, 2012, 6:45 a.m. UTC | #6
On Thu, Nov 22, 2012 at 03:16:47AM +0000, Xu, Dongxiao wrote:
> 
> 
> > -----Original Message-----
> > From: Gleb Natapov [mailto:gleb@redhat.com]
> > Sent: Wednesday, November 21, 2012 9:27 PM
> > To: Xu, Dongxiao
> > Cc: kvm@vger.kernel.org; mtosatti@redhat.com
> > Subject: Re: [PATCH 1/4] nested vmx: clean up for vmcs12 read and write
> > 
> > On Wed, Nov 21, 2012 at 05:04:34PM +0800, Dongxiao Xu wrote:
> > > abstract vmcs12_read and vmcs12_write functions to do the vmcs12
> > > read/write operations.
> > >
> > > Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
> > > ---
> > >  arch/x86/kvm/vmx.c |   86
> > +++++++++++++++++++++++++++-------------------------
> > >  1 files changed, 45 insertions(+), 41 deletions(-)
> > >
> > > diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index
> > > f858159..d8670e4 100644
> > > --- a/arch/x86/kvm/vmx.c
> > > +++ b/arch/x86/kvm/vmx.c
> > > @@ -5407,32 +5407,67 @@ static inline int vmcs_field_readonly(unsigned
> > long field)
> > >   * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
> > >   * 64-bit fields are to be returned).
> > >   */
> > > -static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
> > > -					unsigned long field, u64 *ret)
> > > +static inline u64 vmcs12_read(struct kvm_vcpu *vcpu, unsigned long
> > > +field)
> > >  {
> > >  	short offset = vmcs_field_to_offset(field);
> > >  	char *p;
> > >
> > > -	if (offset < 0)
> > > +	if (offset < 0) {
> > > +		nested_vmx_failValid(vcpu,
> > VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > > +		skip_emulated_instruction(vcpu);
> > >  		return 0;
> > > +	}
> > >
> > >  	p = ((char *)(get_vmcs12(vcpu))) + offset;
> > >
> > >  	switch (vmcs_field_type(field)) {
> > >  	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
> > > -		*ret = *((natural_width *)p);
> > > +		return *((natural_width *)p);
> > > +	case VMCS_FIELD_TYPE_U16:
> > > +		return *((u16 *)p);
> > > +	case VMCS_FIELD_TYPE_U32:
> > > +		return *((u32 *)p);
> > > +	case VMCS_FIELD_TYPE_U64:
> > > +		return *((u64 *)p);
> > > +	default:
> > > +		nested_vmx_failValid(vcpu,
> > VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > > +		skip_emulated_instruction(vcpu);
> > > +		return 0; /* can never happen. */
> > > +	}
> > > +}
> > > +
> > > +static inline int vmcs12_write(struct kvm_vcpu *vcpu,
> > > +			unsigned long field,
> > > +			u64 value)
> > > +{
> > > +	short offset = vmcs_field_to_offset(field);
> > > +	char *p;
> > > +
> > > +	if (offset < 0) {
> > > +		nested_vmx_failValid(vcpu,
> > VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > > +		skip_emulated_instruction(vcpu);
> > > +		return 0;
> > > +	}
> > > +
> > > +	p = ((char *)(get_vmcs12(vcpu))) + offset;
> > > +
> > > +	switch (vmcs_field_type(field)) {
> > > +	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
> > > +		*(natural_width *)p = value;
> > >  		return 1;
> > >  	case VMCS_FIELD_TYPE_U16:
> > > -		*ret = *((u16 *)p);
> > > +		*(u16 *)p = value;
> > >  		return 1;
> > >  	case VMCS_FIELD_TYPE_U32:
> > > -		*ret = *((u32 *)p);
> > > +		*(u32 *)p = value;
> > >  		return 1;
> > >  	case VMCS_FIELD_TYPE_U64:
> > > -		*ret = *((u64 *)p);
> > > +		*(u64 *)p = value;
> > >  		return 1;
> > >  	default:
> > > -		return 0; /* can never happen. */
> > > +		nested_vmx_failValid(vcpu,
> > VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > > +		skip_emulated_instruction(vcpu);
> > > +		return 0;
> > >  	}
> > >  }
> > >
> > > @@ -5466,11 +5501,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
> > >  	/* Decode instruction info and find the field to read */
> > >  	field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
> > >  	/* Read the field, zero-extended to a u64 field_value */
> > > -	if (!vmcs12_read_any(vcpu, field, &field_value)) {
> > > -		nested_vmx_failValid(vcpu,
> > VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > > -		skip_emulated_instruction(vcpu);
> > > -		return 1;
> > > -	}
> > > +	field_value = vmcs12_read(vcpu, field);
> > You do not handle failure here and always write back field_value even if
> > vmcs12_read() failed. Actually now it is impossible to detect a failure. Call to
> > nested_vmx_failValid() in vmcs12_read() will be overwritten by call to
> > nested_vmx_succeed() at the end of
> > handle_vmread() and skip_emulated_instruction() will be called twice.
> 
> Thanks Gleb and Orit to raise this issue.
> 
> What about moving the offset check outside the vmcs12_read() and vmcs12_write() function, and put it directly in handle_vmread() and handle_vmwrite()?
> I think we only need to do offset error check in handle_vmread() and handle_vmwrite() since they are to emulate correct behavior for guest VMM. For example, if guest VMM reads a field that is not valid or writes a field that is read only, then in emulation code handle_vmread() and handle_vmwrite, we need to raise error to guest VMM.
> For other calling of vmcs12_read() and vmcs12_write() functions in KVM hypervisor (see PATCH 3/4), actually the caller needs to ensure the field should be valid. Otherwise it is a bug in KVM hypervisor itself.
> Does it make sense? If so, I will revise this patch according to above.
> 
Yes, it makes sense.

> Thanks,
> Dongxiao
> 
> > 
> > > +             skip_emulated_instruction(vcpu);
> > 
> > >  	/*
> > >  	 * Now copy part of this value to register or memory, as requested.
> > >  	 * Note that the number of bits actually copied is 32 or 64
> > > depending @@ -5500,8 +5531,6 @@ static int handle_vmwrite(struct
> > kvm_vcpu *vcpu)
> > >  	gva_t gva;
> > >  	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
> > >  	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
> > > -	char *p;
> > > -	short offset;
> > >  	/* The value to write might be 32 or 64 bits, depending on L1's long
> > >  	 * mode, and eventually we need to write that into a field of several
> > >  	 * possible lengths. The code below first zero-extends the value to
> > > 64 @@ -5537,33 +5566,8 @@ static int handle_vmwrite(struct kvm_vcpu
> > *vcpu)
> > >  		skip_emulated_instruction(vcpu);
> > >  		return 1;
> > >  	}
> > > -
> > > -	offset = vmcs_field_to_offset(field);
> > > -	if (offset < 0) {
> > > -		nested_vmx_failValid(vcpu,
> > VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > > -		skip_emulated_instruction(vcpu);
> > > -		return 1;
> > > -	}
> > > -	p = ((char *) get_vmcs12(vcpu)) + offset;
> > > -
> > > -	switch (vmcs_field_type(field)) {
> > > -	case VMCS_FIELD_TYPE_U16:
> > > -		*(u16 *)p = field_value;
> > > -		break;
> > > -	case VMCS_FIELD_TYPE_U32:
> > > -		*(u32 *)p = field_value;
> > > -		break;
> > > -	case VMCS_FIELD_TYPE_U64:
> > > -		*(u64 *)p = field_value;
> > > -		break;
> > > -	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
> > > -		*(natural_width *)p = field_value;
> > > -		break;
> > > -	default:
> > > -		nested_vmx_failValid(vcpu,
> > VMXERR_UNSUPPORTED_VMCS_COMPONENT);
> > > -		skip_emulated_instruction(vcpu);
> > > +	if (!vmcs12_write(vcpu, field, field_value))
> > >  		return 1;
> > > -	}
> > >
> > >  	nested_vmx_succeed(vcpu);
> > >  	skip_emulated_instruction(vcpu);
> > > --
> > > 1.7.1
> > 
> > --
> > 			Gleb.

--
			Gleb.

Patch

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f858159..d8670e4 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5407,32 +5407,67 @@  static inline int vmcs_field_readonly(unsigned long field)
  * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
  * 64-bit fields are to be returned).
  */
-static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
-					unsigned long field, u64 *ret)
+static inline u64 vmcs12_read(struct kvm_vcpu *vcpu, unsigned long field)
 {
 	short offset = vmcs_field_to_offset(field);
 	char *p;
 
-	if (offset < 0)
+	if (offset < 0) {
+		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+		skip_emulated_instruction(vcpu);
 		return 0;
+	}
 
 	p = ((char *)(get_vmcs12(vcpu))) + offset;
 
 	switch (vmcs_field_type(field)) {
 	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
-		*ret = *((natural_width *)p);
+		return *((natural_width *)p);
+	case VMCS_FIELD_TYPE_U16:
+		return *((u16 *)p);
+	case VMCS_FIELD_TYPE_U32:
+		return *((u32 *)p);
+	case VMCS_FIELD_TYPE_U64:
+		return *((u64 *)p);
+	default:
+		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+		skip_emulated_instruction(vcpu);
+		return 0; /* can never happen. */
+	}
+}
+
+static inline int vmcs12_write(struct kvm_vcpu *vcpu,
+			unsigned long field,
+			u64 value)
+{
+	short offset = vmcs_field_to_offset(field);
+	char *p;
+
+	if (offset < 0) {
+		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+		skip_emulated_instruction(vcpu);
+		return 0;
+	}
+
+	p = ((char *)(get_vmcs12(vcpu))) + offset;
+
+	switch (vmcs_field_type(field)) {
+	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
+		*(natural_width *)p = value;
 		return 1;
 	case VMCS_FIELD_TYPE_U16:
-		*ret = *((u16 *)p);
+		*(u16 *)p = value;
 		return 1;
 	case VMCS_FIELD_TYPE_U32:
-		*ret = *((u32 *)p);
+		*(u32 *)p = value;
 		return 1;
 	case VMCS_FIELD_TYPE_U64:
-		*ret = *((u64 *)p);
+		*(u64 *)p = value;
 		return 1;
 	default:
-		return 0; /* can never happen. */
+		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+		skip_emulated_instruction(vcpu);
+		return 0;
 	}
 }
 
@@ -5466,11 +5501,7 @@  static int handle_vmread(struct kvm_vcpu *vcpu)
 	/* Decode instruction info and find the field to read */
 	field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
 	/* Read the field, zero-extended to a u64 field_value */
-	if (!vmcs12_read_any(vcpu, field, &field_value)) {
-		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
-		skip_emulated_instruction(vcpu);
-		return 1;
-	}
+	field_value = vmcs12_read(vcpu, field);
 	/*
 	 * Now copy part of this value to register or memory, as requested.
 	 * Note that the number of bits actually copied is 32 or 64 depending
@@ -5500,8 +5531,6 @@  static int handle_vmwrite(struct kvm_vcpu *vcpu)
 	gva_t gva;
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
-	char *p;
-	short offset;
 	/* The value to write might be 32 or 64 bits, depending on L1's long
 	 * mode, and eventually we need to write that into a field of several
 	 * possible lengths. The code below first zero-extends the value to 64
@@ -5537,33 +5566,8 @@  static int handle_vmwrite(struct kvm_vcpu *vcpu)
 		skip_emulated_instruction(vcpu);
 		return 1;
 	}
-
-	offset = vmcs_field_to_offset(field);
-	if (offset < 0) {
-		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
-		skip_emulated_instruction(vcpu);
-		return 1;
-	}
-	p = ((char *) get_vmcs12(vcpu)) + offset;
-
-	switch (vmcs_field_type(field)) {
-	case VMCS_FIELD_TYPE_U16:
-		*(u16 *)p = field_value;
-		break;
-	case VMCS_FIELD_TYPE_U32:
-		*(u32 *)p = field_value;
-		break;
-	case VMCS_FIELD_TYPE_U64:
-		*(u64 *)p = field_value;
-		break;
-	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
-		*(natural_width *)p = field_value;
-		break;
-	default:
-		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
-		skip_emulated_instruction(vcpu);
+	if (!vmcs12_write(vcpu, field, field_value))
 		return 1;
-	}
 
 	nested_vmx_succeed(vcpu);
 	skip_emulated_instruction(vcpu);