
[v3,3/6] KVM: MMU: make return value of mmio page fault handler more readable

Message ID 1370595088-3315-4-git-send-email-xiaoguangrong@linux.vnet.ibm.com (mailing list archive)
State New, archived

Commit Message

Xiao Guangrong June 7, 2013, 8:51 a.m. UTC
Define some meaningful names instead of raw code

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c | 15 +++++----------
 arch/x86/kvm/mmu.h | 14 ++++++++++++++
 arch/x86/kvm/vmx.c |  4 ++--
 3 files changed, 21 insertions(+), 12 deletions(-)

Comments

Gleb Natapov June 10, 2013, 7:57 a.m. UTC | #1
On Fri, Jun 07, 2013 at 04:51:25PM +0800, Xiao Guangrong wrote:
> Define some meaningful names instead of raw code
> 
> Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
> ---
>  arch/x86/kvm/mmu.c | 15 +++++----------
>  arch/x86/kvm/mmu.h | 14 ++++++++++++++
>  arch/x86/kvm/vmx.c |  4 ++--
>  3 files changed, 21 insertions(+), 12 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index eca91bd..044d8c0 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -3222,17 +3222,12 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
>  	return spte;
>  }
>  
> -/*
> - * If it is a real mmio page fault, return 1 and emulat the instruction
> - * directly, return 0 to let CPU fault again on the address, -1 is
> - * returned if bug is detected.
> - */
>  int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
>  {
>  	u64 spte;
>  
>  	if (quickly_check_mmio_pf(vcpu, addr, direct))
> -		return 1;
> +		return RET_MMIO_PF_EMULATE;
>  
>  	spte = walk_shadow_page_get_mmio_spte(vcpu, addr);
>  
> @@ -3245,7 +3240,7 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
>  
>  		trace_handle_mmio_page_fault(addr, gfn, access);
>  		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
> -		return 1;
> +		return RET_MMIO_PF_EMULATE;
>  	}
>  
>  	/*
> @@ -3253,13 +3248,13 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
>  	 * it's a BUG if the gfn is not a mmio page.
>  	 */
>  	if (direct && !check_direct_spte_mmio_pf(spte))
> -		return -1;
> +		return RET_MMIO_PF_BUG;
>  
>  	/*
>  	 * If the page table is zapped by other cpus, let CPU fault again on
>  	 * the address.
>  	 */
> -	return 0;
> +	return RET_MMIO_PF_RETRY;
>  }
>  EXPORT_SYMBOL_GPL(handle_mmio_page_fault_common);
>  
> @@ -3269,7 +3264,7 @@ static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr,
>  	int ret;
>  
>  	ret = handle_mmio_page_fault_common(vcpu, addr, direct);
> -	WARN_ON(ret < 0);
> +	WARN_ON(ret == RET_MMIO_PF_BUG);
>  	return ret;
>  }
>  
> diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
> index 922bfae..ba6a19c 100644
> --- a/arch/x86/kvm/mmu.h
> +++ b/arch/x86/kvm/mmu.h
> @@ -52,6 +52,20 @@
>  
>  int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
>  void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
> +
> +/*
> + * Return values of handle_mmio_page_fault_common:
> + * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
> + *			 directly.
> + * RET_MMIO_PF_RETRY: let CPU fault again on the address.
> + * RET_MMIO_PF_BUG: bug is detected.
> + */
> +enum {
> +	RET_MMIO_PF_EMULATE = 1,
> +	RET_MMIO_PF_RETRY = 0,
> +	RET_MMIO_PF_BUG = -1
> +};
I would order them from -1 to 1 and rename RET_MMIO_PF_BUG to
RET_MMIO_PF_ERROR, but no need to resend just for that.

> +
>  int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
>  int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
>  
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 78ee123..85c8d51 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -5366,10 +5366,10 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
>  	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
>  
>  	ret = handle_mmio_page_fault_common(vcpu, gpa, true);
> -	if (likely(ret == 1))
> +	if (likely(ret == RET_MMIO_PF_EMULATE))
>  		return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
>  					      EMULATE_DONE;
> -	if (unlikely(!ret))
> +	if (unlikely(ret == RET_MMIO_PF_RETRY))
>  		return 1;
>  
>  	/* It is the real ept misconfig */
> -- 
> 1.8.1.4

--
			Gleb.
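
For reference, Gleb's suggestion above would look roughly like the sketch below; the RET_MMIO_PF_ERROR name and the -1-to-1 ordering are the reviewer's proposal, not what this patch defines:

enum {
	RET_MMIO_PF_ERROR = -1,		/* renamed from RET_MMIO_PF_BUG */
	RET_MMIO_PF_RETRY = 0,		/* let the CPU fault again on the address */
	RET_MMIO_PF_EMULATE = 1		/* real MMIO fault, emulate the instruction */
};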
Xiao Guangrong June 10, 2013, 8:45 a.m. UTC | #2
On 06/10/2013 03:57 PM, Gleb Natapov wrote:
> On Fri, Jun 07, 2013 at 04:51:25PM +0800, Xiao Guangrong wrote:
>> [...]
>> +enum {
>> +	RET_MMIO_PF_EMULATE = 1,
>> +	RET_MMIO_PF_RETRY = 0,
>> +	RET_MMIO_PF_BUG = -1
>> +};
> I would order them from -1 to 1 and rename RET_MMIO_PF_BUG to
> RET_MMIO_PF_ERROR, but no need to resend just for that.

Fine with me. I will make a separate patch to update it later. Thanks for your review!
Takuya Yoshikawa June 10, 2013, 1:16 p.m. UTC | #3
On Mon, 10 Jun 2013 10:57:50 +0300
Gleb Natapov <gleb@redhat.com> wrote:

> On Fri, Jun 07, 2013 at 04:51:25PM +0800, Xiao Guangrong wrote:

> > +
> > +/*
> > + * Return values of handle_mmio_page_fault_common:
> > + * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
> > + *			 directly.
> > + * RET_MMIO_PF_RETRY: let CPU fault again on the address.
> > + * RET_MMIO_PF_BUG: bug is detected.
> > + */
> > +enum {
> > +	RET_MMIO_PF_EMULATE = 1,
> > +	RET_MMIO_PF_RETRY = 0,
> > +	RET_MMIO_PF_BUG = -1
> > +};
> I would order them from -1 to 1 and rename RET_MMIO_PF_BUG to
> RET_MMIO_PF_ERROR, but no need to resend just for that.

Why not just let the compiler select the values? -- It's an enum.
Any reason to make it start from -1?

	Takuya
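
To illustrate Takuya's question: with no explicit initializers the compiler numbers the enumerators itself, starting at 0, so keeping the declaration order of the patch would silently shift every value (a sketch, not proposed code):

enum {
	RET_MMIO_PF_EMULATE,	/* becomes 0, was 1 */
	RET_MMIO_PF_RETRY,	/* becomes 1, was 0 */
	RET_MMIO_PF_BUG		/* becomes 2, was -1 */
};

Any caller still written against the raw numbers -- e.g. the old "WARN_ON(ret < 0)" or "if (unlikely(!ret))" checks -- would then change meaning, which is why pinning the old values keeps the conversion easy to verify by inspection.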
Gleb Natapov June 11, 2013, 9:18 a.m. UTC | #4
On Mon, Jun 10, 2013 at 10:16:04PM +0900, Takuya Yoshikawa wrote:
> On Mon, 10 Jun 2013 10:57:50 +0300
> Gleb Natapov <gleb@redhat.com> wrote:
> 
> > On Fri, Jun 07, 2013 at 04:51:25PM +0800, Xiao Guangrong wrote:
> 
> > > +
> > > +/*
> > > + * Return values of handle_mmio_page_fault_common:
> > > + * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
> > > + *			 directly.
> > > + * RET_MMIO_PF_RETRY: let CPU fault again on the address.
> > > + * RET_MMIO_PF_BUG: bug is detected.
> > > + */
> > > +enum {
> > > +	RET_MMIO_PF_EMULATE = 1,
> > > +	RET_MMIO_PF_RETRY = 0,
> > > +	RET_MMIO_PF_BUG = -1
> > > +};
> > I would order them from -1 to 1 and rename RET_MMIO_PF_BUG to
> > RET_MMIO_PF_ERROR, but no need to resend just for that.
> 
> Why not just let compilers select the values? -- It's an enum.
> Any reason to make it start from -1?
> 
I am fine with this too as an additional patch. It makes sense to preserve the
original values, as Xiao did in the initial patch, since that makes it easier
to verify that the patch is just a mechanical change.

--
			Gleb.
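
The mechanical-change argument can be checked with a throwaway userspace program (not kernel code; the enum is copied from the patch), showing that the old numeric predicates and the new symbolic ones classify every value the handler can return identically:

#include <assert.h>

enum {
	RET_MMIO_PF_EMULATE = 1,
	RET_MMIO_PF_RETRY = 0,
	RET_MMIO_PF_BUG = -1
};

int main(void)
{
	const int vals[] = { RET_MMIO_PF_EMULATE, RET_MMIO_PF_RETRY, RET_MMIO_PF_BUG };

	for (unsigned int i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
		int ret = vals[i];

		/* old checks on the left, the patch's replacements on the right */
		assert((ret == 1) == (ret == RET_MMIO_PF_EMULATE));
		assert((!ret)     == (ret == RET_MMIO_PF_RETRY));
		assert((ret < 0)  == (ret == RET_MMIO_PF_BUG));
	}
	return 0;
}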

Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index eca91bd..044d8c0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3222,17 +3222,12 @@  static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
 	return spte;
 }
 
-/*
- * If it is a real mmio page fault, return 1 and emulat the instruction
- * directly, return 0 to let CPU fault again on the address, -1 is
- * returned if bug is detected.
- */
 int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 {
 	u64 spte;
 
 	if (quickly_check_mmio_pf(vcpu, addr, direct))
-		return 1;
+		return RET_MMIO_PF_EMULATE;
 
 	spte = walk_shadow_page_get_mmio_spte(vcpu, addr);
 
@@ -3245,7 +3240,7 @@  int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 
 		trace_handle_mmio_page_fault(addr, gfn, access);
 		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
-		return 1;
+		return RET_MMIO_PF_EMULATE;
 	}
 
 	/*
@@ -3253,13 +3248,13 @@  int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 	 * it's a BUG if the gfn is not a mmio page.
 	 */
 	if (direct && !check_direct_spte_mmio_pf(spte))
-		return -1;
+		return RET_MMIO_PF_BUG;
 
 	/*
 	 * If the page table is zapped by other cpus, let CPU fault again on
 	 * the address.
 	 */
-	return 0;
+	return RET_MMIO_PF_RETRY;
 }
 EXPORT_SYMBOL_GPL(handle_mmio_page_fault_common);
 
@@ -3269,7 +3264,7 @@  static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr,
 	int ret;
 
 	ret = handle_mmio_page_fault_common(vcpu, addr, direct);
-	WARN_ON(ret < 0);
+	WARN_ON(ret == RET_MMIO_PF_BUG);
 	return ret;
 }
 
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 922bfae..ba6a19c 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -52,6 +52,20 @@ 
 
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
+
+/*
+ * Return values of handle_mmio_page_fault_common:
+ * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
+ *			 directly.
+ * RET_MMIO_PF_RETRY: let CPU fault again on the address.
+ * RET_MMIO_PF_BUG: bug is detected.
+ */
+enum {
+	RET_MMIO_PF_EMULATE = 1,
+	RET_MMIO_PF_RETRY = 0,
+	RET_MMIO_PF_BUG = -1
+};
+
 int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 78ee123..85c8d51 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5366,10 +5366,10 @@  static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
 
 	ret = handle_mmio_page_fault_common(vcpu, gpa, true);
-	if (likely(ret == 1))
+	if (likely(ret == RET_MMIO_PF_EMULATE))
 		return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
 					      EMULATE_DONE;
-	if (unlikely(!ret))
+	if (unlikely(ret == RET_MMIO_PF_RETRY))
 		return 1;
 
 	/* It is the real ept misconfig */
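
As a usage sketch, the intended dispatch on the three constants can be written outside the kernel tree as follows; consume_mmio_pf() is a hypothetical stand-in, the real in-tree caller being handle_ept_misconfig() in the vmx.c hunk above:

#include <stdio.h>

enum {
	RET_MMIO_PF_EMULATE = 1,	/* real MMIO fault: emulate the instruction */
	RET_MMIO_PF_RETRY = 0,		/* SPTE was zapped: let the CPU fault again */
	RET_MMIO_PF_BUG = -1		/* inconsistent state: treat as a bug */
};

/* Hypothetical caller mirroring the shape of handle_ept_misconfig(). */
static const char *consume_mmio_pf(int ret)
{
	switch (ret) {
	case RET_MMIO_PF_EMULATE:
		return "emulate the faulting instruction";
	case RET_MMIO_PF_RETRY:
		return "re-enter the guest and let it fault again";
	case RET_MMIO_PF_BUG:
	default:
		return "warn and bail out to userspace";
	}
}

int main(void)
{
	printf("%s\n%s\n%s\n",
	       consume_mmio_pf(RET_MMIO_PF_EMULATE),
	       consume_mmio_pf(RET_MMIO_PF_RETRY),
	       consume_mmio_pf(RET_MMIO_PF_BUG));
	return 0;
}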