
[v2,7/9] KVM: x86/mmu: Add try_get_mt_mask to x86_ops

Message ID 20220321224358.1305530-8-bgardon@google.com (mailing list archive)
State New, archived
Series KVM: x86/MMU: Optimize disabling dirty logging

Commit Message

Ben Gardon March 21, 2022, 10:43 p.m. UTC
Add another function for getting the memory type mask to x86_ops.
This version of the function can fail, but it does not require a vCPU
pointer. It will be used in a subsequent commit for in-place large page
promotion when disabling dirty logging.

No functional change intended.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/include/asm/kvm-x86-ops.h | 1 +
 arch/x86/include/asm/kvm_host.h    | 2 ++
 arch/x86/kvm/svm/svm.c             | 9 +++++++++
 arch/x86/kvm/vmx/vmx.c             | 1 +
 4 files changed, 13 insertions(+)
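
To make the intent concrete, here is a minimal sketch of how a vCPU-less caller
could consume the new hook when building a huge SPTE for promotion.  Only the op
signature comes from this patch; the helper name and surrounding logic are
illustrative, and the static_call name simply follows the usual KVM_X86_OP
naming convention:

	/* Illustrative caller only; the helper name is hypothetical. */
	static bool example_try_set_mt_mask(struct kvm *kvm, gfn_t gfn, u64 *spte)
	{
		u64 mt_mask;

		/* Fails (returns false) when no memtype can be derived without a vCPU. */
		if (!static_call(kvm_x86_try_get_mt_mask)(kvm, gfn,
							  false /* is_mmio */,
							  &mt_mask))
			return false;

		*spte |= mt_mask;
		return true;
	}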

Comments

Sean Christopherson April 11, 2022, 11 p.m. UTC | #1
On Mon, Mar 21, 2022, Ben Gardon wrote:
> Add another function for getting the memory type mask to x86_ops.
> This version of the function can fail, but it does not require a vCPU
> pointer. It will be used in a subsequent commit for in-place large page
> promotion when disabling dirty logging.
> 
> No functional change intended.
> 
> Signed-off-by: Ben Gardon <bgardon@google.com>
> ---
>  arch/x86/include/asm/kvm-x86-ops.h | 1 +
>  arch/x86/include/asm/kvm_host.h    | 2 ++
>  arch/x86/kvm/svm/svm.c             | 9 +++++++++
>  arch/x86/kvm/vmx/vmx.c             | 1 +
>  4 files changed, 13 insertions(+)
> 
> diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
> index 29affccb353c..29880363b5ed 100644
> --- a/arch/x86/include/asm/kvm-x86-ops.h
> +++ b/arch/x86/include/asm/kvm-x86-ops.h
> @@ -88,6 +88,7 @@ KVM_X86_OP_OPTIONAL(sync_pir_to_irr)
>  KVM_X86_OP_OPTIONAL_RET0(set_tss_addr)
>  KVM_X86_OP_OPTIONAL_RET0(set_identity_map_addr)
>  KVM_X86_OP_OPTIONAL_RET0(get_mt_mask)
> +KVM_X86_OP(try_get_mt_mask)
>  KVM_X86_OP(load_mmu_pgd)
>  KVM_X86_OP(has_wbinvd_exit)
>  KVM_X86_OP(get_l2_tsc_offset)
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index f72e80178ffc..a114e4782702 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1422,6 +1422,8 @@ struct kvm_x86_ops {
>  	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
>  	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
>  	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
> +	bool (*try_get_mt_mask)(struct kvm *kvm, gfn_t gfn,
> +				bool is_mmio, u64 *mask);

There's an old saying in Tennessee - I know it's in Texas, probably in Tennessee -
that says, fool me once, shame on... shame on you. Fool me... you can't get fooled again.

Thou shalt not trick me again by using a bool for pass/fail!  Though this one
doesn't have the same potential for pain as the TDP MMU's atomic operations.

And as a bonus, if we use 0/-errno, then we can use KVM_X86_OP_OPTIONAL_RET0()
and SVM doesn't need to provide an implementation.

Tangentially related to the return type, what about naming it something like
get_vm_wide_mt_mask() to convey exactly what it's doing?  The @kvm param kinda
does that, but IMO it doesn't do a good job of capturing why the function can fail.
Adding "vm_wide" helps explain why it can, i.e. that there may not be a VM-wide
memtype established for the gfn.
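
For comparison, the 0/-errno shape suggested above would look roughly like the
following, with the op named per the get_vm_wide_mt_mask() idea; everything here
is a sketch, not code from the series:

	/* 0 on success with *mask filled in, -errno if no VM-wide memtype exists. */
	int (*get_vm_wide_mt_mask)(struct kvm *kvm, gfn_t gfn,
				   bool is_mmio, u64 *mask);

	/* Caller side: */
	r = static_call(kvm_x86_get_vm_wide_mt_mask)(kvm, gfn, false, &mt_mask);
	if (r)
		return false;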

As penance for your boolean sin, can you slot this in earlier in your series?
It's obviously not a hard dependency, but using a u64 for the mask here and then
undoing the whole thing is rather silly.  Compile tested only at this point, I'll
test on an actual system ASAP and let you know if I did something stupid.

From: Sean Christopherson <seanjc@google.com>
Date: Mon, 11 Apr 2022 15:12:16 -0700
Subject: [PATCH] KVM: x86: Restrict get_mt_mask() to a u8, use
 KVM_X86_OP_OPTIONAL_RET0

Restrict get_mt_mask() to a u8 and reintroduce using a RET0 static_call
for the SVM implementation.  EPT stores the memtype information in the
lower 8 bits (bits 6:3 to be precise), and even returns a shifted u8
without an explicit cast to a larger type; there's no need to return a
full u64.

Note, RET0 doesn't play nice with a u64 return on 32-bit kernels, see
commit bf07be36cd88 ("KVM: x86: do not use KVM_X86_OP_OPTIONAL_RET0 for
get_mt_mask").

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/include/asm/kvm-x86-ops.h | 2 +-
 arch/x86/include/asm/kvm_host.h    | 2 +-
 arch/x86/kvm/svm/svm.c             | 6 ------
 arch/x86/kvm/vmx/vmx.c             | 2 +-
 4 files changed, 3 insertions(+), 9 deletions(-)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 96e4e9842dfc..0d16f21a6203 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -87,7 +87,7 @@ KVM_X86_OP(deliver_interrupt)
 KVM_X86_OP_OPTIONAL(sync_pir_to_irr)
 KVM_X86_OP_OPTIONAL_RET0(set_tss_addr)
 KVM_X86_OP_OPTIONAL_RET0(set_identity_map_addr)
-KVM_X86_OP(get_mt_mask)
+KVM_X86_OP_OPTIONAL_RET0(get_mt_mask)
 KVM_X86_OP(load_mmu_pgd)
 KVM_X86_OP(has_wbinvd_exit)
 KVM_X86_OP(get_l2_tsc_offset)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2c20f715f009..dc4d34f1bcf9 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1421,7 +1421,7 @@ struct kvm_x86_ops {
 	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
-	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
+	u8 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);

 	void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
 			     int root_level);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index fc1725b7d05f..56f03eafe421 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4011,11 +4011,6 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
 	return true;
 }

-static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
-{
-	return 0;
-}
-
 static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -4673,7 +4668,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.check_apicv_inhibit_reasons = avic_check_apicv_inhibit_reasons,
 	.apicv_post_state_restore = avic_apicv_post_state_restore,

-	.get_mt_mask = svm_get_mt_mask,
 	.get_exit_info = svm_get_exit_info,

 	.vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index cf8581978bce..646fa609aa0d 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7142,7 +7142,7 @@ static int __init vmx_check_processor_compat(void)
 	return 0;
 }

-static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+static u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 {
 	u8 cache;


base-commit: 59d9e75d641565603e7c293f4cec182d86db8586
--
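
Background for the "bits 6:3" remark in the patch above: an EPT leaf entry
encodes the memtype in bits 5:3 and "ignore PAT" in bit 6, so even the largest
value vmx_get_mt_mask() can produce fits in a u8.  A sketch using the constants
from arch/x86/include/asm/vmx.h (the WB-plus-IPAT composition shown is just an
example):

	#define VMX_EPT_MT_EPTE_SHIFT	3		/* memtype lives in bits 5:3 */
	#define VMX_EPT_IPAT_BIT	(1ull << 6)	/* "ignore PAT" is bit 6 */

	/* MTRR_TYPE_WRBACK == 6, so (6 << 3) | BIT(6) == 0x70, comfortably a u8. */
	u8 mt = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;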
Ben Gardon April 11, 2022, 11:24 p.m. UTC | #2
On Mon, Apr 11, 2022 at 4:00 PM Sean Christopherson <seanjc@google.com> wrote:
>
> On Mon, Mar 21, 2022, Ben Gardon wrote:
> > Add another function for getting the memory type mask to x86_ops.
> > This version of the function can fail, but it does not require a vCPU
> > pointer. It will be used in a subsequent commit for in-place large page
> > promotion when disabling dirty logging.
> >
> > No functional change intended.
> >
> > Signed-off-by: Ben Gardon <bgardon@google.com>
> > ---
> >  arch/x86/include/asm/kvm-x86-ops.h | 1 +
> >  arch/x86/include/asm/kvm_host.h    | 2 ++
> >  arch/x86/kvm/svm/svm.c             | 9 +++++++++
> >  arch/x86/kvm/vmx/vmx.c             | 1 +
> >  4 files changed, 13 insertions(+)
> >
> > diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
> > index 29affccb353c..29880363b5ed 100644
> > --- a/arch/x86/include/asm/kvm-x86-ops.h
> > +++ b/arch/x86/include/asm/kvm-x86-ops.h
> > @@ -88,6 +88,7 @@ KVM_X86_OP_OPTIONAL(sync_pir_to_irr)
> >  KVM_X86_OP_OPTIONAL_RET0(set_tss_addr)
> >  KVM_X86_OP_OPTIONAL_RET0(set_identity_map_addr)
> >  KVM_X86_OP_OPTIONAL_RET0(get_mt_mask)
> > +KVM_X86_OP(try_get_mt_mask)
> >  KVM_X86_OP(load_mmu_pgd)
> >  KVM_X86_OP(has_wbinvd_exit)
> >  KVM_X86_OP(get_l2_tsc_offset)
> > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> > index f72e80178ffc..a114e4782702 100644
> > --- a/arch/x86/include/asm/kvm_host.h
> > +++ b/arch/x86/include/asm/kvm_host.h
> > @@ -1422,6 +1422,8 @@ struct kvm_x86_ops {
> >       int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
> >       int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
> >       u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
> > +     bool (*try_get_mt_mask)(struct kvm *kvm, gfn_t gfn,
> > +                             bool is_mmio, u64 *mask);
>
> There's an old saying in Tennessee - I know it's in Texas, probably in Tennessee -
> that says, fool me once, shame on... shame on you. Fool me... you can't get fooled again.

Haha, shoot, here I was saying it wrong all these years and getting
fooled too many times.

Paolo, did you already queue this series or should I send out a v3? I
thought I saw the "queued,thanks" come through at some point, but
maybe I'm mis-remembering.

>
> Thou shalt not trick me again by using a bool for pass/fail!  Though this one
> doesn't have the same potential for pain as the TDP MMU's atomic operations.
>
> And as a bonus, if we use 0/-errno, then we can use KVM_X86_OP_OPTIONAL_RET0()
> and SVM doesn't need to provide an implementation.
>
> Tangentially related to the return type, what about naming it something like
> get_vm_wide_mt_mask() to convey exactly what it's doing?  The @kvm param kinda
> does that, but IMO it doesn't do a good job of capturing why the function can fail.
> Adding "vm_wide" helps explain why it can, i.e. that there may not be a VM-wide
> memtype established for the gfn.
>
> As penance for your boolean sin, can you slot this in earlier in your series?
> It's obviously not a hard dependency, but using a u64 for the mask here and then
> undoing the whole thing is rather silly.  Compile tested only at this point, I'll
> test on an actual system ASAP and let you know if I did something stupid.
>
> From: Sean Christopherson <seanjc@google.com>
> Date: Mon, 11 Apr 2022 15:12:16 -0700
> Subject: [PATCH] KVM: x86: Restrict get_mt_mask() to a u8, use
>  KVM_X86_OP_OPTIONAL_RET0
>
> Restrict get_mt_mask() to a u8 and reintroduce using a RET0 static_call
> for the SVM implementation.  EPT stores the memtype information in the
> lower 8 bits (bits 6:3 to be precise), and even returns a shifted u8
> without an explicit cast to a larger type; there's no need to return a
> full u64.
>
> Note, RET0 doesn't play nice with a u64 return on 32-bit kernels, see
> commit bf07be36cd88 ("KVM: x86: do not use KVM_X86_OP_OPTIONAL_RET0 for
> get_mt_mask").
>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>  arch/x86/include/asm/kvm-x86-ops.h | 2 +-
>  arch/x86/include/asm/kvm_host.h    | 2 +-
>  arch/x86/kvm/svm/svm.c             | 6 ------
>  arch/x86/kvm/vmx/vmx.c             | 2 +-
>  4 files changed, 3 insertions(+), 9 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
> index 96e4e9842dfc..0d16f21a6203 100644
> --- a/arch/x86/include/asm/kvm-x86-ops.h
> +++ b/arch/x86/include/asm/kvm-x86-ops.h
> @@ -87,7 +87,7 @@ KVM_X86_OP(deliver_interrupt)
>  KVM_X86_OP_OPTIONAL(sync_pir_to_irr)
>  KVM_X86_OP_OPTIONAL_RET0(set_tss_addr)
>  KVM_X86_OP_OPTIONAL_RET0(set_identity_map_addr)
> -KVM_X86_OP(get_mt_mask)
> +KVM_X86_OP_OPTIONAL_RET0(get_mt_mask)
>  KVM_X86_OP(load_mmu_pgd)
>  KVM_X86_OP(has_wbinvd_exit)
>  KVM_X86_OP(get_l2_tsc_offset)
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 2c20f715f009..dc4d34f1bcf9 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1421,7 +1421,7 @@ struct kvm_x86_ops {
>         int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
>         int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
>         int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
> -       u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
> +       u8 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
>
>         void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
>                              int root_level);
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index fc1725b7d05f..56f03eafe421 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -4011,11 +4011,6 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
>         return true;
>  }
>
> -static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
> -{
> -       return 0;
> -}
> -
>  static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
>  {
>         struct vcpu_svm *svm = to_svm(vcpu);
> @@ -4673,7 +4668,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
>         .check_apicv_inhibit_reasons = avic_check_apicv_inhibit_reasons,
>         .apicv_post_state_restore = avic_apicv_post_state_restore,
>
> -       .get_mt_mask = svm_get_mt_mask,
>         .get_exit_info = svm_get_exit_info,
>
>         .vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index cf8581978bce..646fa609aa0d 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -7142,7 +7142,7 @@ static int __init vmx_check_processor_compat(void)
>         return 0;
>  }
>
> -static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
> +static u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
>  {
>         u8 cache;
>
>
> base-commit: 59d9e75d641565603e7c293f4cec182d86db8586
> --
Sean Christopherson April 11, 2022, 11:33 p.m. UTC | #3
On Mon, Apr 11, 2022, Sean Christopherson wrote:
> And as a bonus, if we use 0/-errno, then we can use KVM_X86_OP_OPTIONAL_RET0()
> and SVM doesn't need to provide an implementation.

Gah, got ahead of myself.  If we go this route, the caller would need to ensure
it initializes mt_mask.  Probably not worth it, so scratch that idea.  I also
thought about overloading the return type, e.g.

	mt_mask = ...
	if (mt_mask < 0)
		return false;

But again, probably not a net positive.
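
Spelled out, that overloaded-return idea would push the failure check to the
caller, e.g. (purely illustrative, names hypothetical):

	/* Negative means "no VM-wide memtype"; otherwise the mask itself (fits in 8 bits). */
	int mt = static_call(kvm_x86_get_vm_wide_mt_mask)(kvm, gfn, false);

	if (mt < 0)
		return false;

	*mask = mt;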
Sean Christopherson April 12, 2022, 7:30 p.m. UTC | #4
On Mon, Apr 11, 2022, Sean Christopherson wrote:
> It's obviously not a hard dependency, but using a u64 for the mask here and then
> undoing the whole thing is rather silly.  Compile tested only at this point, I'll
> test on an actual system ASAP and let you know if I did something stupid.
> 
> From: Sean Christopherson <seanjc@google.com>
> Date: Mon, 11 Apr 2022 15:12:16 -0700
> Subject: [PATCH] KVM: x86: Restrict get_mt_mask() to a u8, use
>  KVM_X86_OP_OPTIONAL_RET0

Verified this works as expected when patching in RET0 on a 32-bit kernel (returning
a u64 results in reserved bits in the upper half being set due to EDX not being cleared).
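
For context on that failure mode: __static_call_return0() is declared as
returning long, so on i386 the RET0 stub only guarantees EAX is zeroed, while a
u64 return value is read from EDX:EAX; the stale EDX is what shows up as set
"reserved" bits in the upper half.  With a u8 return only EAX is consumed, so
RET0 is safe.  Roughly:

	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); /* caller reads EDX:EAX */
	long __static_call_return0(void);  /* RET0 stub only clears EAX (returns 0L) */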

Patch

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 29affccb353c..29880363b5ed 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -88,6 +88,7 @@  KVM_X86_OP_OPTIONAL(sync_pir_to_irr)
 KVM_X86_OP_OPTIONAL_RET0(set_tss_addr)
 KVM_X86_OP_OPTIONAL_RET0(set_identity_map_addr)
 KVM_X86_OP_OPTIONAL_RET0(get_mt_mask)
+KVM_X86_OP(try_get_mt_mask)
 KVM_X86_OP(load_mmu_pgd)
 KVM_X86_OP(has_wbinvd_exit)
 KVM_X86_OP(get_l2_tsc_offset)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f72e80178ffc..a114e4782702 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1422,6 +1422,8 @@  struct kvm_x86_ops {
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
+	bool (*try_get_mt_mask)(struct kvm *kvm, gfn_t gfn,
+				bool is_mmio, u64 *mask);
 
 	void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
 			     int root_level);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index b069493ad5c7..e73415dfcf52 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3944,6 +3944,13 @@  static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
 	return true;
 }
 
+static bool svm_try_get_mt_mask(struct kvm *kvm, gfn_t gfn,
+				bool is_mmio, u64 *mask)
+{
+	*mask = 0;
+	return true;
+}
+
 static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -4600,6 +4607,8 @@  static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.check_apicv_inhibit_reasons = avic_check_apicv_inhibit_reasons,
 	.apicv_post_state_restore = avic_apicv_post_state_restore,
 
+	.try_get_mt_mask = svm_try_get_mt_mask,
+
 	.get_exit_info = svm_get_exit_info,
 
 	.vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 69c654567475..81e9805ed1d8 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7813,6 +7813,7 @@  static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.set_tss_addr = vmx_set_tss_addr,
 	.set_identity_map_addr = vmx_set_identity_map_addr,
 	.get_mt_mask = vmx_get_mt_mask,
+	.try_get_mt_mask = vmx_try_get_mt_mask,
 
 	.get_exit_info = vmx_get_exit_info,