[v2,06/10] KVM: X86: Add functions that calculate the 02 TSC offset and multiplier

Message ID 20210512150945.4591-7-ilstam@amazon.com (mailing list archive)
State New, archived
Headers show
Series KVM: Implement nested TSC scaling | expand

Commit Message

Ilias Stamatis May 12, 2021, 3:09 p.m. UTC
When L2 is entered we need to "merge" the TSC multiplier and TSC offset
values of 01 and 12 together.

The merging is done using the following equations:
  offset_02 = ((offset_01 * mult_12) >> shift_bits) + offset_12
  mult_02 = (mult_01 * mult_12) >> shift_bits

Where shift_bits is kvm_tsc_scaling_ratio_frac_bits.
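
As a purely illustrative example (not part of the patch), assume the 48
fractional shift bits used on VMX: a multiplier of 1ULL << 48 then encodes a
ratio of 1.0. If L1 runs unscaled (mult_01 = 1ULL << 48) and L1 scales L2 by
a factor of 2 (mult_12 = 1ULL << 49), then:
  mult_02   = ((1ULL << 48) * (1ULL << 49)) >> 48 = 1ULL << 49   (ratio 2.0)
  offset_02 = ((offset_01 * (1ULL << 49)) >> 48) + offset_12
            = 2 * offset_01 + offset_12
That is, L1's offset is rescaled by L2's ratio before L2's own offset is
added on top.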

Signed-off-by: Ilias Stamatis <ilstam@amazon.com>
---
 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/x86.c              | 29 +++++++++++++++++++++++++++++
 2 files changed, 31 insertions(+)

Comments

Sean Christopherson May 18, 2021, 11:21 p.m. UTC | #1
On Wed, May 12, 2021, Ilias Stamatis wrote:
> When L2 is entered we need to "merge" the TSC multiplier and TSC offset
> values of 01 and 12 together.
> 
> The merging is done using the following equations:
>   offset_02 = ((offset_01 * mult_12) >> shift_bits) + offset_12
>   mult_02 = (mult_01 * mult_12) >> shift_bits
> 
> Where shift_bits is kvm_tsc_scaling_ratio_frac_bits.
> 
> Signed-off-by: Ilias Stamatis <ilstam@amazon.com>
> ---
>  arch/x86/include/asm/kvm_host.h |  2 ++
>  arch/x86/kvm/x86.c              | 29 +++++++++++++++++++++++++++++
>  2 files changed, 31 insertions(+)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 4c4a3fefff57..57a25d8e8b0f 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1793,6 +1793,8 @@ static inline bool kvm_is_supported_user_return_msr(u32 msr)
>  u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
>  u64 kvm_scale_tsc_l1(struct kvm_vcpu *vcpu, u64 tsc);
>  u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
> +void kvm_set_02_tsc_offset(struct kvm_vcpu *vcpu);
> +void kvm_set_02_tsc_multiplier(struct kvm_vcpu *vcpu);
>  
>  unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
>  bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 84af1af7a2cc..1db6cfc2079f 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -2346,6 +2346,35 @@ u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
>  }
>  EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
>  
> +void kvm_set_02_tsc_offset(struct kvm_vcpu *vcpu)

I dislike the "02" nomenclature.  "02" is used specifically to refer to
vmcs02 and vmcb02, whereas these helpers touch KVM's software model, not the CPU
struct.  Can't this simply be "l2"?

> +{
> +	u64 l2_offset = static_call(kvm_x86_get_l2_tsc_offset)(vcpu);
> +	u64 l2_multiplier = static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu);
> +
> +	if (l2_multiplier != kvm_default_tsc_scaling_ratio) {
> +		vcpu->arch.tsc_offset = mul_s64_u64_shr(
> +				(s64) vcpu->arch.l1_tsc_offset,
> +				l2_multiplier,
> +				kvm_tsc_scaling_ratio_frac_bits);
> +	}
> +
> +	vcpu->arch.tsc_offset += l2_offset;
> +}
> +EXPORT_SYMBOL_GPL(kvm_set_02_tsc_offset);
> +
> +void kvm_set_02_tsc_multiplier(struct kvm_vcpu *vcpu)

I normally like splitting patches gratuitously, but in this case I think it would
be better to combine this with the VMX usage in patch 08.  It's impossible to
properly review this patch without looking at its callers.

> +{
> +	u64 l2_multiplier = static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu);

Case in point, calling back into vendor code to get the L2 multiplier is silly,
just have the caller provide it explicitly.
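
E.g. something like this (an untested sketch, names purely illustrative):

	u64 kvm_calc_l2_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
	{
		if (l2_multiplier == kvm_default_tsc_scaling_ratio)
			return l1_multiplier;

		return mul_u64_u64_shr(l1_multiplier, l2_multiplier,
				       kvm_tsc_scaling_ratio_frac_bits);
	}

with the caller doing:

	vcpu->arch.tsc_scaling_ratio =
		kvm_calc_l2_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
					   l2_multiplier);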

> +	if (l2_multiplier != kvm_default_tsc_scaling_ratio) {

Why does this check against the default ratio instead of L1's ratio?  If L1 is
running a non-default ratio, but L2 is running a default ratio, won't this result
in KVM leaving vcpu->arch.tsc_scaling_ratio at L1's ratio?  Or is there scaling
ratio magic I don't understand (which is likely...)?  If there's magic, can you
add a comment?

Same feedback for the check in the offset version.

> +		vcpu->arch.tsc_scaling_ratio = mul_u64_u64_shr(
> +				vcpu->arch.l1_tsc_scaling_ratio,
> +				l2_multiplier,
> +				kvm_tsc_scaling_ratio_frac_bits);
> +	}
> +}
> +EXPORT_SYMBOL_GPL(kvm_set_02_tsc_multiplier);
> +
>  static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
>  {
>  	vcpu->arch.l1_tsc_offset = offset;
> -- 
> 2.17.1
>
Ilias Stamatis May 19, 2021, 10:15 a.m. UTC | #2
On Tue, 2021-05-18 at 23:21 +0000, Sean Christopherson wrote:
> 
> On Wed, May 12, 2021, Ilias Stamatis wrote:
> > When L2 is entered we need to "merge" the TSC multiplier and TSC offset
> > values of 01 and 12 together.
> > 
> > The merging is done using the following equations:
> >   offset_02 = ((offset_01 * mult_12) >> shift_bits) + offset_12
> >   mult_02 = (mult_01 * mult_12) >> shift_bits
> > 
> > Where shift_bits is kvm_tsc_scaling_ratio_frac_bits.
> > 
> > Signed-off-by: Ilias Stamatis <ilstam@amazon.com>
> > ---
> >  arch/x86/include/asm/kvm_host.h |  2 ++
> >  arch/x86/kvm/x86.c              | 29 +++++++++++++++++++++++++++++
> >  2 files changed, 31 insertions(+)
> > 
> > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> > index 4c4a3fefff57..57a25d8e8b0f 100644
> > --- a/arch/x86/include/asm/kvm_host.h
> > +++ b/arch/x86/include/asm/kvm_host.h
> > @@ -1793,6 +1793,8 @@ static inline bool kvm_is_supported_user_return_msr(u32 msr)
> >  u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
> >  u64 kvm_scale_tsc_l1(struct kvm_vcpu *vcpu, u64 tsc);
> >  u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
> > +void kvm_set_02_tsc_offset(struct kvm_vcpu *vcpu);
> > +void kvm_set_02_tsc_multiplier(struct kvm_vcpu *vcpu);
> > 
> >  unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
> >  bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
> > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > index 84af1af7a2cc..1db6cfc2079f 100644
> > --- a/arch/x86/kvm/x86.c
> > +++ b/arch/x86/kvm/x86.c
> > @@ -2346,6 +2346,35 @@ u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
> >  }
> >  EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
> > 
> > +void kvm_set_02_tsc_offset(struct kvm_vcpu *vcpu)
> 
> I dislike the "02" nomenclature.  "02" is used specifically to refer to
> vmcs02 and vmcb02, whereas these helpers touch KVM's software model, not the CPU
> struct.  Can't this simply be "l2"?
> 
> > +{
> > +     u64 l2_offset = static_call(kvm_x86_get_l2_tsc_offset)(vcpu);
> > +     u64 l2_multiplier = static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu);
> > +
> > +     if (l2_multiplier != kvm_default_tsc_scaling_ratio) {
> > +             vcpu->arch.tsc_offset = mul_s64_u64_shr(
> > +                             (s64) vcpu->arch.l1_tsc_offset,
> > +                             l2_multiplier,
> > +                             kvm_tsc_scaling_ratio_frac_bits);
> > +     }
> > +
> > +     vcpu->arch.tsc_offset += l2_offset;
> > +}
> > +EXPORT_SYMBOL_GPL(kvm_set_02_tsc_offset);
> > +
> > +void kvm_set_02_tsc_multiplier(struct kvm_vcpu *vcpu)
> 
> I normally like splitting patches gratuitously, but in this case I think it would
> be better to combine this with the VMX usage in patch 08.  It's impossible to
> properly review this patch without looking at its callers.
> 
> > +{
> > +     u64 l2_multiplier = static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu);
> 
> Case in point, calling back into vendor code to get the L2 multiplier is silly,
> just have the caller provide it explicitly.
> 
> > +     if (l2_multiplier != kvm_default_tsc_scaling_ratio) {
> 
> Why does this check against the default ratio instead of L1's ratio?  If L1 is
> running a non-default ratio, but L2 is running a default ratio, won't this result
> in KVM leaving vcpu->arch.tsc_scaling_ratio at L1's ratio?  Or is there scaling
> ratio magic I don't understand (which is likely...)?  If there's magic, can you
> add a comment?
> 

Think of the "default ratio" as a ratio of 1, i.e. L2 is not scaled (from L1's
perspective). So yes, as you say, if L1 is running at a non-default ratio but
L2 is running at the default ratio (not scaled), this results in KVM leaving
arch.tsc_scaling_ratio at L1's ratio (as it should).
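
To make that concrete (using VMX's 48 fractional bits as an example), the
multiplier is just a binary fixed-point number:

	1ULL << 48	/* kvm_default_tsc_scaling_ratio, i.e. a ratio of 1.0 */
	3ULL << 47	/* 1.5 */
	1ULL << 49	/* 2.0 */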

I am not sure a comment is needed here. 

Having said that, theoretically we could omit this check completely and still
get the correct result. But in practice, because of the fixed-point arithmetic
involved, there would be a small precision error and tsc_scaling_ratio would
not end up being exactly the same as l1_tsc_scaling_ratio.

I will implement the rest of your feedback, thanks.

> 
> Same feedback for the check in the offset version.
> 
> > +             vcpu->arch.tsc_scaling_ratio = mul_u64_u64_shr(
> > +                             vcpu->arch.l1_tsc_scaling_ratio,
> > +                             l2_multiplier,
> > +                             kvm_tsc_scaling_ratio_frac_bits);
> > +     }
> > +}
> > +EXPORT_SYMBOL_GPL(kvm_set_02_tsc_multiplier);
> > +
> >  static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
> >  {
> >       vcpu->arch.l1_tsc_offset = offset;
> > --
> > 2.17.1
> >

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4c4a3fefff57..57a25d8e8b0f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1793,6 +1793,8 @@  static inline bool kvm_is_supported_user_return_msr(u32 msr)
 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
 u64 kvm_scale_tsc_l1(struct kvm_vcpu *vcpu, u64 tsc);
 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
+void kvm_set_02_tsc_offset(struct kvm_vcpu *vcpu);
+void kvm_set_02_tsc_multiplier(struct kvm_vcpu *vcpu);
 
 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 84af1af7a2cc..1db6cfc2079f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2346,6 +2346,35 @@  u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 }
 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
 
+void kvm_set_02_tsc_offset(struct kvm_vcpu *vcpu)
+{
+	u64 l2_offset = static_call(kvm_x86_get_l2_tsc_offset)(vcpu);
+	u64 l2_multiplier = static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu);
+
+	if (l2_multiplier != kvm_default_tsc_scaling_ratio) {
+		vcpu->arch.tsc_offset = mul_s64_u64_shr(
+				(s64) vcpu->arch.l1_tsc_offset,
+				l2_multiplier,
+				kvm_tsc_scaling_ratio_frac_bits);
+	}
+
+	vcpu->arch.tsc_offset += l2_offset;
+}
+EXPORT_SYMBOL_GPL(kvm_set_02_tsc_offset);
+
+void kvm_set_02_tsc_multiplier(struct kvm_vcpu *vcpu)
+{
+	u64 l2_multiplier = static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu);
+
+	if (l2_multiplier != kvm_default_tsc_scaling_ratio) {
+		vcpu->arch.tsc_scaling_ratio = mul_u64_u64_shr(
+				vcpu->arch.l1_tsc_scaling_ratio,
+				l2_multiplier,
+				kvm_tsc_scaling_ratio_frac_bits);
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_set_02_tsc_multiplier);
+
 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
 	vcpu->arch.l1_tsc_offset = offset;