
[v19,083/130] KVM: TDX: Add TSX_CTRL msr into uret_msrs list

Message ID 06135e0897ae90c3dc7fd608948f8bdcd30a17ae.1708933498.git.isaku.yamahata@intel.com (mailing list archive)
State New, archived
Series [v19,001/130] x86/virt/tdx: Rename _offset to _member for TD_SYSINFO_MAP() macro

Commit Message

Isaku Yamahata Feb. 26, 2024, 8:26 a.m. UTC
From: Yang Weijiang <weijiang.yang@intel.com>

The TDX module resets the TSX_CTRL MSR to 0 at TD exit if TSX is enabled
for the TD; if TSX is disabled for the TD, it preserves the TSX_CTRL MSR
instead.  The VMM can rely on the uret_msrs mechanism to defer reloading
the host value until exiting to user space.

Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
---
v19:
- fix the type of tdx_uret_tsx_ctrl_slot. unsigned int => int.
---
 arch/x86/kvm/vmx/tdx.c | 33 +++++++++++++++++++++++++++++++--
 arch/x86/kvm/vmx/tdx.h |  8 ++++++++
 2 files changed, 39 insertions(+), 2 deletions(-)
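
The mechanism the commit message refers to works roughly like this: right after a TD exit, KVM only records ("caches") the value the TDX module left in each user-return MSR, and the WRMSRs that restore the host values are deferred to the user-return notifier that fires when the vCPU thread goes back to user space.  A simplified sketch of that flow follows; kvm_user_return_update_cache() matches the helper used by this series, while NR_URET_MSRS and restore_host_msrs_on_user_return() are hypothetical placeholders for illustration, not the exact KVM implementation.

	/*
	 * Simplified sketch of the user-return MSR deferral, for
	 * illustration only.  NR_URET_MSRS and
	 * restore_host_msrs_on_user_return() are hypothetical names.
	 */
	struct user_return_msr {
		u32 msr;
		u64 host_value;	/* value the host expects */
		u64 curr_value;	/* value currently in the MSR, as cached */
	};

	static struct user_return_msr uret_msrs[NR_URET_MSRS];

	/* Right after TD exit: only record what the TDX module left behind. */
	void kvm_user_return_update_cache(int slot, u64 value)
	{
		uret_msrs[slot].curr_value = value;
		/* arm the user-return notifier if it is not already armed */
	}

	/* Runs from the user-return notifier when going back to user space. */
	static void restore_host_msrs_on_user_return(void)
	{
		int i;

		for (i = 0; i < NR_URET_MSRS; i++)
			if (uret_msrs[i].curr_value != uret_msrs[i].host_value)
				wrmsrl(uret_msrs[i].msr, uret_msrs[i].host_value);
	}

This is why the patch only needs to call kvm_user_return_update_cache() on the TD-exit path: the WRMSR that restores the host value happens later, at most once per return to user space.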

Comments

Binbin Wu April 7, 2024, 7:05 a.m. UTC | #1
On 2/26/2024 4:26 PM, isaku.yamahata@intel.com wrote:
> From: Yang Weijiang <weijiang.yang@intel.com>
>
> TDX module resets the TSX_CTRL MSR to 0 at TD exit if TSX is enabled for
> TD. Or it preserves the TSX_CTRL MSR if TSX is disabled for TD.  VMM can
> rely on uret_msrs mechanism to defer the reload of host value until exiting
> to user space.
>
> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
> ---
> v19:
> - fix the type of tdx_uret_tsx_ctrl_slot. unsigned int => int.
> ---
>   arch/x86/kvm/vmx/tdx.c | 33 +++++++++++++++++++++++++++++++--
>   arch/x86/kvm/vmx/tdx.h |  8 ++++++++
>   2 files changed, 39 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index 7e2b1e554246..83dcaf5b6fbd 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -547,14 +547,21 @@ static struct tdx_uret_msr tdx_uret_msrs[] = {
>   	{.msr = MSR_LSTAR,},
>   	{.msr = MSR_TSC_AUX,},
>   };
> +static int tdx_uret_tsx_ctrl_slot;
>   
> -static void tdx_user_return_update_cache(void)
> +static void tdx_user_return_update_cache(struct kvm_vcpu *vcpu)
>   {
>   	int i;
>   
>   	for (i = 0; i < ARRAY_SIZE(tdx_uret_msrs); i++)
>   		kvm_user_return_update_cache(tdx_uret_msrs[i].slot,
>   					     tdx_uret_msrs[i].defval);
> +	/*
> +	 * TSX_CTRL is reset to 0 if guest TSX is supported. Otherwise
> +	 * preserved.
> +	 */
> +	if (to_kvm_tdx(vcpu->kvm)->tsx_supported && tdx_uret_tsx_ctrl_slot != -1)

If to_kvm_tdx(vcpu->kvm)->tsx_supported is true, tdx_uret_tsx_ctrl_slot 
shouldn't be -1 at this point.
Otherwise, it's a KVM bug, right?
Not sure if it needs a warning if tdx_uret_tsx_ctrl_slot is -1, or just 
remove the check?

> +		kvm_user_return_update_cache(tdx_uret_tsx_ctrl_slot, 0);
>   }
>   
>   static void tdx_restore_host_xsave_state(struct kvm_vcpu *vcpu)
> @@ -649,7 +656,7 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu)
>   
>   	tdx_vcpu_enter_exit(tdx);
>   
> -	tdx_user_return_update_cache();
> +	tdx_user_return_update_cache(vcpu);
>   	tdx_restore_host_xsave_state(vcpu);
>   	tdx->host_state_need_restore = true;
>   
> @@ -1167,6 +1174,22 @@ static int setup_tdparams_xfam(struct kvm_cpuid2 *cpuid, struct td_params *td_pa
>   	return 0;
>   }
>   
> +static bool tdparams_tsx_supported(struct kvm_cpuid2 *cpuid)
> +{
> +	const struct kvm_cpuid_entry2 *entry;
> +	u64 mask;
> +	u32 ebx;
> +
> +	entry = kvm_find_cpuid_entry2(cpuid->entries, cpuid->nent, 0x7, 0);
> +	if (entry)
> +		ebx = entry->ebx;
> +	else
> +		ebx = 0;
> +
> +	mask = __feature_bit(X86_FEATURE_HLE) | __feature_bit(X86_FEATURE_RTM);
> +	return ebx & mask;
> +}
> +
>   static int setup_tdparams(struct kvm *kvm, struct td_params *td_params,
>   			struct kvm_tdx_init_vm *init_vm)
>   {
> @@ -1209,6 +1232,7 @@ static int setup_tdparams(struct kvm *kvm, struct td_params *td_params,
>   	MEMCPY_SAME_SIZE(td_params->mrowner, init_vm->mrowner);
>   	MEMCPY_SAME_SIZE(td_params->mrownerconfig, init_vm->mrownerconfig);
>   
> +	to_kvm_tdx(kvm)->tsx_supported = tdparams_tsx_supported(cpuid);
>   	return 0;
>   }
>   
> @@ -2014,6 +2038,11 @@ int __init tdx_hardware_setup(struct kvm_x86_ops *x86_ops)
>   			return -EIO;
>   		}
>   	}
> +	tdx_uret_tsx_ctrl_slot = kvm_find_user_return_msr(MSR_IA32_TSX_CTRL);
> +	if (tdx_uret_tsx_ctrl_slot == -1 && boot_cpu_has(X86_FEATURE_MSR_TSX_CTRL)) {
> +		pr_err("MSR_IA32_TSX_CTRL isn't included by kvm_find_user_return_msr\n");
> +		return -EIO;
> +	}
>   
>   	max_pkgs = topology_max_packages();
>   	tdx_mng_key_config_lock = kcalloc(max_pkgs, sizeof(*tdx_mng_key_config_lock),
> diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
> index e96c416e73bf..44eab734e702 100644
> --- a/arch/x86/kvm/vmx/tdx.h
> +++ b/arch/x86/kvm/vmx/tdx.h
> @@ -17,6 +17,14 @@ struct kvm_tdx {
>   	u64 xfam;
>   	int hkid;
>   
> +	/*
> +	 * Used on each TD-exit, see tdx_user_return_update_cache().
> +	 * TSX_CTRL value on TD exit
> +	 * - set 0     if guest TSX enabled
> +	 * - preserved if guest TSX disabled
> +	 */
> +	bool tsx_supported;
> +
>   	bool finalized;
>   	atomic_t tdh_mem_track;
>
Isaku Yamahata April 12, 2024, 8:35 p.m. UTC | #2
On Sun, Apr 07, 2024 at 03:05:21PM +0800,
Binbin Wu <binbin.wu@linux.intel.com> wrote:

> 
> 
> On 2/26/2024 4:26 PM, isaku.yamahata@intel.com wrote:
> > From: Yang Weijiang <weijiang.yang@intel.com>
> > 
> > TDX module resets the TSX_CTRL MSR to 0 at TD exit if TSX is enabled for
> > TD. Or it preserves the TSX_CTRL MSR if TSX is disabled for TD.  VMM can
> > rely on uret_msrs mechanism to defer the reload of host value until exiting
> > to user space.
> > 
> > Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
> > Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
> > ---
> > v19:
> > - fix the type of tdx_uret_tsx_ctrl_slot. unsigned int => int.
> > ---
> >   arch/x86/kvm/vmx/tdx.c | 33 +++++++++++++++++++++++++++++++--
> >   arch/x86/kvm/vmx/tdx.h |  8 ++++++++
> >   2 files changed, 39 insertions(+), 2 deletions(-)
> > 
> > diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> > index 7e2b1e554246..83dcaf5b6fbd 100644
> > --- a/arch/x86/kvm/vmx/tdx.c
> > +++ b/arch/x86/kvm/vmx/tdx.c
> > @@ -547,14 +547,21 @@ static struct tdx_uret_msr tdx_uret_msrs[] = {
> >   	{.msr = MSR_LSTAR,},
> >   	{.msr = MSR_TSC_AUX,},
> >   };
> > +static int tdx_uret_tsx_ctrl_slot;
> > -static void tdx_user_return_update_cache(void)
> > +static void tdx_user_return_update_cache(struct kvm_vcpu *vcpu)
> >   {
> >   	int i;
> >   	for (i = 0; i < ARRAY_SIZE(tdx_uret_msrs); i++)
> >   		kvm_user_return_update_cache(tdx_uret_msrs[i].slot,
> >   					     tdx_uret_msrs[i].defval);
> > +	/*
> > +	 * TSX_CTRL is reset to 0 if guest TSX is supported. Otherwise
> > +	 * preserved.
> > +	 */
> > +	if (to_kvm_tdx(vcpu->kvm)->tsx_supported && tdx_uret_tsx_ctrl_slot != -1)
> 
> If to_kvm_tdx(vcpu->kvm)->tsx_supported is true, tdx_uret_tsx_ctrl_slot
> shouldn't be -1 at this point.
> Otherwise, it's a KVM bug, right?
> Not sure if it needs a warning if tdx_uret_tsx_ctrl_slot is -1, or just
> remove the check?

You're right. Let me remove the != -1 check.
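
For reference, a minimal sketch of how that hunk could look with the check dropped (illustrative only, not the posted follow-up revision):

	/*
	 * TSX_CTRL is reset to 0 if guest TSX is supported. Otherwise
	 * preserved.
	 */
	if (to_kvm_tdx(vcpu->kvm)->tsx_supported)
		kvm_user_return_update_cache(tdx_uret_tsx_ctrl_slot, 0);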

Patch

diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 7e2b1e554246..83dcaf5b6fbd 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -547,14 +547,21 @@  static struct tdx_uret_msr tdx_uret_msrs[] = {
 	{.msr = MSR_LSTAR,},
 	{.msr = MSR_TSC_AUX,},
 };
+static int tdx_uret_tsx_ctrl_slot;
 
-static void tdx_user_return_update_cache(void)
+static void tdx_user_return_update_cache(struct kvm_vcpu *vcpu)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(tdx_uret_msrs); i++)
 		kvm_user_return_update_cache(tdx_uret_msrs[i].slot,
 					     tdx_uret_msrs[i].defval);
+	/*
+	 * TSX_CTRL is reset to 0 if guest TSX is supported. Otherwise
+	 * preserved.
+	 */
+	if (to_kvm_tdx(vcpu->kvm)->tsx_supported && tdx_uret_tsx_ctrl_slot != -1)
+		kvm_user_return_update_cache(tdx_uret_tsx_ctrl_slot, 0);
 }
 
 static void tdx_restore_host_xsave_state(struct kvm_vcpu *vcpu)
@@ -649,7 +656,7 @@  fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu)
 
 	tdx_vcpu_enter_exit(tdx);
 
-	tdx_user_return_update_cache();
+	tdx_user_return_update_cache(vcpu);
 	tdx_restore_host_xsave_state(vcpu);
 	tdx->host_state_need_restore = true;
 
@@ -1167,6 +1174,22 @@  static int setup_tdparams_xfam(struct kvm_cpuid2 *cpuid, struct td_params *td_pa
 	return 0;
 }
 
+static bool tdparams_tsx_supported(struct kvm_cpuid2 *cpuid)
+{
+	const struct kvm_cpuid_entry2 *entry;
+	u64 mask;
+	u32 ebx;
+
+	entry = kvm_find_cpuid_entry2(cpuid->entries, cpuid->nent, 0x7, 0);
+	if (entry)
+		ebx = entry->ebx;
+	else
+		ebx = 0;
+
+	mask = __feature_bit(X86_FEATURE_HLE) | __feature_bit(X86_FEATURE_RTM);
+	return ebx & mask;
+}
+
 static int setup_tdparams(struct kvm *kvm, struct td_params *td_params,
 			struct kvm_tdx_init_vm *init_vm)
 {
@@ -1209,6 +1232,7 @@  static int setup_tdparams(struct kvm *kvm, struct td_params *td_params,
 	MEMCPY_SAME_SIZE(td_params->mrowner, init_vm->mrowner);
 	MEMCPY_SAME_SIZE(td_params->mrownerconfig, init_vm->mrownerconfig);
 
+	to_kvm_tdx(kvm)->tsx_supported = tdparams_tsx_supported(cpuid);
 	return 0;
 }
 
@@ -2014,6 +2038,11 @@  int __init tdx_hardware_setup(struct kvm_x86_ops *x86_ops)
 			return -EIO;
 		}
 	}
+	tdx_uret_tsx_ctrl_slot = kvm_find_user_return_msr(MSR_IA32_TSX_CTRL);
+	if (tdx_uret_tsx_ctrl_slot == -1 && boot_cpu_has(X86_FEATURE_MSR_TSX_CTRL)) {
+		pr_err("MSR_IA32_TSX_CTRL isn't included by kvm_find_user_return_msr\n");
+		return -EIO;
+	}
 
 	max_pkgs = topology_max_packages();
 	tdx_mng_key_config_lock = kcalloc(max_pkgs, sizeof(*tdx_mng_key_config_lock),
diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
index e96c416e73bf..44eab734e702 100644
--- a/arch/x86/kvm/vmx/tdx.h
+++ b/arch/x86/kvm/vmx/tdx.h
@@ -17,6 +17,14 @@  struct kvm_tdx {
 	u64 xfam;
 	int hkid;
 
+	/*
+	 * Used on each TD-exit, see tdx_user_return_update_cache().
+	 * TSX_CTRL value on TD exit
+	 * - set 0     if guest TSX enabled
+	 * - preserved if guest TSX disabled
+	 */
+	bool tsx_supported;
+
 	bool finalized;
 	atomic_t tdh_mem_track;