@@ -2,6 +2,8 @@
 #include <linux/cleanup.h>
 #include <linux/cpu.h>
 #include <linux/mmu_context.h>
+
+#include <asm/fpu/xcr.h>
 #include <asm/tdx.h>
 #include "capabilities.h"
 #include "mmu.h"
@@ -709,6 +711,35 @@ void tdx_vcpu_free(struct kvm_vcpu *vcpu)
 }
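 
+/*
+ * On TD-exit, XCR0/IA32_XSS/PKRU may still hold TD values; restore the
+ * host state before any host code depends on it.
+ */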
+static void tdx_restore_host_xsave_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
+
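+	/*
+	 * After TD-exit, XCR0 reflects the TD's XFAM rather than the host
+	 * value; write host XCR0 back only when it actually differs.
+	 */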
+	if (static_cpu_has(X86_FEATURE_XSAVE) &&
+	    kvm_host.xcr0 != (kvm_tdx->xfam & kvm_caps.supported_xcr0))
+		xsetbv(XCR_XFEATURE_ENABLED_MASK, kvm_host.xcr0);
+
+	if (static_cpu_has(X86_FEATURE_XSAVES) &&
+	    /* PT can be exposed to TD guest regardless of KVM's XSS support */
+	    kvm_host.xss != (kvm_tdx->xfam &
+			     (kvm_caps.supported_xss | XFEATURE_MASK_PT |
+			      XFEATURE_MASK_CET_USER | XFEATURE_MASK_CET_KERNEL)))
+		wrmsrl(MSR_IA32_XSS, kvm_host.xss);
+
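+	/* Host PKRU needs restoring only if the TD was allowed to use PKU. */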
+	if (static_cpu_has(X86_FEATURE_PKU) &&
+	    (kvm_tdx->xfam & XFEATURE_MASK_PKRU))
+		write_pkru(vcpu->arch.host_pkru);
+}
+
 static void tdx_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_tdx *tdx = to_tdx(vcpu);
@@ -776,6 +807,8 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
 	tdx_vcpu_enter_exit(vcpu);
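+	/* Restore host XCR0/XSS/PKRU before any host code touches FPU state. */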
+	tdx_restore_host_xsave_state(vcpu);
 	tdx->host_state_need_restore = true;
 	vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;