kvm_load_host_xsave_state() uses guest save state that is not accessible
when guest_state_protected is true. Forbid access to it. For
consistency, do the same for kvm_load_guest_xsave_state().

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/svm.c | 7 +++++--
 arch/x86/kvm/x86.c     | 5 ++---
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4251,7 +4251,9 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
 		svm_set_dr6(vcpu, DR6_ACTIVE_LOW);
 
 	clgi();
-	kvm_load_guest_xsave_state(vcpu);
+
+	if (!vcpu->arch.guest_state_protected)
+		kvm_load_guest_xsave_state(vcpu);
 
 	kvm_wait_lapic_expire(vcpu);
 
@@ -4280,7 +4282,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
 		kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
 
-	kvm_load_host_xsave_state(vcpu);
+	if (!vcpu->arch.guest_state_protected)
+		kvm_load_host_xsave_state(vcpu);
 	stgi();
 
 	/* Any pending NMI will happen here */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1182,11 +1182,10 @@ EXPORT_SYMBOL_GPL(kvm_lmsw);
 
 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.guest_state_protected)
+	if (WARN_ON_ONCE(vcpu->arch.guest_state_protected))
 		return;
 
 	if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
-
 		if (vcpu->arch.xcr0 != kvm_host.xcr0)
 			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
 
@@ -1205,7 +1204,7 @@ EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
 
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.guest_state_protected)
+	if (WARN_ON_ONCE(vcpu->arch.guest_state_protected))
 		return;
 
 	if (cpu_feature_enabled(X86_FEATURE_PKU) &&
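
For context, not part of the patch: the guest save state in question lives
in fields like vcpu->arch.pkru, vcpu->arch.xcr0 and vcpu->arch.ia32_xss,
which mirror guest registers and are never synced out of the encrypted
VMSA once guest_state_protected is set. A simplified sketch of the first
such access in kvm_load_host_xsave_state(), adapted from upstream code
(details may vary by kernel version):

	/*
	 * Sketch of the PKU path: both vcpu->arch.pkru and
	 * vcpu->arch.xcr0 hold *guest* values, which are stale for a
	 * vCPU whose register state lives in an encrypted VMSA.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PKU) &&
	    vcpu->arch.pkru != vcpu->arch.host_pkru &&
	    ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
	     kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
		write_pkru(vcpu->arch.host_pkru);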
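
Similarly for context: guest_state_protected becomes true once the vCPU's
register state has been handed off to hardware encryption. For SEV-ES that
happens when the VMSA is encrypted at launch; a simplified sketch, adapted
from upstream sev_launch_update_vmsa() in arch/x86/kvm/svm/sev.c (the
exact flow may differ by version):

	/*
	 * Sketch: once the PSP encrypts the VMSA, KVM can no longer read
	 * or write the vCPU's registers, including the xsave fields.
	 */
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
	if (ret)
		return ret;

	vcpu->arch.guest_state_protected = true;

From that point on, the guards added to svm_vcpu_run() above are what keep
the xsave load/restore paths from acting on stale state.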