@@ -302,6 +302,7 @@ struct kvm_vcpu_arch {
} update_pte;
struct fpu guest_fpu;
+ u64 xcr0;
gva_t mmio_fault_cr2;
struct kvm_pio_request pio;
@@ -266,6 +266,7 @@ enum vmcs_field {
#define EXIT_REASON_EPT_VIOLATION 48
#define EXIT_REASON_EPT_MISCONFIG 49
#define EXIT_REASON_WBINVD 54
+#define EXIT_REASON_XSETBV 55
/*
* Interruption-information format
@@ -36,6 +36,8 @@
#include <asm/vmx.h>
#include <asm/virtext.h>
#include <asm/mce.h>
+#include <asm/i387.h>
+#include <asm/xcr.h>
#include "trace.h"
@@ -247,6 +249,9 @@ static const u32 vmx_msr_index[] = {
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
+#define MERGE_TO_U64(low, high) \
+ (((low) & -1u) | ((u64)((high) & -1u) << 32))
+
static inline bool is_page_fault(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
@@ -3354,6 +3359,28 @@ static int handle_wbinvd(struct kvm_vcpu *vcpu)
return 1;
}
+static int handle_xsetbv(struct kvm_vcpu *vcpu)
+{
+ u64 new_bv = MERGE_TO_U64(kvm_register_read(vcpu, VCPU_REGS_RAX),
+ kvm_register_read(vcpu, VCPU_REGS_RDX));
+
+ if (kvm_register_read(vcpu, VCPU_REGS_RCX) != 0)
+ goto err;
+ if (vmx_get_cpl(vcpu) != 0)
+ goto err;
+ if (!(new_bv & XSTATE_FP))
+ goto err;
+ if ((new_bv & XSTATE_YMM) && !(new_bv & XSTATE_SSE))
+ goto err;
+ vcpu->arch.xcr0 = new_bv;
+ xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
+ skip_emulated_instruction(vcpu);
+ return 1;
+err:
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+}
+
static int handle_apic_access(struct kvm_vcpu *vcpu)
{
return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE;
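
For context: XSETBV takes the XCR index in ECX and the new value split across EDX:EAX, which is what MERGE_TO_U64 reassembles, and the handler above rejects the write unless it comes from CPL 0, targets XCR0 (ECX == 0), keeps the x87 bit set, and only enables YMM state together with SSE state. A standalone restatement of the value checks, with a hypothetical function name not taken from the patch:

	#include <stdint.h>

	/* Hypothetical sketch of the checks made by handle_xsetbv(): merge
	 * EDX:EAX the same way MERGE_TO_U64 does, then require bit 0
	 * (XSTATE_FP) to be set and allow bit 2 (XSTATE_YMM) only together
	 * with bit 1 (XSTATE_SSE). */
	static int xcr0_value_is_consistent(uint32_t eax, uint32_t edx)
	{
		uint64_t xcr0 = ((uint64_t)edx << 32) | eax;

		if (!(xcr0 & 0x1))			/* XSTATE_FP */
			return 0;
		if ((xcr0 & 0x4) && !(xcr0 & 0x2))	/* XSTATE_YMM needs XSTATE_SSE */
			return 0;
		return 1;
	}
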
@@ -3632,6 +3659,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
[EXIT_REASON_APIC_ACCESS] = handle_apic_access,
[EXIT_REASON_WBINVD] = handle_wbinvd,
+ [EXIT_REASON_XSETBV] = handle_xsetbv,
[EXIT_REASON_TASK_SWITCH] = handle_task_switch,
[EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
[EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
@@ -64,6 +64,7 @@
(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
| X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
| X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
+ | X86_CR4_OSXSAVE \
| X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
@@ -149,6 +150,13 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
+static u64 host_xcr0;
+
+static inline u32 bit(int bitno)
+{
+ return 1 << (bitno & 31);
+}
+
static void kvm_on_user_return(struct user_return_notifier *urn)
{
unsigned slot;
@@ -473,6 +481,30 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
+static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 1, 0);
+ return best && (best->ecx & bit(X86_FEATURE_XSAVE));
+}
+
+static void update_cpuid(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 1, 0);
+ if (!best)
+ return;
+
+ /* Update OSXSAVE bit */
+ if (cpu_has_xsave && best->function == 0x1) {
+ best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
+ if (kvm_read_cr4(vcpu) & X86_CR4_OSXSAVE)
+ best->ecx |= bit(X86_FEATURE_OSXSAVE);
+ }
+}
+
int __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
@@ -481,6 +513,9 @@ int __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
if (cr4 & CR4_RESERVED_BITS)
return 1;
+ if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
+ return 1;
+
if (is_long_mode(vcpu)) {
if (!(cr4 & X86_CR4_PAE))
return 1;
@@ -497,6 +532,9 @@ int __kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
if ((cr4 ^ old_cr4) & pdptr_bits)
kvm_mmu_reset_context(vcpu);
+ if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
+ update_cpuid(vcpu);
+
return 0;
}
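
Some architectural background for the helpers above: CPUID.01H:ECX bit 26 (XSAVE) advertises the feature itself, while bit 27 (OSXSAVE) must mirror the guest's CR4.OSXSAVE (CR4 bit 18). That is why __kvm_set_cr4() refuses to set CR4.OSXSAVE when the guest's CPUID lacks XSAVE, and calls update_cpuid() whenever the bit toggles. A self-contained restatement of the mirroring, with a hypothetical function name not taken from the patch:

	#include <stdint.h>

	/* Hypothetical sketch of the OSXSAVE mirroring done by update_cpuid():
	 * clear CPUID.01H:ECX bit 27 and set it again only if CR4.OSXSAVE
	 * (CR4 bit 18) is set in the guest. Bit numbers are architectural. */
	static uint32_t mirror_osxsave_into_cpuid(uint32_t leaf1_ecx, uint64_t guest_cr4)
	{
		leaf1_ecx &= ~(1u << 27);		/* OSXSAVE */
		if (guest_cr4 & (1ull << 18))		/* CR4.OSXSAVE */
			leaf1_ecx |= 1u << 27;
		return leaf1_ecx;
	}
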
@@ -665,11 +703,6 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
}
EXPORT_SYMBOL_GPL(kvm_get_dr);
-static inline u32 bit(int bitno)
-{
- return 1 << (bitno & 31);
-}
-
/*
* List of msr numbers which we expose to userspace through KVM_GET_MSRS
* and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
@@ -1916,7 +1949,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
0 /* Reserved, DCA */ | F(XMM4_1) |
F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
- 0 /* Reserved, XSAVE, OSXSAVE */;
+ 0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */;
/* cpuid 0x80000001.ecx */
const u32 kvm_supported_word6_x86_features =
F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
@@ -1931,7 +1964,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
switch (function) {
case 0:
- entry->eax = min(entry->eax, (u32)0xb);
+ entry->eax = min(entry->eax, (u32)0xd);
break;
case 1:
entry->edx &= kvm_supported_word0_x86_features;
@@ -1989,6 +2022,20 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
}
break;
}
+ case 0xd: {
+ int i;
+
+ entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ for (i = 1; *nent < maxnent; ++i) {
+ if (entry[i - 1].eax == 0 && i != 2)
+ break;
+ do_cpuid_1_ent(&entry[i], function, i);
+ entry[i].flags |=
+ KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ ++*nent;
+ }
+ break;
+ }
case KVM_CPUID_SIGNATURE: {
char signature[12] = "KVMKVMKVM\0\0";
u32 *sigptr = (u32 *)signature;
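
For reference, CPUID leaf 0DH is index-significant: sub-leaf 0 returns the processor-supported XCR0 bits in EDX:EAX and the required save-area sizes in EBX/ECX, sub-leaf 1 reports XSAVE extensions such as XSAVEOPT, and sub-leaves 2 and up give the size and offset of each extended state component, terminated by an all-zero EAX. The loop above therefore keeps copying host sub-leaves until the previously copied EAX is zero, but does not stop at i == 2 merely because sub-leaf 1 may legitimately read as zero. A host-side enumeration sketch (hypothetical, not KVM code), assuming the __get_cpuid_count() helper from GCC/Clang's <cpuid.h>:

	#include <cpuid.h>
	#include <stdio.h>

	/* Hypothetical host-side walk of CPUID leaf 0xd: stop once a
	 * sub-leaf past index 1 reads back with EAX == 0. */
	static void dump_xsave_leaves(void)
	{
		unsigned int eax, ebx, ecx, edx, i;

		for (i = 0; ; i++) {
			if (!__get_cpuid_count(0xd, i, &eax, &ebx, &ecx, &edx))
				break;
			if (i > 1 && eax == 0)
				break;
			printf("CPUID.0DH.%u: eax=%#x ebx=%#x ecx=%#x edx=%#x\n",
			       i, eax, ebx, ecx, edx);
		}
	}
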
@@ -4124,6 +4171,9 @@ int kvm_arch_init(void *opaque)
perf_register_guest_info_callbacks(&kvm_guest_cbs);
+ if (cpu_has_xsave)
+ host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
+
return 0;
out:
@@ -4567,6 +4616,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_x86_ops->prepare_guest_switch(vcpu);
if (vcpu->fpu_active)
kvm_load_guest_fpu(vcpu);
+ if (kvm_read_cr4(vcpu) & X86_CR4_OSXSAVE)
+ xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
atomic_set(&vcpu->guest_mode, 1);
smp_wmb();
@@ -5118,6 +5169,10 @@ void fx_init(struct kvm_vcpu *vcpu)
fpu_alloc(&vcpu->arch.guest_fpu);
fpu_finit(&vcpu->arch.guest_fpu);
+ /* Ensure guest xcr0 is valid for loading */
+ if (cpu_has_xsave)
+ vcpu->arch.xcr0 = XSTATE_FP;
+
vcpu->arch.cr0 |= X86_CR0_ET;
}
EXPORT_SYMBOL_GPL(fx_init);
@@ -5127,6 +5182,15 @@ static void fx_free(struct kvm_vcpu *vcpu)
fpu_free(&vcpu->arch.guest_fpu);
}
+static u64 cpuid_get_possible_xcr0(struct kvm_vcpu *vcpu)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ /* Host CPUID.0DH:0 reports the hardware-supported XCR0 bits in EDX:EAX */
+ cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx);
+ return eax | ((u64)edx << 32);
+}
+
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
if (vcpu->guest_fpu_loaded)
@@ -5134,6 +5197,10 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
vcpu->guest_fpu_loaded = 1;
unlazy_fpu(current);
+ /* Enable every state the guest may have used so XRSTOR restores them all */
+ if (cpu_has_xsave && guest_cpuid_has_xsave(vcpu))
+ xsetbv(XCR_XFEATURE_ENABLED_MASK,
+ cpuid_get_possible_xcr0(vcpu));
fpu_restore_checking(&vcpu->arch.guest_fpu);
trace_kvm_fpu(1);
}
@@ -5144,7 +5211,14 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
return;
vcpu->guest_fpu_loaded = 0;
+ /* Enable every state the guest may have used so XSAVE captures them all */
+ if (cpu_has_xsave && guest_cpuid_has_xsave(vcpu))
+ xsetbv(XCR_XFEATURE_ENABLED_MASK,
+ cpuid_get_possible_xcr0(vcpu));
fpu_save_init(&vcpu->arch.guest_fpu);
+ if (cpu_has_xsave)
+ xsetbv(XCR_XFEATURE_ENABLED_MASK,
+ host_xcr0);
++vcpu->stat.fpu_reload;
set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
trace_kvm_fpu(0);
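
Both FPU switch paths above adjust XCR0 because XSAVE and XRSTOR only operate on the state components whose bits are currently set in it: kvm_load_guest_fpu() enables every state the guest could have used before restoring the guest image (the guest's real XCR0 is then loaded in vcpu_enter_guest() just before entry), and kvm_put_guest_fpu() does the same before saving, then switches back to host_xcr0. A condensed outline of the save side, reusing the patch's own helpers under a hypothetical function name, with the cpu_has_xsave guards omitted:

	/* Hypothetical outline of the XCR0 handling in kvm_put_guest_fpu();
	 * XSAVE only writes the state components enabled in XCR0 at the
	 * time it executes. */
	static void save_guest_fpu_outline(struct kvm_vcpu *vcpu)
	{
		/* 1. enable every state the guest could have touched */
		xsetbv(XCR_XFEATURE_ENABLED_MASK, cpuid_get_possible_xcr0(vcpu));
		/* 2. XSAVE (via fpu_save_init) now captures all of them */
		fpu_save_init(&vcpu->arch.guest_fpu);
		/* 3. switch back to the host's enabled-state mask */
		xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
	}
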