@@ -514,6 +514,8 @@ struct kvm_x86_ops {
void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
+ void (*execute_wbinvd)(struct kvm_vcpu *vcpu);
+
const struct trace_print_flags *exit_reasons_str;
};
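The new hook follows the usual kvm_x86_ops pattern: arch-neutral code calls through the ops table, and each backend (VMX or SVM) supplies its own implementation at init time. A minimal standalone sketch of that dispatch pattern, with hypothetical names:

#include <stdio.h>

/* Hypothetical stand-in for the kvm_x86_ops indirection. */
struct x86_ops {
        void (*execute_wbinvd)(int vcpu_id);
};

static void vmx_impl(int vcpu_id) { printf("vmx: flush for vcpu %d\n", vcpu_id); }
static void svm_impl(int vcpu_id) { (void)vcpu_id; /* no-op, as in this patch */ }

static struct x86_ops vmx_ops = { .execute_wbinvd = vmx_impl };
static struct x86_ops svm_ops = { .execute_wbinvd = svm_impl };

int main(void)
{
        struct x86_ops *ops = &vmx_ops; /* chosen once at init, like kvm_x86_ops */
        ops->execute_wbinvd(0);         /* arch-neutral call site */
        ops = &svm_ops;
        ops->execute_wbinvd(0);         /* same call site, different backend */
        return 0;
}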
@@ -571,6 +573,7 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
+int emulate_wbinvd(struct kvm_vcpu *vcpu);
void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
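Like the neighbouring emulate_clts() and emulate_invlpg(), the new emulate_wbinvd() follows the emulator's return-code convention; for reference (constants from kvm_emulate.h, descriptions paraphrased):

/* Emulator return codes (see arch/x86/include/asm/kvm_emulate.h):
 *   X86EMUL_CONTINUE        - instruction handled, emulation proceeds
 *   X86EMUL_UNHANDLEABLE    - cannot emulate, punt to userspace
 *   X86EMUL_PROPAGATE_FAULT - a fault was injected into the guest
 */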
@@ -3138,8 +3138,11 @@ twobyte_insn:
emulate_clts(ctxt->vcpu);
c->dst.type = OP_NONE;
break;
- case 0x08: /* invd */
case 0x09: /* wbinvd */
+ emulate_wbinvd(ctxt->vcpu);
+ c->dst.type = OP_NONE;
+ break;
+ case 0x08: /* invd */
case 0x0d: /* GrpP (prefetch) */
case 0x18: /* Grp16 (prefetch/nop) */
c->dst.type = OP_NONE;
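With this reordering, a WBINVD (0F 09) that reaches the instruction emulator is forwarded to the backend, while INVD (0F 08) still falls through to the existing nop handling. A hypothetical guest-side module to exercise the path (WBINVD is privileged, so it must run in guest ring 0):

#include <linux/module.h>
#include <linux/init.h>

/* Hypothetical guest test: WBINVD either runs natively or traps to
 * KVM, depending on whether WBINVD exiting is enabled on the host. */
static int __init wbinvd_test_init(void)
{
        asm volatile("wbinvd" ::: "memory");
        pr_info("wbinvd issued\n");
        return 0;
}

static void __exit wbinvd_test_exit(void)
{
}

module_init(wbinvd_test_init);
module_exit(wbinvd_test_exit);
MODULE_LICENSE("GPL");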
@@ -3424,6 +3424,11 @@ static bool svm_rdtscp_supported(void)
return false;
}
+static void svm_execute_wbinvd(struct kvm_vcpu *vcpu)
+{
+ /* Not wired up for SVM yet; emulated WBINVD is a no-op here. */
+}
+
static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -3508,6 +3512,8 @@ static struct kvm_x86_ops svm_x86_ops = {
.rdtscp_supported = svm_rdtscp_supported,
.set_supported_cpuid = svm_set_supported_cpuid,
+
+ .execute_wbinvd = svm_execute_wbinvd,
};
static int __init svm_init(void)
@@ -412,6 +412,12 @@ static inline bool cpu_has_virtual_nmis(void)
return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
}
+static inline bool cpu_has_wbinvd_exit(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_WBINVD_EXITING;
+}
+
static inline bool report_flexpriority(void)
{
return flexpriority_enabled;
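SECONDARY_EXEC_WBINVD_EXITING is bit 6 (0x40) of the secondary processor-based VM-execution controls; whether the hardware allows setting it is advertised in the high ("allowed-1") dword of the IA32_VMX_PROCBASED_CTLS2 MSR, which vmcs_config caches at setup. A hedged sketch of probing the MSR directly, assuming the secondary-controls MSR is present:

#include <linux/types.h>
#include <asm/msr.h>

#define SECONDARY_EXEC_WBINVD_EXITING   0x00000040

/* The high 32 bits of IA32_VMX_PROCBASED_CTLS2 are the allowed-1
 * settings: a set bit means the corresponding control may be enabled. */
static bool host_allows_wbinvd_exiting(void)
{
        u32 low, high;

        rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, low, high);
        return high & SECONDARY_EXEC_WBINVD_EXITING;
}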
@@ -874,6 +880,11 @@ static void vmx_load_host_state(struct vcpu_vmx *vmx)
preempt_enable();
}
+static void wbinvd_ipi(void *opaque)
+{
+ wbinvd();
+}
+
/*
* Switches to specified vcpu, until a matching vcpu_put(), but assumes
* vcpu mutex is already taken.
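wbinvd_ipi() matches the smp_call_func_t signature expected by the kernel's cross-CPU call primitives. A minimal usage sketch (not part of the patch): smp_call_function() covers every online CPU except the caller, so a full flush needs a local wbinvd() as well (on_each_cpu() would do both in one call):

/* Flush every online CPU's caches and wait for completion. */
static void flush_all_cpus(void)
{
        smp_call_function(wbinvd_ipi, NULL, 1); /* all other CPUs, wait=1 */
        wbinvd();                               /* the calling CPU */
}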
@@ -905,6 +916,16 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
&per_cpu(vcpus_on_cpu, cpu));
local_irq_enable();
+ /*
+ * Without WBINVD exiting, a guest WBINVD only flushes the pCPU
+ * it ran on; flush the old pCPU here so an assigned device
+ * never reads stale memory after the vcpu migrates.
+ */
+ if (!cpu_has_wbinvd_exit() && vcpu->kvm->arch.iommu_domain &&
+ vcpu->cpu != -1)
+ smp_call_function_single(vcpu->cpu,
+ wbinvd_ipi, NULL, 1);
+
vcpu->cpu = cpu;
/*
* Linux uses per-cpu TSS and GDT, so set these when switching
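The window the hunk above closes, as a worked timeline (assumed scenario with an assigned device and no WBINVD exiting):

/*
 *   pCPU0: vcpu runs, leaves DMA-visible data dirty in pCPU0's cache
 *   ----- scheduler moves the vcpu; vmx_vcpu_load() runs on pCPU1 -----
 *   pCPU1: guest executes WBINVD natively -> flushes pCPU1 only;
 *          the dirty lines on pCPU0 are untouched
 *   device: DMA reads memory and would see stale data
 *
 * Hence the IPI above: flush the old pCPU (vcpu->cpu) during migration,
 * before the guest can rely on a flush issued from the new pCPU.
 */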
@@ -3397,10 +3414,19 @@ static int handle_invlpg(struct kvm_vcpu *vcpu)
return 1;
}
+static void vmx_execute_wbinvd(struct kvm_vcpu *vcpu)
+{
+ /* The trapped WBINVD has flushed nothing yet. */
+ if (vcpu->kvm->arch.iommu_domain) {
+ smp_call_function(wbinvd_ipi, NULL, 1);
+ wbinvd(); /* smp_call_function() skips this CPU */
+ }
+}
+
static int handle_wbinvd(struct kvm_vcpu *vcpu)
{
skip_emulated_instruction(vcpu);
- /* TODO: Add support for VT-d/pass-through device */
+ vmx_execute_wbinvd(vcpu);
return 1;
}
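For context (already in vmx.c, not part of this patch): handle_wbinvd() is reached through the VM-exit dispatch table when WBINVD exiting is enabled, and skip_emulated_instruction() advances the guest RIP past the trapped instruction before the flush:

static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
        /* ... other exit reasons elided ... */
        [EXIT_REASON_WBINVD] = handle_wbinvd,
};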
@@ -4350,6 +4373,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.rdtscp_supported = vmx_rdtscp_supported,
.set_supported_cpuid = vmx_set_supported_cpuid,
+
+ .execute_wbinvd = vmx_execute_wbinvd,
};
static int __init vmx_init(void)
@@ -3650,6 +3650,12 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
return X86EMUL_CONTINUE;
}
+int emulate_wbinvd(struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops->execute_wbinvd(vcpu);
+ return X86EMUL_CONTINUE;
+}
+
int emulate_clts(struct kvm_vcpu *vcpu)
{
kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
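Taken together, two paths now reach a real flush instead of a silent nop (summary diagram, not code from the patch):

/*
 * emulator path:  twobyte 0F 09 -> emulate_wbinvd()
 *                     -> kvm_x86_ops->execute_wbinvd()   (vmx or svm)
 * VMX exit path:  EXIT_REASON_WBINVD -> handle_wbinvd()
 *                     -> vmx_execute_wbinvd()            (direct call)
 */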