@@ -365,6 +365,7 @@ struct kvm_vcpu_arch {
unsigned long singlestep_rip;
u32 __user *apf_data;
+ bool apf_send_user_only;
u32 apf_memslot_ver;
u64 apf_msr_val;
u32 async_pf_id;
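
The surrounding fields carry the rest of the per-vcpu async-PF state but are undocumented in this hunk; a brief recap (the comments are this editor's reading of the series, not text from the patch):

	u32 __user *apf_data;      /* guest-provided slot where fault tokens are written */
	bool apf_send_user_only;   /* new: inject faults only while the guest runs at CPL > 0 */
	u32 apf_memslot_ver;       /* memslot generation apf_data was resolved against */
	u64 apf_msr_val;           /* last value the guest wrote to MSR_KVM_ASYNC_PF_EN */
	u32 async_pf_id;           /* id generator for outstanding async faults */
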
@@ -24,6 +24,7 @@
#define KVM_MAX_MMU_OP_BATCH 32
#define KVM_ASYNC_PF_ENABLED (1 << 0)
+#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)
/* Operations for KVM_HC_MMU_OP */
#define KVM_MMU_OP_WRITE_PTE 1
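
With the second bit taken, the MSR_KVM_ASYNC_PF_EN layout looks as follows. This is a sketch assuming, as elsewhere in the series, that the per-cpu apf_reason area is 64-byte aligned, so the low bits of its physical address are free to carry flags:

	/*
	 * MSR_KVM_ASYNC_PF_EN:
	 *   bit 0      KVM_ASYNC_PF_ENABLED     - async PF delivery enabled
	 *   bit 1      KVM_ASYNC_PF_SEND_ALWAYS - faults may be sent at any CPL
	 *   bits 2:5   reserved, must be zero (checked as 0x3c on the host)
	 *   bits 6:63  physical address of the per-cpu apf_reason area
	 */
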
@@ -480,6 +480,9 @@ void __cpuinit kvm_guest_cpu_init(void)
if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF)) {
u64 pa = __pa(&__get_cpu_var(apf_reason));
+#ifdef CONFIG_PREEMPT
+ pa |= KVM_ASYNC_PF_SEND_ALWAYS;
+#endif
if (native_write_msr_safe(MSR_KVM_ASYNC_PF_EN,
pa | KVM_ASYNC_PF_ENABLED, pa >> 32))
return;
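
Gating the new bit on CONFIG_PREEMPT is the crux: only a preemptible kernel can safely park the current task when a not-present fault arrives at CPL0. A minimal sketch of the guest fault path this enables (names such as do_async_page_fault, kvm_read_and_reset_pf_reason and the KVM_PV_REASON_* constants are assumed from other parts of the series, not shown in this hunk):

	dotraplinkage void
	do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
	{
		switch (kvm_read_and_reset_pf_reason()) {
		default:
			/* Not a paravirt event: fall back to the normal handler. */
			do_page_fault(regs, error_code);
			break;
		case KVM_PV_REASON_PAGE_NOT_PRESENT:
			/*
			 * With KVM_ASYNC_PF_SEND_ALWAYS this can fire in kernel
			 * mode; a CONFIG_PREEMPT kernel may still schedule here.
			 */
			kvm_async_pf_task_wait((u32)read_cr2());
			break;
		case KVM_PV_REASON_PAGE_READY:
			kvm_async_pf_task_wake((u32)read_cr2());
			break;
		}
	}
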
@@ -1009,8 +1009,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
int offset = offset_in_page(gpa);
unsigned long addr;
- /* Bits 1:5 are resrved, Should be zero */
- if (data & 0x3e)
+ /* Bits 2:5 are reserved, should be zero */
+ if (data & 0x3c)
return 1;
vcpu->arch.apf_msr_val = data;
@@ -1032,6 +1032,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
return 1;
}
vcpu->arch.apf_memslot_ver = vcpu->kvm->memslot_version;
+ vcpu->arch.apf_send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
kvm_async_pf_wakeup_all(vcpu);
return 0;
}
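
On the host side, the recorded choice has to be honoured wherever an async PF is about to be queued. A sketch of that check, assuming the series' can_do_async_pf() helper and the kvm_x86_ops get_cpl/interrupt_allowed callbacks (none of which appear in this hunk):

	static bool can_do_async_pf(struct kvm_vcpu *vcpu)
	{
		if (unlikely(!irqchip_in_kernel(vcpu->kvm) ||
			     kvm_event_needs_reinjection(vcpu)))
			return false;

		/* Guest did not set KVM_ASYNC_PF_SEND_ALWAYS: user mode only. */
		if (vcpu->arch.apf_send_user_only &&
		    kvm_x86_ops->get_cpl(vcpu) == 0)
			return false;

		return kvm_x86_ops->interrupt_allowed(vcpu);
	}
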