@@ -856,6 +856,12 @@ struct kvm_vcpu_arch {
u64 msr_kvm_poll_control;
+ struct {
+ u64 msr_val;
+ bool preempt_count_enabled;
+ struct gfn_to_hva_cache preempt_count_cache;
+ } pv_pc;
+
/*
* Indicates the guest is trying to write a gfn that contains one or
* more of the PTEs used to translate the write itself, i.e. the access
@@ -36,6 +36,7 @@
#define KVM_FEATURE_MSI_EXT_DEST_ID 15
#define KVM_FEATURE_HC_MAP_GPA_RANGE 16
#define KVM_FEATURE_MIGRATION_CONTROL 17
+#define KVM_FEATURE_PREEMPT_COUNT 18
#define KVM_HINTS_REALTIME 0
@@ -58,6 +59,7 @@
#define MSR_KVM_ASYNC_PF_INT 0x4b564d06
#define MSR_KVM_ASYNC_PF_ACK 0x4b564d07
#define MSR_KVM_MIGRATION_CONTROL 0x4b564d08
+#define MSR_KVM_PREEMPT_COUNT 0x4b564d09
struct kvm_steal_time {
__u64 steal;
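
For context, a guest would be expected to hand the host the GPA of its per-CPU preempt count through the new MSR, much like steal-time registration in arch/x86/kernel/kvm.c. A minimal sketch of what that could look like on x86, using the per-CPU __preempt_count variable; the function name and its call site (e.g. kvm_guest_cpu_init()) are assumptions, not part of this patch:

static void kvm_register_preempt_count(void)
{
	u64 pa;

	if (!kvm_para_has_feature(KVM_FEATURE_PREEMPT_COUNT))
		return;

	/* GPA of this CPU's preempt count; bit 0 asks the host to enable. */
	pa = slow_virt_to_phys(this_cpu_ptr(&__preempt_count));
	wrmsrl(MSR_KVM_PREEMPT_COUNT, pa | KVM_MSR_ENABLED);
}
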
@@ -1456,6 +1456,7 @@ static const u32 emulated_msrs_all[] = {
MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,
+ MSR_KVM_PREEMPT_COUNT,
MSR_IA32_TSC_ADJUST,
MSR_IA32_TSC_DEADLINE,
@@ -3433,6 +3434,25 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
}
+static int kvm_pv_enable_preempt_count(struct kvm_vcpu *vcpu, u64 data)
+{
+ u64 addr = data & ~KVM_MSR_ENABLED;
+ struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_pc.preempt_count_cache;
+
+ vcpu->arch.pv_pc.preempt_count_enabled = false;
+ vcpu->arch.pv_pc.msr_val = data;
+
+ if (!(data & KVM_MSR_ENABLED))
+ return 0;
+
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, sizeof(int)))
+ return 1;
+
+ vcpu->arch.pv_pc.preempt_count_enabled = true;
+
+ return 0;
+}
+
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
bool pr = false;
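
Note that this hunk only initializes the gfn_to_hva_cache; it does not add a reader. A sketch of how the host side could later sample the guest's count through that cache, assuming a hypothetical helper called from scheduling/yield heuristics:

static bool kvm_vcpu_preempt_count_nonzero(struct kvm_vcpu *vcpu)
{
	u32 count;

	if (!vcpu->arch.pv_pc.preempt_count_enabled)
		return false;

	/* Read the guest's preempt_count via the pre-resolved HVA cache. */
	if (kvm_read_guest_cached(vcpu->kvm,
				  &vcpu->arch.pv_pc.preempt_count_cache,
				  &count, sizeof(count)))
		return false;

	return count != 0;
}
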
@@ -3652,6 +3672,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu->arch.msr_kvm_poll_control = data;
break;
+ case MSR_KVM_PREEMPT_COUNT:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_PREEMPT_COUNT))
+ return 1;
+
+ if (kvm_pv_enable_preempt_count(vcpu, data))
+ return 1;
+ break;
+
case MSR_IA32_MCG_CTL:
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
@@ -3992,6 +4020,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vcpu->arch.msr_kvm_poll_control;
break;
+ case MSR_KVM_PREEMPT_COUNT:
+ if (!guest_pv_has(vcpu, KVM_FEATURE_PREEMPT_COUNT))
+ return 1;
+
+ msr_info->data = vcpu->arch.pv_pc.msr_val;
+ break;
case MSR_IA32_P5_MC_ADDR:
case MSR_IA32_P5_MC_TYPE:
case MSR_IA32_MCG_CAP:
@@ -11190,6 +11224,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.pending_external_vector = -1;
vcpu->arch.preempted_in_kernel = false;
+ vcpu->arch.pv_pc.preempt_count_enabled = false;
#if IS_ENABLED(CONFIG_HYPERV)
vcpu->arch.hv_root_tdp = INVALID_PAGE;
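
On the guest side, teardown would mirror kvm_disable_steal_time(): writing the MSR with the enable bit clear makes kvm_pv_enable_preempt_count() drop preempt_count_enabled, so the host stops dereferencing the cached mapping. A hedged sketch; the function name and its CPU-offline/kexec call sites are assumptions:

static void kvm_disable_preempt_count(void)
{
	if (!kvm_para_has_feature(KVM_FEATURE_PREEMPT_COUNT))
		return;

	/* Clearing bit 0 tells the host to stop reading the count. */
	wrmsrl(MSR_KVM_PREEMPT_COUNT, 0);
}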