@@ -27,6 +27,7 @@ KVM_X86_PMU_OP_OPTIONAL(cleanup)
KVM_X86_PMU_OP_OPTIONAL(passthrough_pmu_msrs)
KVM_X86_PMU_OP_OPTIONAL(save_pmu_context)
KVM_X86_PMU_OP_OPTIONAL(restore_pmu_context)
+KVM_X86_PMU_OP_OPTIONAL(incr_counter)
#undef KVM_X86_PMU_OP
#undef KVM_X86_PMU_OP_OPTIONAL
@@ -44,6 +44,7 @@ struct kvm_pmu_ops {
void (*passthrough_pmu_msrs)(struct kvm_vcpu *vcpu);
void (*save_pmu_context)(struct kvm_vcpu *vcpu);
void (*restore_pmu_context)(struct kvm_vcpu *vcpu);
+ bool (*incr_counter)(struct kvm_pmc *pmc);
const u64 EVENTSEL_EVENT;
const int MAX_NR_GP_COUNTERS;
@@ -74,6 +74,17 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
}
}
+static bool intel_incr_counter(struct kvm_pmc *pmc)
+{
+	/* Advance by one, wrapping at the counter's bit width. */
+	pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
+
+	/*
+	 * A wrap to zero means the counter overflowed; report it.
+	 */
+	return !pmc->counter;
+}
+
static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
unsigned int idx, u64 *mask)
{
@@ -885,6 +896,7 @@ struct kvm_pmu_ops intel_pmu_ops __initdata = {
.passthrough_pmu_msrs = intel_passthrough_pmu_msrs,
.save_pmu_context = intel_save_guest_pmu_context,
.restore_pmu_context = intel_restore_guest_pmu_context,
+ .incr_counter = intel_incr_counter,
.EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
.MAX_NR_GP_COUNTERS = KVM_INTEL_PMC_MAX_GENERIC,
.MIN_NR_GP_COUNTERS = 1,