@@ -28,6 +28,7 @@ KVM_X86_PMU_OP_OPTIONAL(passthrough_pmu_msrs)
KVM_X86_PMU_OP_OPTIONAL(save_pmu_context)
KVM_X86_PMU_OP_OPTIONAL(restore_pmu_context)
KVM_X86_PMU_OP_OPTIONAL(incr_counter)
+KVM_X86_PMU_OP_OPTIONAL(set_overflow)
#undef KVM_X86_PMU_OP
#undef KVM_X86_PMU_OP_OPTIONAL
@@ -45,6 +45,7 @@ struct kvm_pmu_ops {
void (*save_pmu_context)(struct kvm_vcpu *vcpu);
void (*restore_pmu_context)(struct kvm_vcpu *vcpu);
bool (*incr_counter)(struct kvm_pmc *pmc);
+ void (*set_overflow)(struct kvm_vcpu *vcpu);
const u64 EVENTSEL_EVENT;
const int MAX_NR_GP_COUNTERS;
@@ -881,6 +881,10 @@ static void intel_restore_guest_pmu_context(struct kvm_vcpu *vcpu)
wrmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, pmu->fixed_ctr_ctrl_hw);
}
+/*
+ * Intel implementation of the kvm_pmu_ops.set_overflow callback wired up
+ * below in intel_pmu_ops. Currently a deliberate no-op stub.
+ *
+ * NOTE(review): body is empty — presumably a later patch in this series
+ * fills it in (e.g. propagating counter-overflow status to the guest PMU);
+ * confirm against the rest of the series before merging.
+ */
+static void intel_set_overflow(struct kvm_vcpu *vcpu)
+{
+}
+
struct kvm_pmu_ops intel_pmu_ops __initdata = {
.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
.msr_idx_to_pmc = intel_msr_idx_to_pmc,
@@ -897,6 +901,7 @@ struct kvm_pmu_ops intel_pmu_ops __initdata = {
.save_pmu_context = intel_save_guest_pmu_context,
.restore_pmu_context = intel_restore_guest_pmu_context,
.incr_counter = intel_incr_counter,
+ .set_overflow = intel_set_overflow,
.EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
.MAX_NR_GP_COUNTERS = KVM_INTEL_PMC_MAX_GENERIC,
.MIN_NR_GP_COUNTERS = 1,