@@ -26,6 +26,8 @@ KVM_X86_PMU_OP(reset)
KVM_X86_PMU_OP_OPTIONAL(deliver_pmi)
KVM_X86_PMU_OP_OPTIONAL(cleanup)
KVM_X86_PMU_OP_OPTIONAL(passthrough_pmu_msrs)
+KVM_X86_PMU_OP_OPTIONAL(save_pmu_context)
+KVM_X86_PMU_OP_OPTIONAL(restore_pmu_context)

#undef KVM_X86_PMU_OP
#undef KVM_X86_PMU_OP_OPTIONAL
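
For context: each entry in asm/kvm-x86-pmu-ops.h expands into a static call that the vendor module fills in at load time, and hooks declared with KVM_X86_PMU_OP_OPTIONAL() are allowed to stay NULL. That is why the wrappers added below go through static_call_cond(), which becomes a no-op when the hook is absent. Roughly (paraphrasing the existing machinery in arch/x86/kvm/pmu.c; the exact macro text may differ by kernel version):

#define KVM_X86_PMU_OP(func)						\
	DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_##func,			\
				*(((struct kvm_pmu_ops *)0)->func))
#define KVM_X86_PMU_OP_OPTIONAL KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>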
@@ -898,3 +898,17 @@ void kvm_pmu_passthrough_pmu_msrs(struct kvm_vcpu *vcpu)
{
static_call_cond(kvm_x86_pmu_passthrough_pmu_msrs)(vcpu);
}
+
+void kvm_pmu_save_pmu_context(struct kvm_vcpu *vcpu)
+{
+ lockdep_assert_irqs_disabled();
+
+ static_call_cond(kvm_x86_pmu_save_pmu_context)(vcpu);
+}
+
+void kvm_pmu_restore_pmu_context(struct kvm_vcpu *vcpu)
+{
+ lockdep_assert_irqs_disabled();
+
+ static_call_cond(kvm_x86_pmu_restore_pmu_context)(vcpu);
+}
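
The lockdep assertions encode the intended calling context: in a passthrough/mediated PMU model the guest's PMU state is loaded onto the hardware just before VM-entry and saved away right after VM-exit, and both steps must happen with interrupts disabled so host perf cannot run on a half-switched PMU. A minimal caller sketch, with a made-up function name purely for illustration:

/* Hypothetical call site, not part of this patch. */
static void sketch_run_guest_with_pmu(struct kvm_vcpu *vcpu)
{
	local_irq_disable();

	kvm_pmu_restore_pmu_context(vcpu);	/* guest counters go live */

	/* ... VM-entry, guest runs, VM-exit ... */

	kvm_pmu_save_pmu_context(vcpu);		/* snapshot guest PMU state */

	local_irq_enable();
}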
@@ -34,6 +34,8 @@ struct kvm_pmu_ops {
void (*deliver_pmi)(struct kvm_vcpu *vcpu);
void (*cleanup)(struct kvm_vcpu *vcpu);
void (*passthrough_pmu_msrs)(struct kvm_vcpu *vcpu);
+ void (*save_pmu_context)(struct kvm_vcpu *vcpu);
+ void (*restore_pmu_context)(struct kvm_vcpu *vcpu);

 const u64 EVENTSEL_EVENT;
const int MAX_NR_GP_COUNTERS;
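
A vendor implementation would then wire its callbacks into its kvm_pmu_ops instance, e.g. in vmx/pmu_intel.c; the callback names below are hypothetical and all other members are omitted:

/* Hypothetical vendor wiring, for illustration only. */
struct kvm_pmu_ops intel_pmu_ops __initdata = {
	/* ... existing callbacks ... */
	.passthrough_pmu_msrs	= intel_passthrough_pmu_msrs,
	.save_pmu_context	= intel_save_guest_pmu_context,
	.restore_pmu_context	= intel_restore_guest_pmu_context,
};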
@@ -288,6 +290,8 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);
void kvm_pmu_passthrough_pmu_msrs(struct kvm_vcpu *vcpu);
+void kvm_pmu_save_pmu_context(struct kvm_vcpu *vcpu);
+void kvm_pmu_restore_pmu_context(struct kvm_vcpu *vcpu);
bool is_vmware_backdoor_pmc(u32 pmc_idx);
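
To make the interrupts-disabled requirement concrete, the save side of such a callback would typically stop the live counters and snapshot their MSRs into the vCPU's struct kvm_pmu so a later restore can write them back. The sketch below is illustrative only and leaves out everything a real callback has to handle (fixed counters, global status/overflow state, full-width writes, and so on):

/*
 * Hypothetical vendor-side sketch, not part of this patch: disable the
 * counters globally, then snapshot the GP counters and event selectors.
 */
static void sketch_save_guest_pmu_context(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	/* Stop counting first so the snapshot is self-consistent. */
	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, pmu->global_ctrl);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	for (i = 0; i < pmu->nr_arch_gp_counters; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		rdmsrl(MSR_IA32_PMC0 + i, pmc->counter);
		rdmsrl(MSR_P6_EVNTSEL0 + i, pmc->eventsel);
	}
}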