@@ -356,7 +356,7 @@ static bool is_fixed_event_allowed(struct kvm_x86_pmu_event_filter *filter,
 	return true;
 }
 
-static bool check_pmu_event_filter(struct kvm_pmc *pmc)
+bool check_pmu_event_filter(struct kvm_pmc *pmc)
 {
 	struct kvm_x86_pmu_event_filter *filter;
 	struct kvm *kvm = pmc->vcpu->kvm;
@@ -370,6 +370,7 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
 
 	return is_fixed_event_allowed(filter, pmc->idx);
 }
+EXPORT_SYMBOL_GPL(check_pmu_event_filter);
 
 static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
 {
@@ -292,6 +292,7 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);
 void kvm_pmu_passthrough_pmu_msrs(struct kvm_vcpu *vcpu);
 void kvm_pmu_save_pmu_context(struct kvm_vcpu *vcpu);
 void kvm_pmu_restore_pmu_context(struct kvm_vcpu *vcpu);
+bool check_pmu_event_filter(struct kvm_pmc *pmc);
 bool is_vmware_backdoor_pmc(u32 pmc_idx);
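
For illustration only: with check_pmu_event_filter() made non-static, exported, and declared in the header, PMU code outside pmu.c (for example vendor code built into kvm-intel or kvm-amd) can consult the userspace-configured event filter before exposing a counter to the guest. The sketch below is an assumption about how such a caller might look; only check_pmu_event_filter() and struct kvm_pmc come from this patch, the caller name and surrounding logic are hypothetical.

/*
 * Hypothetical caller sketch (not part of this patch): vendor PMU code
 * honoring the userspace event filter before handing a counter to the
 * guest.  Everything here except check_pmu_event_filter() and
 * struct kvm_pmc is illustrative.
 */
#include "pmu.h"

static void vendor_pmu_passthrough_counter(struct kvm_pmc *pmc)
{
	/* Respect KVM_SET_PMU_EVENT_FILTER: skip filtered-out events. */
	if (!check_pmu_event_filter(pmc))
		return;

	/* ... program the hardware counter for direct guest use ... */
}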