--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -7,6 +7,7 @@
 #include <linux/cpu.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
+#include <linux/list.h>
 #include <linux/perf_event.h>
 #include <linux/perf/arm_pmu.h>
 #include <linux/uaccess.h>
@@ -16,6 +17,9 @@
 
 DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
 
+static LIST_HEAD(arm_pmus);
+static DEFINE_MUTEX(arm_pmus_lock);
+
 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
 static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
@@ -750,9 +754,26 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 
 void kvm_host_pmu_init(struct arm_pmu *pmu)
 {
-	if (pmu->pmuver != 0 && pmu->pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
-	    !kvm_arm_support_pmu_v3() && !is_protected_kvm_enabled())
+	struct arm_pmu_entry *entry;
+
+	if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF ||
+	    is_protected_kvm_enabled())
+		return;
+
+	mutex_lock(&arm_pmus_lock);
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		goto out_unlock;
+
+	entry->arm_pmu = pmu;
+	list_add_tail(&entry->entry, &arm_pmus);
+
+	if (list_is_singular(&arm_pmus))
 		static_branch_enable(&kvm_arm_pmu_available);
+
+out_unlock:
+	mutex_unlock(&arm_pmus_lock);
 }
 
 static struct arm_pmu *kvm_pmu_probe_armpmu(void)
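
Side note, not part of the patch: there is no removal path here, which looks intentional, assuming host arm_pmu instances are not unregistered once probed. Purely to illustrate how arm_pmus_lock and the static key are meant to stay balanced, a hypothetical teardown counterpart (the function name and the need for it are assumptions of this sketch, built on the statics the hunk above adds) could look roughly like this:

static void example_host_pmu_exit(struct arm_pmu *pmu)
{
	struct arm_pmu_entry *entry, *tmp;

	mutex_lock(&arm_pmus_lock);

	/* Drop the matching entry, if kvm_host_pmu_init() ever added one. */
	list_for_each_entry_safe(entry, tmp, &arm_pmus, entry) {
		if (entry->arm_pmu != pmu)
			continue;

		list_del(&entry->entry);
		kfree(entry);
		break;
	}

	/* Mirror the list_is_singular() gate: last entry out turns the key off. */
	if (list_empty(&arm_pmus))
		static_branch_disable(&kvm_arm_pmu_available);

	mutex_unlock(&arm_pmus_lock);
}
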
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -29,6 +29,11 @@ struct kvm_pmu {
 	struct irq_work overflow_work;
 };
 
+struct arm_pmu_entry {
+	struct list_head entry;
+	struct arm_pmu *arm_pmu;
+};
+
 DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
 
 static __always_inline bool kvm_arm_support_pmu_v3(void)
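
To show how the new arm_pmu_entry bookkeeping can be consumed, a minimal sketch of a walker follows. The helper name is hypothetical; the list_for_each_entry() walk, the arm_pmus_lock coverage, and the supported_cpus cpumask on struct arm_pmu are existing kernel interfaces. Holding the mutex keeps the walk from racing with kvm_host_pmu_init() adding entries.

/* Hypothetical consumer: find the arm_pmu instance driving a given CPU. */
static struct arm_pmu *example_find_arm_pmu(int cpu)
{
	struct arm_pmu_entry *entry;
	struct arm_pmu *ret = NULL;

	mutex_lock(&arm_pmus_lock);

	list_for_each_entry(entry, &arm_pmus, entry) {
		if (cpumask_test_cpu(cpu, &entry->arm_pmu->supported_cpus)) {
			ret = entry->arm_pmu;
			break;
		}
	}

	mutex_unlock(&arm_pmus_lock);

	return ret;
}
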