@@ -342,9 +342,10 @@ struct kvm_vcpu_events {
/* Device Control API on vcpu fd */
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
-#define KVM_ARM_VCPU_PMU_V3_IRQ 0
-#define KVM_ARM_VCPU_PMU_V3_INIT 1
-#define KVM_ARM_VCPU_PMU_V3_FILTER 2
+#define KVM_ARM_VCPU_PMU_V3_IRQ 0
+#define KVM_ARM_VCPU_PMU_V3_INIT 1
+#define KVM_ARM_VCPU_PMU_V3_FILTER 2
+#define KVM_ARM_VCPU_PMU_V3_NUM_EVENTS 3
#define KVM_ARM_VCPU_TIMER_CTRL 1
#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
@@ -238,6 +238,8 @@ void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
pmu->pmc[i].idx = i;
+
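+ /* perf_num_counters() includes the cycle counter; PMCR_EL0.N does not */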
+ pmu->num_events = perf_num_counters() - 1;
}
/**
@@ -875,6 +877,25 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
return 0;
}
+ case KVM_ARM_VCPU_PMU_V3_NUM_EVENTS: {
+ u64 mask = ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT;
+ int __user *uaddr = (int __user *)(long)attr->addr;
+ unsigned int num_events;
+
+ if (get_user(num_events, uaddr))
+ return -EFAULT;
+
+ if (num_events >= perf_num_counters())
+ return -EINVAL;
+
+ vcpu->arch.pmu.num_events = num_events;
+
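+ /* Mirror the new limit into the guest-visible PMCR_EL0.N field */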
+ num_events <<= ARMV8_PMU_PMCR_N_SHIFT;
+ __vcpu_sys_reg(vcpu, PMCR_EL0) &= ~mask;
+ __vcpu_sys_reg(vcpu, PMCR_EL0) |= num_events;
+
+ return 0;
+ }
case KVM_ARM_VCPU_PMU_V3_INIT:
return kvm_arm_pmu_v3_init(vcpu);
}
@@ -912,6 +933,7 @@ int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
case KVM_ARM_VCPU_PMU_V3_IRQ:
case KVM_ARM_VCPU_PMU_V3_INIT:
case KVM_ARM_VCPU_PMU_V3_FILTER:
+ case KVM_ARM_VCPU_PMU_V3_NUM_EVENTS:
if (kvm_arm_support_pmu_v3() &&
test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
return 0;
@@ -672,6 +672,11 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
| (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
if (!system_supports_32bit_el0())
val |= ARMV8_PMU_PMCR_LC;
+
+ /* Override the number of event counters (PMCR_EL0.N) */
+ val &= ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
+ val |= (u32)vcpu->arch.pmu.num_events << ARMV8_PMU_PMCR_N_SHIFT;
+
__vcpu_sys_reg(vcpu, r->reg) = val;
}
@@ -27,6 +27,7 @@ struct kvm_pmu {
bool ready;
bool created;
bool irq_level;
+ u8 num_events;
};
#define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready)
We currently pass through the number of PMU counters that we have
available in hardware to guests. So if my host supports 10 concurrently
active PMU counters, my guest will be able to spawn 10 counters as well.

This is undesirable if we also want to use the PMU on the host for
monitoring. In that case, we want to split the PMU between guest and
host.

To help that case, let's add a PMU attr that allows us to limit the
number of PMU counters that we expose. With this patch in place, user
space can keep some counters free for host use.

Signed-off-by: Alexander Graf <graf@amazon.com>

---

Because this patch touches the same code paths as the vPMU filtering
one and the vPMU filtering generalized a few conditions in the attr
path, I've based it on top. Please let me know if you want it
independent instead.

---
 arch/arm64/include/uapi/asm/kvm.h |  7 ++++---
 arch/arm64/kvm/pmu-emul.c         | 22 ++++++++++++++++++++++
 arch/arm64/kvm/sys_regs.c         |  5 +++++
 include/kvm/arm_pmu.h             |  1 +
 4 files changed, 32 insertions(+), 3 deletions(-)
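For illustration only (not part of the patch), here is a minimal
userspace sketch of how a VMM might drive the new attribute. The helper
name limit_guest_pmu_counters and the error handling are ours; it
assumes vcpu_fd is a vCPU file descriptor whose vCPU was created with
the KVM_ARM_VCPU_PMU_V3 feature bit set, on a kernel carrying this
patch:

    #include <errno.h>
    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Illustrative helper: cap the guest PMU at num_events event counters. */
    static int limit_guest_pmu_counters(int vcpu_fd, unsigned int num_events)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_ARM_VCPU_PMU_V3_CTRL,
                    .attr  = KVM_ARM_VCPU_PMU_V3_NUM_EVENTS,
                    .addr  = (__u64)(unsigned long)&num_events,
            };

            /* Probe first; kernels without this attribute return ENXIO. */
            if (ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr))
                    return -errno;

            return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr) ? -errno : 0;
    }

Per the set_attr handler above, values greater than or equal to the
host's perf_num_counters() are rejected with -EINVAL, so a VMM that
wants to reserve counters for the host simply passes a smaller value,
naturally during vCPU setup alongside the other PMU_V3 attributes.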