Message ID | 20231009230858.3444834-6-rananta@google.com (mailing list archive)
---|---
State | New, archived
Series | KVM: arm64: PMU: Allow userspace to limit the number of PMCs on vCPU
Raghavendra,

On 10/10/23 01:08, Raghavendra Rao Ananta wrote:
> From: Reiji Watanabe <reijiw@google.com>
>
> Add a helper to read a vCPU's PMCR_EL0, and use it when KVM
> reads a vCPU's PMCR_EL0.
>
> The PMCR_EL0 value is tracked by a sysreg file per each vCPU.

file?

> The following patches will make (only) PMCR_EL0.N track per guest.
> Having the new helper will be useful to combine the PMCR_EL0.N
> field (tracked per guest) and the other fields (tracked per vCPU)
> to provide the value of PMCR_EL0.
>
> No functional change intended.
>
> Signed-off-by: Reiji Watanabe <reijiw@google.com>
> Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>

Besides

Reviewed-by: Eric Auger <eric.auger@redhat.com>

Eric

> ---
>  arch/arm64/kvm/arm.c      |  3 +--
>  arch/arm64/kvm/pmu-emul.c | 21 +++++++++++++++------
>  arch/arm64/kvm/sys_regs.c |  6 +++---
>  include/kvm/arm_pmu.h     |  6 ++++++
>  4 files changed, 25 insertions(+), 11 deletions(-)
>
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 708a53b70a7b..0af4d6bbe3d3 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -854,8 +854,7 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
>          }
>
>          if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
> -                kvm_pmu_handle_pmcr(vcpu,
> -                                    __vcpu_sys_reg(vcpu, PMCR_EL0));
> +                kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
>
>          if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
>                  kvm_vcpu_pmu_restore_guest(vcpu);
> diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
> index cc30c246c010..a161d6266a5c 100644
> --- a/arch/arm64/kvm/pmu-emul.c
> +++ b/arch/arm64/kvm/pmu-emul.c
> @@ -72,7 +72,7 @@ static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
>
>  static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
>  {
> -        u64 val = __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), PMCR_EL0);
> +        u64 val = kvm_vcpu_read_pmcr(kvm_pmc_to_vcpu(pmc));
>
>          return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
>                 (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
> @@ -250,7 +250,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
>
>  u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
>  {
> -        u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
> +        u64 val = kvm_vcpu_read_pmcr(vcpu) >> ARMV8_PMU_PMCR_N_SHIFT;
>
>          val &= ARMV8_PMU_PMCR_N_MASK;
>          if (val == 0)
> @@ -272,7 +272,7 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
>          if (!kvm_vcpu_has_pmu(vcpu))
>                  return;
>
> -        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
> +        if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
>                  return;
>
>          for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
> @@ -324,7 +324,7 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
>  {
>          u64 reg = 0;
>
> -        if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
> +        if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) {
>                  reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
>                  reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
>                  reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
> @@ -426,7 +426,7 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
>  {
>          int i;
>
> -        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
> +        if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
>                  return;
>
>          /* Weed out disabled counters */
> @@ -569,7 +569,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
>  static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
>  {
>          struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
> -        return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
> +        return (kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) &&
>                 (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
>  }
>
> @@ -1084,3 +1084,12 @@ u8 kvm_arm_pmu_get_pmuver_limit(void)
>                                               ID_AA64DFR0_EL1_PMUVer_V3P5);
>          return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
>  }
> +
> +/**
> + * kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU
> + * @vcpu: The vcpu pointer
> + */
> +u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
> +{
> +        return __vcpu_sys_reg(vcpu, PMCR_EL0);
> +}
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index 08af7824e9d8..ff0f7095eaca 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -803,7 +803,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>                   * Only update writeable bits of PMCR (continuing into
>                   * kvm_pmu_handle_pmcr() as well)
>                   */
> -                val = __vcpu_sys_reg(vcpu, PMCR_EL0);
> +                val = kvm_vcpu_read_pmcr(vcpu);
>                  val &= ~ARMV8_PMU_PMCR_MASK;
>                  val |= p->regval & ARMV8_PMU_PMCR_MASK;
>                  if (!kvm_supports_32bit_el0())
> @@ -811,7 +811,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>                  kvm_pmu_handle_pmcr(vcpu, val);
>          } else {
>                  /* PMCR.P & PMCR.C are RAZ */
> -                val = __vcpu_sys_reg(vcpu, PMCR_EL0)
> +                val = kvm_vcpu_read_pmcr(vcpu)
>                        & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
>                  p->regval = val;
>          }
> @@ -860,7 +860,7 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
>  {
>          u64 pmcr, val;
>
> -        pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
> +        pmcr = kvm_vcpu_read_pmcr(vcpu);
>          val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
>          if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
>                  kvm_inject_undefined(vcpu);
> diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> index 858ed9ce828a..cd980d78b86b 100644
> --- a/include/kvm/arm_pmu.h
> +++ b/include/kvm/arm_pmu.h
> @@ -103,6 +103,7 @@ void kvm_vcpu_pmu_resync_el0(void);
>  u8 kvm_arm_pmu_get_pmuver_limit(void);
>  int kvm_arm_set_default_pmu(struct kvm *kvm);
>
> +u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
>  #else
>  struct kvm_pmu {
>  };
> @@ -180,6 +181,11 @@ static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
>          return -ENODEV;
>  }
>
> +static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
> +{
> +        return 0;
> +}
> +
>  #endif
>
>  #endif
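[Editor's note] The commit message says the helper exists so a later patch can combine the per-guest PMCR_EL0.N field with the remaining per-vCPU fields. A minimal sketch of what that combination could look like, built only from the ARMV8_PMU_PMCR_N_SHIFT/ARMV8_PMU_PMCR_N_MASK constants visible in the diff above; the per-guest field vcpu->kvm->arch.pmcr_n is an assumed name for illustration, not the series' final code:

/*
 * Hypothetical sketch only: how kvm_vcpu_read_pmcr() might merge
 * per-guest and per-vCPU state once PMCR_EL0.N is tracked per guest.
 * The field name vcpu->kvm->arch.pmcr_n is assumed for illustration.
 */
u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
{
        /* Per-vCPU fields: everything in PMCR_EL0 except the N field. */
        u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0) &
                   ~((u64)ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);

        /* Per-guest field: the number of implemented event counters. */
        return pmcr | ((u64)vcpu->kvm->arch.pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
}

Centralizing the read behind one accessor is what makes this later change a one-function edit instead of touching every __vcpu_sys_reg(vcpu, PMCR_EL0) call site.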
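[Editor's note] As a usage illustration, callers that only need the counter count can decode PMCR_EL0.N through the new accessor, exactly as kvm_pmu_valid_counter_mask() and pmu_counter_idx_valid() do in the patch. The helper name vcpu_pmcr_n below is invented here for illustration:

/* Illustrative helper (name invented): number of implemented counters. */
static u8 vcpu_pmcr_n(struct kvm_vcpu *vcpu)
{
        return (kvm_vcpu_read_pmcr(vcpu) >> ARMV8_PMU_PMCR_N_SHIFT) &
               ARMV8_PMU_PMCR_N_MASK;
}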