@@ -402,6 +402,56 @@ static int validate_id_aa64isar1_el1(struct kvm_vcpu *vcpu, u64 val)
return 0;
}
+/*
+ * Check that the 4-bit PMU version field at @shift in @val indicates
+ * at least PMU version @min.  The value 0xf (IMPLEMENTATION DEFINED
+ * form of PMU) is treated as "no PMU".
+ */
+static bool id_reg_has_pmu(u64 val, u64 shift, u64 min)
+{
+ u64 pmu = ((val >> shift) & 0xf);
+
+ /*
+ * Treat IMPLEMENTATION DEFINED functionality as unimplemented for
+ * ID_AA64DFR0_EL1.PMUVer/ID_DFR0_EL1.PerfMon.
+ */
+ if (pmu == 0xf)
+ pmu = 0;
+
+ return (pmu >= min);
+}
+
+/*
+ * Validate a userspace write to ID_AA64DFR0_EL1.  Returns 0 on
+ * success, -EINVAL if the number of context-aware breakpoints exceeds
+ * the number of supported breakpoints, or -EPERM if the PMUVer field
+ * conflicts with the vCPU's PMU configuration (KVM_ARM_VCPU_INIT).
+ */
+static int validate_id_aa64dfr0_el1(struct kvm_vcpu *vcpu, u64 val)
+{
+ unsigned int brps, ctx_cmps;
+ bool vcpu_pmu = kvm_vcpu_has_pmu(vcpu);
+ bool dfr0_pmu = id_reg_has_pmu(val, ID_AA64DFR0_PMUVER_SHIFT,
+ ID_AA64DFR0_PMUVER_8_0);
+
+ brps = cpuid_feature_extract_unsigned_field(val,
+ ID_AA64DFR0_BRPS_SHIFT);
+ ctx_cmps = cpuid_feature_extract_unsigned_field(val,
+ ID_AA64DFR0_CTX_CMPS_SHIFT);
+
+ /*
+ * Number of context-aware breakpoints can be no more than number of
+ * supported breakpoints.
+ */
+ if (ctx_cmps > brps)
+ return -EINVAL;
+
+ /* Check if there is a conflict with a request via KVM_ARM_VCPU_INIT */
+ if (vcpu_pmu ^ dfr0_pmu)
+ return -EPERM;
+
+ return 0;
+}
+
static void init_id_aa64pfr0_el1_info(struct id_reg_info *id_reg)
{
u64 limit;
@@ -445,6 +485,51 @@ static void init_id_aa64isar1_el1_info(struct id_reg_info *id_reg)
(id_reg->sys_val & ~PTRAUTH_MASK);
}
+/*
+ * ID_AA64DFR0_EL1.PMUVer/ID_DFR0_EL1.PerfMon == 0xf indicates
+ * IMPLEMENTATION DEFINED form of performance monitors supported,
+ * PMUv3 not supported (NOTE: 0x0 indicates PMU is not supported).
+ * This function is to cap a value of those two fields with the
+ * given 'cap_val' treating 0xf in the fields as 0.
+ */
+static u64 id_reg_cap_pmu(u64 val, u64 shift, u64 cap_val)
+{
+ u64 mask = 0xf;
+ u64 pmu;
+
+ pmu = (val >> shift) & mask;
+ pmu = (pmu == 0xf) ? 0 : pmu;
+ pmu = min(pmu, cap_val);
+
+ val &= ~(0xfULL << shift);
+ val |= (pmu & 0xf) << shift;
+ return val;
+}
+
+/*
+ * Set up the id_reg_info for ID_AA64DFR0_EL1: compute the maximum
+ * value KVM exposes to guests by capping PMUVer at PMUv3 for ARMv8.4,
+ * limiting DebugVer to ARMv8.0, and hiding SPE (PMSVer).
+ */
+static void init_id_aa64dfr0_el1_info(struct id_reg_info *id_reg)
+{
+ u64 limit;
+
+ id_reg->sys_val = read_sanitised_ftr_reg(id_reg->sys_reg);
+ limit = id_reg->sys_val;
+
+ /* Limit guests to PMUv3 for ARMv8.4 */
+ limit = id_reg_cap_pmu(limit, ID_AA64DFR0_PMUVER_SHIFT,
+ ID_AA64DFR0_PMUVER_8_4);
+ /* Limit debug to ARMv8.0 */
+ limit &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER);
+ limit |= (FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6));
+
+ /* Hide SPE from guests */
+ limit &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER);
+
+ id_reg->vcpu_limit_val = limit;
+}
+
static u64 get_reset_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
struct id_reg_info *idr)
{
@@ -468,6 +549,18 @@ static u64 get_reset_id_aa64isar1_el1(struct kvm_vcpu *vcpu,
idr->vcpu_limit_val : (idr->vcpu_limit_val & ~PTRAUTH_MASK);
}
+/*
+ * Reset value for ID_AA64DFR0_EL1: clear the PMUVer field when the
+ * vCPU has no PMU, so that the reset value is consistent with the
+ * vCPU configuration requested via KVM_ARM_VCPU_INIT.
+ */
+static u64 get_reset_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
+ struct id_reg_info *idr)
+{
+ return kvm_vcpu_has_pmu(vcpu) ?
+ idr->vcpu_limit_val :
+ (idr->vcpu_limit_val & ~(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER)));
+}
+
static struct id_reg_info id_aa64pfr0_el1_info = {
.sys_reg = SYS_ID_AA64PFR0_EL1,
.init = init_id_aa64pfr0_el1_info,
@@ -497,6 +586,14 @@ static struct id_reg_info id_aa64isar1_el1_info = {
.get_reset_val = get_reset_id_aa64isar1_el1,
};
+/* Special handling for ID_AA64DFR0_EL1 (PMUVer/DebugVer/PMSVer). */
+static struct id_reg_info id_aa64dfr0_el1_info = {
+ .sys_reg = SYS_ID_AA64DFR0_EL1,
+ .init = init_id_aa64dfr0_el1_info,
+ .validate = validate_id_aa64dfr0_el1,
+ .get_reset_val = get_reset_id_aa64dfr0_el1,
+};
+
/*
 * An ID register that needs special handling to control the value for the
 * guest must have its own id_reg_info in id_reg_info_table.
@@ -508,6 +604,7 @@ static struct id_reg_info id_aa64isar1_el1_info = {
static struct id_reg_info *id_reg_info_table[KVM_ARM_ID_REG_MAX_NUM] = {
[IDREG_IDX(SYS_ID_AA64PFR0_EL1)] = &id_aa64pfr0_el1_info,
[IDREG_IDX(SYS_ID_AA64PFR1_EL1)] = &id_aa64pfr1_el1_info,
+ [IDREG_IDX(SYS_ID_AA64DFR0_EL1)] = &id_aa64dfr0_el1_info,
[IDREG_IDX(SYS_ID_AA64ISAR0_EL1)] = &id_aa64isar0_el1_info,
[IDREG_IDX(SYS_ID_AA64ISAR1_EL1)] = &id_aa64isar1_el1_info,
};
@@ -1346,17 +1443,6 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
u64 val = raz ? 0 : __vcpu_sys_reg(vcpu, IDREG_SYS_IDX(id));
switch (id) {
- case SYS_ID_AA64DFR0_EL1:
- /* Limit debug to ARMv8.0 */
- val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER);
- val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6);
- /* Limit guests to PMUv3 for ARMv8.4 */
- val = cpuid_feature_cap_perfmon_field(val,
- ID_AA64DFR0_PMUVER_SHIFT,
- kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
- /* Hide SPE from guests */
- val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER);
- break;
case SYS_ID_DFR0_EL1:
/* Limit guests to PMUv3 for ARMv8.4 */
val = cpuid_feature_cap_perfmon_field(val,
This patch adds id_reg_info for ID_AA64DFR0_EL1 to make it writable by userspace. Return an error if userspace tries to set the PMUVER field of the register to a value that conflicts with the PMU configuration. Since the number of context-aware breakpoints must be no more than the number of supported breakpoints according to the Arm ARM, return an error if userspace tries to set the CTX_CMPS field to such a value. Signed-off-by: Reiji Watanabe <reijiw@google.com> --- arch/arm64/kvm/sys_regs.c | 108 ++++++++++++++++++++++++++++++++++---- 1 file changed, 97 insertions(+), 11 deletions(-)