@@ -1355,16 +1355,10 @@ static u64 kvm_arm_update_id_reg(const struct kvm_vcpu *vcpu, u32 encoding, u64
case SYS_ID_AA64PFR0_EL1:
if (!vcpu_has_sve(vcpu))
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
- if (kvm_vgic_global_state.type == VGIC_V3) {
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
- val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
- }
break;
case SYS_ID_AA64PFR1_EL1:
if (!kvm_has_mte(vcpu->kvm))
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
-
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
break;
case SYS_ID_AA64ISAR1_EL1:
if (!vcpu_has_ptrauth(vcpu))
@@ -1377,8 +1371,6 @@ static u64 kvm_arm_update_id_reg(const struct kvm_vcpu *vcpu, u32 encoding, u64
if (!vcpu_has_ptrauth(vcpu))
val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
- if (!cpus_have_final_cap(ARM64_HAS_WFXT))
- val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
break;
case SYS_ID_AA64DFR0_EL1:
/* Set PMUver to the required version */
@@ -1391,12 +1383,6 @@ static u64 kvm_arm_update_id_reg(const struct kvm_vcpu *vcpu, u32 encoding, u64
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon),
pmuver_to_perfmon(vcpu_pmuver(vcpu)));
break;
- case SYS_ID_AA64MMFR2_EL1:
- val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
- break;
- case SYS_ID_MMFR4_EL1:
- val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
- break;
}
 
return val;
@@ -1490,6 +1476,20 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
return REG_HIDDEN;
}
 
+static u64 read_sanitised_id_mmfr4_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ u64 val;
+ u32 id = reg_to_encoding(rd);
+
+ val = read_sanitised_ftr_reg(id);
+
+ /* CCIDX is not supported */
+ val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
+
+ return val;
+}
+
static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
@@ -1516,6 +1516,25 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
}
 
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
+ if (kvm_vgic_global_state.type == VGIC_V3) {
+ val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
+ val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
+ }
+
+ return val;
+}
+
+static u64 read_sanitised_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ u64 val;
+ u32 id = reg_to_encoding(rd);
+
+ val = read_sanitised_ftr_reg(id);
+
+ /* SME is not supported */
+ val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
+
return val;
}
 
@@ -1638,6 +1657,34 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
return pmuver_update(vcpu, rd, val, perfmon_to_pmuver(perfmon), valid_pmu);
}
 
+static u64 read_sanitised_id_aa64isar2_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ u64 val;
+ u32 id = reg_to_encoding(rd);
+
+ val = read_sanitised_ftr_reg(id);
+
+ if (!cpus_have_final_cap(ARM64_HAS_WFXT))
+ val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
+
+ return val;
+}
+
+static u64 read_sanitised_id_aa64mmfr2_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ u64 val;
+ u32 id = reg_to_encoding(rd);
+
+ val = read_sanitised_ftr_reg(id);
+
+ /* CCIDX is not supported */
+ val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
+
+ return val;
+}
+
/*
* cpufeature ID register user accessors
*
@@ -2033,7 +2080,13 @@ static const struct sys_reg_desc sys_reg_descs[] = {
AA32_ID_SANITISED(ID_ISAR3_EL1),
AA32_ID_SANITISED(ID_ISAR4_EL1),
AA32_ID_SANITISED(ID_ISAR5_EL1),
- AA32_ID_SANITISED(ID_MMFR4_EL1),
+ { SYS_DESC(SYS_ID_MMFR4_EL1),
+ .access = access_id_reg,
+ .get_user = get_id_reg,
+ .set_user = set_id_reg,
+ .visibility = aa32_id_visibility,
+ .reset = read_sanitised_id_mmfr4_el1,
+ .val = 0, },
AA32_ID_SANITISED(ID_ISAR6_EL1),
 
/* CRm=3 */
@@ -2054,7 +2107,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
.set_user = set_id_reg,
.reset = read_sanitised_id_aa64pfr0_el1,
.val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK, },
- ID_SANITISED(ID_AA64PFR1_EL1),
+ { SYS_DESC(SYS_ID_AA64PFR1_EL1),
+ .access = access_id_reg,
+ .get_user = get_id_reg,
+ .set_user = set_id_reg,
+ .reset = read_sanitised_id_aa64pfr1_el1,
+ .val = 0, },
ID_UNALLOCATED(4,2),
ID_UNALLOCATED(4,3),
ID_SANITISED(ID_AA64ZFR0_EL1),
@@ -2080,7 +2138,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* CRm=6 */
ID_SANITISED(ID_AA64ISAR0_EL1),
ID_SANITISED(ID_AA64ISAR1_EL1),
- ID_SANITISED(ID_AA64ISAR2_EL1),
+ { SYS_DESC(SYS_ID_AA64ISAR2_EL1),
+ .access = access_id_reg,
+ .get_user = get_id_reg,
+ .set_user = set_id_reg,
+ .reset = read_sanitised_id_aa64isar2_el1,
+ .val = 0, },
ID_UNALLOCATED(6,3),
ID_UNALLOCATED(6,4),
ID_UNALLOCATED(6,5),
@@ -2090,7 +2153,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* CRm=7 */
ID_SANITISED(ID_AA64MMFR0_EL1),
ID_SANITISED(ID_AA64MMFR1_EL1),
- ID_SANITISED(ID_AA64MMFR2_EL1),
+ { SYS_DESC(SYS_ID_AA64MMFR2_EL1),
+ .access = access_id_reg,
+ .get_user = get_id_reg,
+ .set_user = set_id_reg,
+ .reset = read_sanitised_id_aa64mmfr2_el1,
+ .val = 0, },
ID_UNALLOCATED(7,3),
ID_UNALLOCATED(7,4),
ID_UNALLOCATED(7,5),
There are features which are masked in kvm_arm_update_id_reg() which cannot
change throughout the lifecycle of a VM. Thus, rather than masking them each
time the register is read, mask them at idreg init time so that the value in
the kvm id_reg correctly reflects the state of support for that feature.

Move masking of AA64PFR0_EL1.GIC into read_sanitised_id_aa64pfr0_el1(),
alongside the existing masking of AA64PFR0_EL1.AMU.

Create read_sanitised_id_aa64pfr1_el1() and mask AA64PFR1_EL1.SME.

Create read_sanitised_id_[mmfr4|aa64mmfr2]_el1() and mask CCIDX.

Signed-off-by: Suraj Jitindar Singh <surajjs@amazon.com>
---
 arch/arm64/kvm/sys_regs.c | 104 +++++++++++++++++++++++++++++++-------
 1 file changed, 86 insertions(+), 18 deletions(-)
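
For context, a minimal sketch of the init-time flow this patch relies on,
assuming the per-guest ID register infrastructure from earlier in this
series: IDREG(), is_id_reg(), first_idreg and the u64-returning ->reset()
callback are assumptions drawn from that context, and init_idregs_sketch()
is a hypothetical name, not code in this patch:

	/*
	 * Hypothetical sketch (not part of this patch): at VM creation,
	 * run each ID register's ->reset() callback once and cache the
	 * sanitised value in the per-VM idreg storage. The
	 * read_sanitised_*() helpers above thus apply the invariant masks
	 * (CCIDX, SME, GIC, WFxT) exactly once, while
	 * kvm_arm_update_id_reg() keeps only the masks that depend on
	 * per-vCPU configuration (SVE, MTE, ptrauth, PMU version).
	 */
	static void init_idregs_sketch(struct kvm *kvm, struct kvm_vcpu *vcpu)
	{
		const struct sys_reg_desc *idreg = first_idreg;	/* assumed cursor */
		u32 id = reg_to_encoding(idreg);

		while (is_id_reg(id)) {
			/* e.g. read_sanitised_id_aa64pfr1_el1() clears SME here */
			IDREG(kvm, id) = idreg->reset(vcpu, idreg);
			id = reg_to_encoding(++idreg);
		}
	}

Under those assumptions, a guest read then starts from the cached
IDREG(kvm, id) value and only the vCPU-dependent masking in
kvm_arm_update_id_reg() is re-applied on each access, rather than
re-deriving the invariant feature masks on every trap.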