@@ -425,6 +425,29 @@ static int validate_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
return 0;
}
+static int validate_id_aa64isar0_el1(struct kvm_vcpu *vcpu,
+ const struct id_reg_desc *id_reg, u64 val)
+{
+ unsigned int sm3, sm4, sha1, sha2, sha3;
+
+ /* Run consistency checks according to Arm ARM */
+ sm3 = cpuid_feature_extract_unsigned_field(val, ID_AA64ISAR0_SM3_SHIFT);
+ sm4 = cpuid_feature_extract_unsigned_field(val, ID_AA64ISAR0_SM4_SHIFT);
+ if (sm3 != sm4)
+ return -EINVAL;
+
+ sha1 = cpuid_feature_extract_unsigned_field(val, ID_AA64ISAR0_SHA1_SHIFT);
+ sha2 = cpuid_feature_extract_unsigned_field(val, ID_AA64ISAR0_SHA2_SHIFT);
+ if ((sha1 == 0) ^ (sha2 == 0))
+ return -EINVAL;
+
+ sha3 = cpuid_feature_extract_unsigned_field(val, ID_AA64ISAR0_SHA3_SHIFT);
+ if (((sha2 == 2) ^ (sha3 == 1)) || (!sha1 && sha3))
+ return -EINVAL;
+
+ return 0;
+}
+
static void init_id_aa64pfr0_el1_desc(struct id_reg_desc *id_reg)
{
u64 limit = id_reg->vcpu_limit_val;
@@ -3256,6 +3279,11 @@ static struct id_reg_desc id_aa64pfr1_el1_desc = {
.vcpu_mask = vcpu_mask_id_aa64pfr1_el1,
};
+static struct id_reg_desc id_aa64isar0_el1_desc = {
+ .reg_desc = ID_SANITISED(ID_AA64ISAR0_EL1),
+ .validate = validate_id_aa64isar0_el1,
+};
+
#define ID_DESC(id_reg_name, id_reg_desc) \
[IDREG_IDX(SYS_##id_reg_name)] = (id_reg_desc)
@@ -3264,6 +3292,9 @@ static struct id_reg_desc *id_reg_desc_table[KVM_ARM_ID_REG_MAX_NUM] = {
/* CRm=4 */
ID_DESC(ID_AA64PFR0_EL1, &id_aa64pfr0_el1_desc),
ID_DESC(ID_AA64PFR1_EL1, &id_aa64pfr1_el1_desc),
+
+ /* CRm=6 */
+ ID_DESC(ID_AA64ISAR0_EL1, &id_aa64isar0_el1_desc),
};
static inline struct id_reg_desc *get_id_reg_desc(u32 id)
This patch adds id_reg_desc for ID_AA64ISAR0_EL1 to make it writable by userspace. Updating the sm3, sm4, sha1, sha2 and sha3 fields is allowed only if the values of those fields follow the Arm ARM. Signed-off-by: Reiji Watanabe <reijiw@google.com> --- arch/arm64/kvm/sys_regs.c | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+)