@@ -289,6 +289,16 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
(cpuid_feature_extract_unsigned_field(val, ID_AA64ISAR1_GPI_SHIFT) >= \
ID_AA64ISAR1_GPI_IMP_DEF)
+#define ISAR2_PTRAUTH_MASK (ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR2_GPA3))
+
+#define aa64isar2_has_apa3(val) \
+ (cpuid_feature_extract_unsigned_field(val, ID_AA64ISAR2_APA3_SHIFT) >= \
+ ID_AA64ISAR2_APA3_ARCHITECTED)
+#define aa64isar2_has_gpa3(val) \
+ (cpuid_feature_extract_unsigned_field(val, ID_AA64ISAR2_GPA3_SHIFT) >= \
+ ID_AA64ISAR2_GPA3_ARCHITECTED)
+
#define __FTR_BITS(ftr_sign, ftr_type, bit_pos, safe) { \
.sign = ftr_sign, \
.type = ftr_type, \
@@ -507,6 +517,31 @@ static int validate_id_aa64isar1_el1(struct kvm_vcpu *vcpu,
return 0;
}
+static int validate_id_aa64isar2_el1(struct kvm_vcpu *vcpu,
+ const struct id_reg_desc *id_reg, u64 val)
+{
+ bool has_gpa3, has_apa3, lim_has_gpa3, lim_has_apa3;
+ u64 lim = id_reg->vcpu_limit_val;
+
+ has_gpa3 = aa64isar2_has_gpa3(val);
+ has_apa3 = aa64isar2_has_apa3(val);
+ lim_has_gpa3 = aa64isar2_has_gpa3(lim);
+ lim_has_apa3 = aa64isar2_has_apa3(lim);
+
+ /*
+ * Check if there is a conflict in the requested value for
+ * ID_AA64ISAR2_EL1 with PTRAUTH configuration.
+ * See comments in validate_id_aa64isar1_el1() for more detail.
+ */
+ if (lim_has_gpa3 && (vcpu_has_ptrauth(vcpu) ^ has_gpa3))
+ return -EPERM;
+
+ if (lim_has_apa3 && (vcpu_has_ptrauth(vcpu) ^ has_apa3))
+ return -EPERM;
+
+ return 0;
+}
+
static void init_id_aa64pfr0_el1_desc(struct id_reg_desc *id_reg)
{
u64 limit = id_reg->vcpu_limit_val;
@@ -550,6 +585,13 @@ static void init_id_aa64isar1_el1_desc(struct id_reg_desc *id_reg)
id_reg->vcpu_limit_val &= ~ISAR1_TRAUTH_MASK;
}
+static void init_id_aa64isar2_el1_desc(struct id_reg_desc *id_reg)
+{
+ if (!system_has_full_ptr_auth())
+ id_reg->vcpu_limit_val &= ~ISAR2_PTRAUTH_MASK;
+}
+
+
static u64 vcpu_mask_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu,
const struct id_reg_desc *idr)
{
@@ -568,6 +610,13 @@ static u64 vcpu_mask_id_aa64isar1_el1(const struct kvm_vcpu *vcpu,
return vcpu_has_ptrauth(vcpu) ? 0 : ISAR1_TRAUTH_MASK;
}
+static u64 vcpu_mask_id_aa64isar2_el1(const struct kvm_vcpu *vcpu,
+ const struct id_reg_desc *idr)
+{
+ return vcpu_has_ptrauth(vcpu) ? 0 : ISAR2_PTRAUTH_MASK;
+}
+
+
static int validate_id_reg(struct kvm_vcpu *vcpu,
const struct id_reg_desc *id_reg, u64 val)
{
@@ -1544,11 +1593,6 @@ static u64 read_id_reg_with_encoding(const struct kvm_vcpu *vcpu, u32 id)
val = read_kvm_id_reg(vcpu->kvm, id);
switch (id) {
- case SYS_ID_AA64ISAR2_EL1:
- if (!vcpu_has_ptrauth(vcpu))
- val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) |
- ARM64_FEATURE_MASK(ID_AA64ISAR2_GPA3));
- break;
case SYS_ID_AA64DFR0_EL1:
/* Limit debug to ARMv8.0 */
val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER);
@@ -3359,6 +3403,16 @@ static struct id_reg_desc id_aa64isar1_el1_desc = {
},
};
+static struct id_reg_desc id_aa64isar2_el1_desc = {
+ .reg_desc = ID_SANITISED(ID_AA64ISAR2_EL1),
+ .init = init_id_aa64isar2_el1_desc,
+ .validate = validate_id_aa64isar2_el1,
+ .vcpu_mask = vcpu_mask_id_aa64isar2_el1,
+ .ftr_bits = {
+ U_FTR_BITS(FTR_EXACT, ID_AA64ISAR2_APA3_SHIFT, 0),
+ },
+};
+
#define ID_DESC(id_reg_name, id_reg_desc) \
[IDREG_IDX(SYS_##id_reg_name)] = (id_reg_desc)
@@ -3371,6 +3425,7 @@ static struct id_reg_desc *id_reg_desc_table[KVM_ARM_ID_REG_MAX_NUM] = {
/* CRm=6 */
ID_DESC(ID_AA64ISAR0_EL1, &id_aa64isar0_el1_desc),
ID_DESC(ID_AA64ISAR1_EL1, &id_aa64isar1_el1_desc),
+ ID_DESC(ID_AA64ISAR2_EL1, &id_aa64isar2_el1_desc),
};
static inline struct id_reg_desc *get_id_reg_desc(u32 id)
This patch adds id_reg_desc for ID_AA64ISAR2_EL1 to make it writable
by userspace.

Return an error if userspace tries to set PTRAUTH related fields of the
register to values that conflict with PTRAUTH configuration, which was
configured by KVM_ARM_VCPU_INIT, for the guest.

Signed-off-by: Reiji Watanabe <reijiw@google.com>
---
 arch/arm64/kvm/sys_regs.c | 65 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 60 insertions(+), 5 deletions(-)