
[v7,09/38] KVM: arm64: Make ID_AA64ISAR1_EL1 writable

Message ID 20220419065544.3616948-10-reijiw@google.com (mailing list archive)
State New, archived
Series KVM: arm64: Make CPU ID registers writable by userspace

Commit Message

Reiji Watanabe April 19, 2022, 6:55 a.m. UTC
This patch adds an id_reg_desc for ID_AA64ISAR1_EL1 to make the
register writable by userspace.

Return an error if userspace tries to set the PTRAUTH-related fields
of the register to values that conflict with the guest's PTRAUTH
configuration, which was set via KVM_ARM_VCPU_INIT.

Signed-off-by: Reiji Watanabe <reijiw@google.com>
---
 arch/arm64/kvm/sys_regs.c | 90 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 83 insertions(+), 7 deletions(-)
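
For reference (not part of the patch): once the register is writable, userspace sets it through the standard KVM_SET_ONE_REG interface. A minimal sketch follows; it assumes an already-initialized vCPU fd, and the set_id_aa64isar1() helper name is illustrative, not something defined by this series.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* ID_AA64ISAR1_EL1 is encoded as op0=3, op1=0, CRn=0, CRm=6, op2=1. */
#define REG_ID_AA64ISAR1_EL1	ARM64_SYS_REG(3, 0, 0, 6, 1)

/* Write a value to the vCPU's ID_AA64ISAR1_EL1 via KVM_SET_ONE_REG. */
static int set_id_aa64isar1(int vcpu_fd, uint64_t val)
{
	struct kvm_one_reg reg = {
		.id   = REG_ID_AA64ISAR1_EL1,
		.addr = (uint64_t)&val,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

With this patch applied, the ioctl fails with EINVAL when the value is internally inconsistent (e.g. both APA and API non-zero) and with EPERM when the PTRAUTH fields do not match the vCPU's KVM_ARM_VCPU_INIT features.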

Patch

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c01038cbdb31..dd4dcc1e4982 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -271,6 +271,24 @@  static bool trap_raz_wi(struct kvm_vcpu *vcpu,
 		return read_zero(vcpu, p);
 }
 
+#define ISAR1_TRAUTH_MASK	(ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) |	\
+				 ARM64_FEATURE_MASK(ID_AA64ISAR1_API) | \
+				 ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) |	\
+				 ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI))
+
+#define aa64isar1_has_apa(val)	\
+	(cpuid_feature_extract_unsigned_field(val, ID_AA64ISAR1_APA_SHIFT) >= \
+	 ID_AA64ISAR1_APA_ARCHITECTED)
+#define aa64isar1_has_api(val)	\
+	(cpuid_feature_extract_unsigned_field(val, ID_AA64ISAR1_API_SHIFT) >= \
+	 ID_AA64ISAR1_API_IMP_DEF)
+#define aa64isar1_has_gpa(val)	\
+	(cpuid_feature_extract_unsigned_field(val, ID_AA64ISAR1_GPA_SHIFT) >= \
+	 ID_AA64ISAR1_GPA_ARCHITECTED)
+#define aa64isar1_has_gpi(val)	\
+	(cpuid_feature_extract_unsigned_field(val, ID_AA64ISAR1_GPI_SHIFT) >= \
+	 ID_AA64ISAR1_GPI_IMP_DEF)
+
 #define __FTR_BITS(ftr_sign, ftr_type, bit_pos, safe) {		\
 	.sign = ftr_sign,					\
 	.type = ftr_type,					\
@@ -448,6 +466,47 @@  static int validate_id_aa64isar0_el1(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static int validate_id_aa64isar1_el1(struct kvm_vcpu *vcpu,
+				     const struct id_reg_desc *id_reg, u64 val)
+{
+	bool has_gpi, has_gpa, has_api, has_apa;
+	bool generic, address, lim_generic, lim_address;
+	u64 lim = id_reg->vcpu_limit_val;
+
+	has_gpi = aa64isar1_has_gpi(val);
+	has_gpa = aa64isar1_has_gpa(val);
+	has_api = aa64isar1_has_api(val);
+	has_apa = aa64isar1_has_apa(val);
+	if ((has_gpi && has_gpa) || (has_api && has_apa))
+		return -EINVAL;
+
+	generic = has_gpi || has_gpa;
+	address = has_api || has_apa;
+	lim_generic = aa64isar1_has_gpi(lim) || aa64isar1_has_gpa(lim);
+	lim_address = aa64isar1_has_api(lim) || aa64isar1_has_apa(lim);
+
+	/*
+	 * When PTRAUTH is configured for the vCPU via KVM_ARM_VCPU_INIT,
+	 * it means that userspace wants to expose
+	 * one of ID_AA64ISAR1_EL1.GPI, GPA or ID_AA64ISAR2_EL1.GPA3 and
+	 * one of ID_AA64ISAR1_EL1.API, APA or ID_AA64ISAR2_EL1.APA3 to
+	 * the guest (as per the Arm ARM, for generic code authentication
+	 * and address authentication, only one of those fields can be
+	 * non-zero).
+	 * Check that the requested value for ID_AA64ISAR1_EL1 does not
+	 * conflict with the PTRAUTH configuration.
+	 * (When lim_generic/lim_address is 0, generic/address must also
+	 *  be 0, which is checked by arm64_check_features().)
+	 */
+	if (lim_generic && (vcpu_has_ptrauth(vcpu) ^ generic))
+		return -EPERM;
+
+	if (lim_address && (vcpu_has_ptrauth(vcpu) ^ address))
+		return -EPERM;
+
+	return 0;
+}
+
 static void init_id_aa64pfr0_el1_desc(struct id_reg_desc *id_reg)
 {
 	u64 limit = id_reg->vcpu_limit_val;
@@ -485,6 +544,12 @@  static void init_id_aa64pfr1_el1_desc(struct id_reg_desc *id_reg)
 		id_reg->vcpu_limit_val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
 }
 
+static void init_id_aa64isar1_el1_desc(struct id_reg_desc *id_reg)
+{
+	if (!system_has_full_ptr_auth())
+		id_reg->vcpu_limit_val &= ~ISAR1_TRAUTH_MASK;
+}
+
 static u64 vcpu_mask_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu,
 					 const struct id_reg_desc *idr)
 {
@@ -497,6 +562,12 @@  static u64 vcpu_mask_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu,
 	return kvm_has_mte(vcpu->kvm) ? 0 : (ARM64_FEATURE_MASK(ID_AA64PFR1_MTE));
 }
 
+static u64 vcpu_mask_id_aa64isar1_el1(const struct kvm_vcpu *vcpu,
+					  const struct id_reg_desc *idr)
+{
+	return vcpu_has_ptrauth(vcpu) ? 0 : ISAR1_TRAUTH_MASK;
+}
+
 static int validate_id_reg(struct kvm_vcpu *vcpu,
 			   const struct id_reg_desc *id_reg, u64 val)
 {
@@ -1473,13 +1544,6 @@  static u64 read_id_reg_with_encoding(const struct kvm_vcpu *vcpu, u32 id)
 
 	val = read_kvm_id_reg(vcpu->kvm, id);
 	switch (id) {
-	case SYS_ID_AA64ISAR1_EL1:
-		if (!vcpu_has_ptrauth(vcpu))
-			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) |
-				 ARM64_FEATURE_MASK(ID_AA64ISAR1_API) |
-				 ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) |
-				 ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI));
-		break;
 	case SYS_ID_AA64ISAR2_EL1:
 		if (!vcpu_has_ptrauth(vcpu))
 			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) |
@@ -3284,6 +3348,17 @@  static struct id_reg_desc id_aa64isar0_el1_desc = {
 	.validate = validate_id_aa64isar0_el1,
 };
 
+static struct id_reg_desc id_aa64isar1_el1_desc = {
+	.reg_desc = ID_SANITISED(ID_AA64ISAR1_EL1),
+	.init = init_id_aa64isar1_el1_desc,
+	.validate = validate_id_aa64isar1_el1,
+	.vcpu_mask = vcpu_mask_id_aa64isar1_el1,
+	.ftr_bits = {
+		U_FTR_BITS(FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 0),
+		U_FTR_BITS(FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 0),
+	},
+};
+
 #define ID_DESC(id_reg_name, id_reg_desc)	\
 	[IDREG_IDX(SYS_##id_reg_name)] = (id_reg_desc)
 
@@ -3295,6 +3370,7 @@  static struct id_reg_desc *id_reg_desc_table[KVM_ARM_ID_REG_MAX_NUM] = {
 
 	/* CRm=6 */
 	ID_DESC(ID_AA64ISAR0_EL1, &id_aa64isar0_el1_desc),
+	ID_DESC(ID_AA64ISAR1_EL1, &id_aa64isar1_el1_desc),
 };
 
 static inline struct id_reg_desc *get_id_reg_desc(u32 id)
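
A companion sketch of what the new check means for a VMM (again not part of the patch): if the vCPU was created without KVM_ARM_VCPU_PTRAUTH_ADDRESS and KVM_ARM_VCPU_PTRAUTH_GENERIC, the PTRAUTH fields of ID_AA64ISAR1_EL1 must be written as zero, otherwise the write is rejected with -EPERM. The field shifts below are taken from the Arm ARM, and set_id_aa64isar1() is the hypothetical helper from the earlier note.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

int set_id_aa64isar1(int vcpu_fd, uint64_t val);	/* from the sketch above */

/* PTRAUTH fields of ID_AA64ISAR1_EL1: APA[7:4], API[11:8], GPA[27:24], GPI[31:28]. */
#define ISAR1_PTRAUTH_FIELDS	((0xfULL << 4) | (0xfULL << 8) |	\
				 (0xfULL << 24) | (0xfULL << 28))

/*
 * Mirror the host value into the vCPU, clearing the PTRAUTH fields when
 * the vCPU was created without the PTRAUTH features.  With this patch,
 * writing non-zero PTRAUTH fields for such a vCPU fails with -EPERM.
 */
static int sync_id_aa64isar1(int vcpu_fd, uint64_t host_val, bool vcpu_has_ptrauth)
{
	uint64_t val = host_val;

	if (!vcpu_has_ptrauth)
		val &= ~ISAR1_PTRAUTH_FIELDS;

	return set_id_aa64isar1(vcpu_fd, val) ? -errno : 0;
}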