@@ -834,6 +834,7 @@
#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0
#define ID_AA64PFR0_ELx_64BIT_ONLY 0x1
#define ID_AA64PFR0_ELx_32BIT_64BIT 0x2
+#define ID_AA64PFR0_GIC3 0x1
/* id_aa64pfr1 */
#define ID_AA64PFR1_MPAMFRAC_SHIFT 16
@@ -271,6 +271,19 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
return read_zero(vcpu, p);
}
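+/*
+ * Describe one 4-bit ID register field (all ID register fields are
+ * ARM64_FEATURE_FIELD_BITS wide) for checking a requested value
+ * against a limit with arm64_check_features().
+ */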
+#define __FTR_BITS(ftr_sign, ftr_type, bit_pos, safe) { \
+ .sign = ftr_sign, \
+ .type = ftr_type, \
+ .shift = bit_pos, \
+ .width = ARM64_FEATURE_FIELD_BITS, \
+ .safe_val = safe, \
+}
+
+#define S_FTR_BITS(ftr_type, bit_pos, safe_val) \
+ __FTR_BITS(FTR_SIGNED, ftr_type, bit_pos, safe_val)
+#define U_FTR_BITS(ftr_type, bit_pos, safe_val) \
+ __FTR_BITS(FTR_UNSIGNED, ftr_type, bit_pos, safe_val)
+
/*
 * Number of entries in id_reg_desc's ftr_bits[] (the number of 4-bit
 * fields in a 64-bit register, plus one terminator entry).
@@ -354,6 +367,86 @@ static void id_reg_desc_init(struct id_reg_desc *id_reg)
id_reg->vcpu_limit_val, val));
}
+static int validate_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
+ const struct id_reg_desc *id_reg, u64 val)
+{
+ int fp, simd;
+ unsigned int gic;
+ bool vcpu_has_sve = vcpu_has_sve(vcpu);
+ bool pfr0_has_sve = id_aa64pfr0_sve(val);
+
+ simd = cpuid_feature_extract_signed_field(val, ID_AA64PFR0_ASIMD_SHIFT);
+ fp = cpuid_feature_extract_signed_field(val, ID_AA64PFR0_FP_SHIFT);
+ /* AdvSIMD field must have the same value as FP field */
+ if (simd != fp)
+ return -EINVAL;
+
+ /* fp must be supported when sve is supported */
+ if (pfr0_has_sve && (fp < 0))
+ return -EINVAL;
+
+ /* Check if there is a conflict with a request via KVM_ARM_VCPU_INIT */
+ if (vcpu_has_sve ^ pfr0_has_sve)
+ return -EPERM;
+
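+	/*
+	 * With an in-kernel GICv3, the GIC field must indicate the system
+	 * register interface (GICv3.0/4.0, and no more). Otherwise the
+	 * field must not differ from the limit: GIC has no ftr_bits entry,
+	 * so arm64_check_features() requires an exact match.
+	 */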
+	if (irqchip_in_kernel(vcpu->kvm) &&
+	    vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+ gic = cpuid_feature_extract_unsigned_field(val,
+ ID_AA64PFR0_GIC_SHIFT);
+ if (gic == 0)
+ return -EPERM;
+
+ if (gic > ID_AA64PFR0_GIC3)
+ return -E2BIG;
+ } else {
+ u64 mask = ARM64_FEATURE_MASK(ID_AA64PFR0_GIC);
+ int r = arm64_check_features(id_reg->ftr_bits, val & mask,
+ id_reg->vcpu_limit_val & mask);
+
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static void init_id_aa64pfr0_el1_desc(struct id_reg_desc *id_reg)
+{
+ u64 limit = id_reg->vcpu_limit_val;
+ unsigned int gic;
+
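+	/* KVM doesn't support AMU for the guest; don't advertise it. */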
+ limit &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_AMU);
+ if (!system_supports_sve())
+ limit &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);
+
+ /*
+ * The default is to expose CSV2 == 1 and CSV3 == 1 if the HW
+ * isn't affected. Userspace can override this as long as it
+ * doesn't promise the impossible.
+ */
+ limit &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2) |
+ ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3));
+
+ if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
+ limit |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), 1);
+ if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED)
+ limit |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), 1);
+
+ gic = cpuid_feature_extract_unsigned_field(limit, ID_AA64PFR0_GIC_SHIFT);
+ if (gic > 1) {
+ /* Limit to GICv3.0/4.0 */
+ limit &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC);
+ limit |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), ID_AA64PFR0_GIC3);
+ }
+ id_reg->vcpu_limit_val = limit;
+}
+
+static u64 vcpu_mask_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu,
+ const struct id_reg_desc *idr)
+{
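+	/* Bits set in the returned mask read as zero for this vCPU. */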
+ return vcpu_has_sve(vcpu) ? 0 : ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);
+}
+
static int validate_id_reg(struct kvm_vcpu *vcpu,
const struct id_reg_desc *id_reg, u64 val)
{
@@ -1330,20 +1423,6 @@ static u64 read_id_reg_with_encoding(const struct kvm_vcpu *vcpu, u32 id)
val = read_kvm_id_reg(vcpu->kvm, id);
switch (id) {
- case SYS_ID_AA64PFR0_EL1:
- if (!vcpu_has_sve(vcpu))
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_AMU);
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2);
- val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
- val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
- if (irqchip_in_kernel(vcpu->kvm) &&
- vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC);
- val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1);
- }
- break;
case SYS_ID_AA64PFR1_EL1:
if (!kvm_has_mte(vcpu->kvm))
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
@@ -1443,48 +1522,6 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
return REG_HIDDEN;
}
-static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd,
- const struct kvm_one_reg *reg, void __user *uaddr)
-{
- const u64 id = sys_reg_to_index(rd);
- u8 csv2, csv3;
- int err;
- u64 val;
-
- err = reg_from_user(&val, uaddr, id);
- if (err)
- return err;
-
- /*
- * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
- * it doesn't promise more than what is actually provided (the
- * guest could otherwise be covered in ectoplasmic residue).
- */
- csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV2_SHIFT);
- if (csv2 > 1 ||
- (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
- return -EINVAL;
-
- /* Same thing for CSV3 */
- csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV3_SHIFT);
- if (csv3 > 1 ||
- (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
- return -EINVAL;
-
- /* We can only differ with CSV[23], and anything else is an error */
- val ^= read_id_reg(vcpu, rd, false);
- val &= ~((0xFUL << ID_AA64PFR0_CSV2_SHIFT) |
- (0xFUL << ID_AA64PFR0_CSV3_SHIFT));
- if (val)
- return -EINVAL;
-
- vcpu->kvm->arch.pfr0_csv2 = csv2;
-	vcpu->kvm->arch.pfr0_csv3 = csv3;
-
- return 0;
-}
-
/* cpufeature ID register user accessors */
static int __get_id_reg(const struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd, void __user *uaddr,
@@ -1809,8 +1846,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* AArch64 ID registers */
/* CRm=4 */
- { SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
- .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
+ ID_SANITISED(ID_AA64PFR0_EL1),
ID_SANITISED(ID_AA64PFR1_EL1),
ID_UNALLOCATED(4,2),
ID_UNALLOCATED(4,3),
@@ -3175,8 +3211,26 @@ int kvm_set_id_reg_feature(struct kvm *kvm, u32 id, u8 field_shift, u8 fval)
return __modify_kvm_id_reg(kvm, id, val, preserve_mask);
}
+static struct id_reg_desc id_aa64pfr0_el1_desc = {
+ .reg_desc = ID_SANITISED(ID_AA64PFR0_EL1),
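+	/* The GIC field is checked by the validate() hook, not generically. */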
+ .ignore_mask = ARM64_FEATURE_MASK(ID_AA64PFR0_GIC),
+ .init = init_id_aa64pfr0_el1_desc,
+ .validate = validate_id_aa64pfr0_el1,
+ .vcpu_mask = vcpu_mask_id_aa64pfr0_el1,
+ .ftr_bits = {
+ S_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, ID_AA64PFR0_FP_NI),
+ S_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, ID_AA64PFR0_ASIMD_NI),
+ }
+};
+
+#define ID_DESC(id_reg_name, id_reg_desc) \
+ [IDREG_IDX(SYS_##id_reg_name)] = (id_reg_desc)
+
/* A table of ID register information. */
-static struct id_reg_desc *id_reg_desc_table[KVM_ARM_ID_REG_MAX_NUM] = {};
+static struct id_reg_desc *id_reg_desc_table[KVM_ARM_ID_REG_MAX_NUM] = {
+ /* CRm=4 */
+ ID_DESC(ID_AA64PFR0_EL1, &id_aa64pfr0_el1_desc),
+};
static inline struct id_reg_desc *get_id_reg_desc(u32 id)
{
@@ -117,6 +117,15 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
else
INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
+	if (type == KVM_DEV_TYPE_ARM_VGIC_V3)
+		/*
+		 * Set ID_AA64PFR0_EL1.GIC to 1. This shouldn't fail unless
+		 * any vCPU in the guest has already started.
+		 */
+		WARN_ON_ONCE(kvm_set_id_reg_feature(kvm, SYS_ID_AA64PFR0_EL1,
+						    ID_AA64PFR0_GIC_SHIFT,
+						    ID_AA64PFR0_GIC3));
+
out_unlock:
unlock_all_vcpus(kvm);
return ret;
This patch adds an id_reg_desc for ID_AA64PFR0_EL1 to make the register
writable by userspace.

Return an error if userspace tries to set the SVE/GIC field of the
register to a value that conflicts with the SVE/GIC configuration of
the guest. The SIMD/FP/SVE fields of the requested value are validated
according to the Arm ARM.

Signed-off-by: Reiji Watanabe <reijiw@google.com>
---
 arch/arm64/include/asm/sysreg.h |   1 +
 arch/arm64/kvm/sys_regs.c       | 172 +++++++++++++++++++++-----------
 arch/arm64/kvm/vgic/vgic-init.c |   9 ++
 3 files changed, 123 insertions(+), 59 deletions(-)
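For illustration, a minimal userspace sketch of exercising the
now-writable register through KVM_GET_ONE_REG/KVM_SET_ONE_REG. The
helper name and vcpu_fd are assumptions for the example and error
handling is elided; this is not part of the patch:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Read ID_AA64PFR0_EL1 for a vCPU, clear its SVE field (bits [35:32]),
 * and write the value back. With this patch, the write succeeds as long
 * as it doesn't conflict with the vCPU's SVE configuration; otherwise
 * the ioctl fails (e.g. with EPERM from validate_id_aa64pfr0_el1()).
 */
static int clear_sve_in_pfr0(int vcpu_fd)
{
	uint64_t val;
	struct kvm_one_reg reg = {
		/* ID_AA64PFR0_EL1: op0=3, op1=0, CRn=0, CRm=4, op2=0 */
		.id = ARM64_SYS_REG(3, 0, 0, 4, 0),
		.addr = (uint64_t)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
		return -1;

	val &= ~(UINT64_C(0xf) << 32);	/* ID_AA64PFR0_EL1.SVE */

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

As the vgic-init comment above suggests, such a write is expected to
happen before any vCPU in the guest has started.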