diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -131,9 +131,6 @@ struct kvm_arch {
unsigned long *pmu_filter;
unsigned int pmuver;
- u8 pfr0_csv2;
- u8 pfr0_csv3;
-
/* Memory Tagging Extension enabled for the guest */
bool mte_enabled;
};
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -114,22 +114,6 @@ static int kvm_arm_default_max_vcpus(void)
return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
}
-static void set_default_spectre(struct kvm *kvm)
-{
- /*
- * The default is to expose CSV2 == 1 if the HW isn't affected.
- * Although this is a per-CPU feature, we make it global because
- * asymmetric systems are just a nuisance.
- *
- * Userspace can override this as long as it doesn't promise
- * the impossible.
- */
- if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
- kvm->arch.pfr0_csv2 = 1;
- if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED)
- kvm->arch.pfr0_csv3 = 1;
-}
-
/**
* kvm_arch_init_vm - initializes a VM data structure
* @kvm: pointer to the KVM struct
@@ -155,8 +139,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
/* The maximum number of VCPUs is limited by the host's GIC model */
kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();
- set_default_spectre(kvm);
-
return ret;
out_free_stage2_pgd:
kvm_free_stage2_pgd(&kvm->arch.mmu);
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -399,6 +399,70 @@ static void id_reg_info_init(struct id_reg_info *id_reg)
id_reg->init(id_reg);
}
+static int validate_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
+ const struct id_reg_info *id_reg, u64 val)
+{
+ int fp, simd;
+ bool vcpu_has_sve = vcpu_has_sve(vcpu);
+ bool pfr0_has_sve = id_aa64pfr0_sve(val);
+
+ simd = cpuid_feature_extract_signed_field(val, ID_AA64PFR0_ASIMD_SHIFT);
+ fp = cpuid_feature_extract_signed_field(val, ID_AA64PFR0_FP_SHIFT);
+ if (simd != fp)
+ return -EINVAL;
+
+ /* fp must be supported when sve is supported */
+ if (pfr0_has_sve && (fp < 0))
+ return -EINVAL;
+
+ /* Check if there is a conflict with a request via KVM_ARM_VCPU_INIT */
+ if (vcpu_has_sve ^ pfr0_has_sve)
+ return -EPERM;
+
+ return 0;
+}
+
+static void init_id_aa64pfr0_el1_info(struct id_reg_info *id_reg)
+{
+ u64 limit = id_reg->vcpu_limit_val;
+
+ limit &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_AMU));
+ if (!system_supports_sve())
+ limit &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);
+
+ /*
+ * The default is to expose CSV2 == 1 and CSV3 == 1 if the HW
+ * isn't affected. Userspace can override this as long as it
+ * doesn't promise the impossible.
+ */
+ limit &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2) |
+ ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3));
+
+ if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
+ limit |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), 1);
+ if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED)
+ limit |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), 1);
+
+ id_reg->vcpu_limit_val = limit;
+}
+
+static u64 get_reset_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
+ const struct id_reg_info *idr)
+{
+ return vcpu_has_sve(vcpu) ?
+ idr->vcpu_limit_val :
+ (idr->vcpu_limit_val & ~(ARM64_FEATURE_MASK(ID_AA64PFR0_SVE)));
+}
+
+static struct id_reg_info id_aa64pfr0_el1_info = {
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
+ .ftr_check_types = S_FCT(ID_AA64PFR0_ASIMD_SHIFT, FCT_LOWER_SAFE) |
+ S_FCT(ID_AA64PFR0_FP_SHIFT, FCT_LOWER_SAFE),
+ .init = init_id_aa64pfr0_el1_info,
+ .validate = validate_id_aa64pfr0_el1,
+ .get_reset_val = get_reset_id_aa64pfr0_el1,
+};
+
/*
* An ID register that needs special handling to control the value for the
* guest must have its own id_reg_info in id_reg_info_table.
@@ -407,7 +471,9 @@ static void id_reg_info_init(struct id_reg_info *id_reg)
* validation, etc.)
*/
#define GET_ID_REG_INFO(id) (id_reg_info_table[IDREG_IDX(id)])
-static struct id_reg_info *id_reg_info_table[KVM_ARM_ID_REG_MAX_NUM] = {};
+static struct id_reg_info *id_reg_info_table[KVM_ARM_ID_REG_MAX_NUM] = {
+ [IDREG_IDX(SYS_ID_AA64PFR0_EL1)] = &id_aa64pfr0_el1_info,
+};
static int validate_id_reg(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd, u64 val)
@@ -1241,15 +1307,6 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
u64 val = raz ? 0 : __vcpu_sys_reg(vcpu, IDREG_SYS_IDX(id));
switch (id) {
- case SYS_ID_AA64PFR0_EL1:
- if (!vcpu_has_sve(vcpu))
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_AMU);
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2);
- val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
- val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
- break;
case SYS_ID_AA64PFR1_EL1:
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
if (kvm_has_mte(vcpu->kvm)) {
@@ -1366,48 +1423,6 @@ static void reset_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
__vcpu_sys_reg(vcpu, IDREG_SYS_IDX(id)) = val;
}
-static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *rd,
- const struct kvm_one_reg *reg, void __user *uaddr)
-{
- const u64 id = sys_reg_to_index(rd);
- u8 csv2, csv3;
- int err;
- u64 val;
-
- err = reg_from_user(&val, uaddr, id);
- if (err)
- return err;
-
- /*
- * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
- * it doesn't promise more than what is actually provided (the
- * guest could otherwise be covered in ectoplasmic residue).
- */
- csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV2_SHIFT);
- if (csv2 > 1 ||
- (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
- return -EINVAL;
-
- /* Same thing for CSV3 */
- csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV3_SHIFT);
- if (csv3 > 1 ||
- (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
- return -EINVAL;
-
- /* We can only differ with CSV[23], and anything else is an error */
- val ^= read_id_reg(vcpu, rd, false);
- val &= ~((0xFUL << ID_AA64PFR0_CSV2_SHIFT) |
- (0xFUL << ID_AA64PFR0_CSV3_SHIFT));
- if (val)
- return -EINVAL;
-
- vcpu->kvm->arch.pfr0_csv2 = csv2;
- vcpu->kvm->arch.pfr0_csv3 = csv3 ;
-
- return 0;
-}
-
/* cpufeature ID register user accessors */
static int __get_id_reg(const struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd, void __user *uaddr,
@@ -1695,8 +1710,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* AArch64 ID registers */
/* CRm=4 */
- { SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
- .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
+ ID_SANITISED(ID_AA64PFR0_EL1),
ID_SANITISED(ID_AA64PFR1_EL1),
ID_UNALLOCATED(4,2),
ID_UNALLOCATED(4,3),
This patch adds id_reg_info for ID_AA64PFR0_EL1 to make it writable
by userspace.

The CSV2/CSV3 fields of the register were already writable, but a
value written to them used to affect all vCPUs of the guest. Now it
only affects the vCPU the register is written for.

Return an error if userspace tries to set the SVE field of the
register to a value that conflicts with the SVE configuration for the
guest (set via KVM_ARM_VCPU_INIT). The SIMD/FP/SVE fields of the
requested value are validated according to the Arm ARM.

Signed-off-by: Reiji Watanabe <reijiw@google.com>
---
 arch/arm64/include/asm/kvm_host.h |   3 -
 arch/arm64/kvm/arm.c              |  18 -----
 arch/arm64/kvm/sys_regs.c         | 122 +++++++++++++++++-------------
 3 files changed, 68 insertions(+), 75 deletions(-)
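A quick usage sketch for reviewers (not part of the patch): a minimal
userspace snippet showing how a VMM could exercise the now-writable
register, e.g. to stop advertising CSV2 to a single vCPU. It assumes an
arm64 build against the kernel uapi headers, a vcpu_fd obtained from
KVM_CREATE_VCPU, and that the write happens before the vCPU first runs;
hide_csv2() and the PFR0_* macros are made-up names for the example.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* ID_AA64PFR0_EL1: op0=3, op1=0, CRn=0, CRm=4, op2=0 */
#define PFR0_ID		ARM64_SYS_REG(3, 0, 0, 4, 0)
/* ID_AA64PFR0_EL1.CSV2 lives in bits [59:56] */
#define PFR0_CSV2_MASK	(0xfULL << 56)

/* Hide CSV2 from the guest on this vCPU only (hypothetical helper). */
static int hide_csv2(int vcpu_fd)
{
	uint64_t val;
	struct kvm_one_reg reg = {
		.id   = PFR0_ID,
		.addr = (uint64_t)(uintptr_t)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;

	/* Clearing a field can only lower what the guest is promised. */
	val &= ~PFR0_CSV2_MASK;

	/*
	 * With this patch the written value is checked by
	 * validate_id_aa64pfr0_el1(); for instance, flipping the SVE
	 * field against the KVM_ARM_VCPU_INIT configuration would make
	 * this ioctl fail with EPERM.
	 */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}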