
[RFC,3/6] KVM: arm64: Introduce a helper to retrieve errata

Message ID 20241011075053.80540-4-shameerali.kolothum.thodi@huawei.com (mailing list archive)
State New, archived
Series KVM: arm64: Errata management for VM Live migration

Commit Message

Shameerali Kolothum Thodi Oct. 11, 2024, 7:50 a.m. UTC
Introduce a helper to retrieve the errata map for a target CPU, and
update the errata matches functions to use the target CPU values when
they are provided.

Also introduce a "migration_safe_cap" field in the capabilities
structure. This should be a statically assigned constant for any
migration-safe erratum, because the existing "capability" value is
generated and may be renumbered or reordered, and hence cannot be used
to set bits in the migration errata bitmap.
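
For illustration, a migration-safe erratum entry and a caller of the
new helper could look like the sketch below. The migration_safe_errata
enum values are hypothetical (only struct migrn_target_cpu and
arm_get_migrn_errata_map() come from this series), and the 1418040
entry reuses the existing erratum_1418040_list purely as an example:

  /* Hypothetical static IDs for migration-safe errata (sketch only). */
  enum migration_safe_errata {
          MIGRN_SAFE_ERRATUM_NONE,        /* 0: not migration safe */
          MIGRN_SAFE_ERRATUM_1418040,
          NR_MIGRN_SAFE_ERRATA,
  };

  /* An erratum entry opts in by setting .migration_safe_cap. */
  {
          .desc = "ARM erratum 1418040",
          /* generated value, may be renumbered between kernels */
          .capability = ARM64_WORKAROUND_1418040,
          /* hypothetical static value, stable across kernels */
          .migration_safe_cap = MIGRN_SAFE_ERRATUM_1418040,
          ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
  },

  /* Building the errata bitmap for a given target CPU. */
  DECLARE_BITMAP(errata_map, NR_MIGRN_SAFE_ERRATA);
  struct migrn_target_cpu t_cpu = {
          .midr   = MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1),
          .revidr = 0,
  };

  bitmap_zero(errata_map, NR_MIGRN_SAFE_ERRATA);
  arm_get_migrn_errata_map(&t_cpu, errata_map);

Using the static MIGRN_SAFE_* value rather than the generated
capability number is what keeps the bitmap layout stable across kernel
versions.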

Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
---
 arch/arm64/include/asm/cpufeature.h |  8 ++++
 arch/arm64/kernel/cpu_errata.c      | 61 ++++++++++++++++++++++++------
 arch/arm64/kernel/cpufeature.c      | 18 ++++++++++
 3 files changed, 76 insertions(+), 11 deletions(-)

Patch

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index c7b1d3ae469e..eada7b9ac4ff 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -335,6 +335,13 @@  struct arm64_cpu_capabilities {
 	const char *desc;
 	u16 capability;
 	u16 type;
+	/*
+	 * For errata only. This should be a static enum value, separate
+	 * from the generated capability value above for this erratum. A
+	 * non-zero value indicates that the erratum can be safely enabled
+	 * for migration purposes for a specified target CPU.
+	 */
+	u16 migration_safe_cap;
 	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope,
 			void *target);
 	/*
@@ -625,6 +632,7 @@  void __init setup_system_features(void);
 void __init setup_user_features(void);
 
 void check_local_cpu_capabilities(void);
+void arm_get_migrn_errata_map(void *target, unsigned long *errata_map);
 
 u64 read_sanitised_ftr_reg(u32 id);
 u64 __read_sysreg_by_encoding(u32 sys_id);
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 37464f100a21..e0acb473312d 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -8,6 +8,7 @@ 
 #include <linux/arm-smccc.h>
 #include <linux/types.h>
 #include <linux/cpu.h>
+#include <linux/kvm.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
@@ -19,14 +20,26 @@  is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope,
 		       void *target)
 {
 	const struct arm64_midr_revidr *fix;
-	u32 midr = read_cpuid_id(), revidr;
+	struct migrn_target_cpu *t_cpu = target;
+	u32 midr, revidr;
+
+	if (t_cpu) {
+		midr = t_cpu->midr;
+	} else {
+		WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+		midr = read_cpuid_id();
+	}
 
-	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 	if (!is_midr_in_range(midr, &entry->midr_range))
 		return false;
 
 	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
-	revidr = read_cpuid(REVIDR_EL1);
+
+	if (t_cpu)
+		revidr = t_cpu->revidr;
+	else
+		revidr = read_cpuid(REVIDR_EL1);
+
 	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
 		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
 			return false;
@@ -38,18 +51,31 @@  static bool __maybe_unused
 is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
 			    int scope, void *target)
 {
-	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
+	struct migrn_target_cpu *t_cpu = target;
+	u32 midr;
+
+	if (t_cpu) {
+		midr = t_cpu->midr;
+	} else {
+		WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+		midr = read_cpuid_id();
+	}
+	return is_midr_in_range_list(midr, entry->midr_range_list);
 }
 
 static bool __maybe_unused
 is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope, void *target)
 {
+	struct migrn_target_cpu *t_cpu = target;
 	u32 model;
 
-	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+	if (t_cpu) {
+		model = t_cpu->midr;
+	} else {
+		WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+		model = read_cpuid_id();
+	}
 
-	model = read_cpuid_id();
 	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
 		 MIDR_ARCHITECTURE_MASK;
 
@@ -187,11 +213,24 @@  static bool __maybe_unused
 has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
 				int scope, void *target)
 {
-	u32 midr = read_cpuid_id();
-	bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
+	struct migrn_target_cpu *t_cpu = target;
+	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
+	u32 midr;
+	bool has_dic;
+
+	if (t_cpu) {
+		midr = t_cpu->midr;
+		/*
+		 * TBD: Should we pass CTR_EL0 as well, or treat this as
+		 * not safe for migration? For now, set this to false.
+		 */
+		has_dic = false;
+	} else {
+		WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+		midr = read_cpuid_id();
+		has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
+	}
-	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
 
-	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 	return is_midr_in_range(midr, &range) && has_dic;
 }
 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index ac0cff5ab09d..7b39b0a4aadd 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -3175,6 +3175,24 @@  static void setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
 			cap_set_elf_hwcap(hwcaps);
 }
 
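+/*
+ * Build the bitmap of migration-safe errata matching the given target
+ * CPU. If @target is NULL, match against the local CPU instead.
+ */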
+void arm_get_migrn_errata_map(void *target, unsigned long *errata_map)
+{
+	int i;
+	const struct arm64_cpu_capabilities *caps;
+
+	for (i = 0; i < ARM64_NCAPS; i++) {
+		caps = cpucap_ptrs[i];
+		if (!caps || !caps->migration_safe_cap ||
+		    !caps->matches(caps, cpucap_default_scope(caps), target))
+			continue;
+		__set_bit(caps->migration_safe_cap, errata_map);
+	}
+}
+
 static void update_cpu_capabilities(u16 scope_mask)
 {
 	int i;