@@ -386,6 +386,13 @@ struct arm64_cpu_capabilities {
const struct cpumask *cpus;
};
+#define MIGRN_TARGET_CPUS_HYP_CALL_VER_1 1
+#define MAX_MIGRN_TARGET_CPUS 64
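+
+/* MIDR/REVIDR pair identifying a CPU implementation this VM may be migrated to */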
+struct migrn_target {
+ u32 midr;
+ u32 revidr;
+};
+
static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
{
return cap->type & ARM64_CPUCAP_SCOPE_MASK;
@@ -19,11 +19,14 @@ static inline u64 paravirt_steal_clock(int cpu)
}
int __init pv_time_init(void);
+void __init pv_errata_migrn_target_init(void);
#else
#define pv_time_init() do {} while (0)
+#define pv_errata_migrn_target_init() do {} while (0)
+
#endif // CONFIG_PARAVIRT
#endif
@@ -14,6 +14,9 @@
#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>
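+
+/* Migration target CPUs reported by the hypervisor; populated once at boot */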
+u32 __ro_after_init errata_migrn_target_num;
+struct migrn_target __ro_after_init errata_migrn_target_cpus[MAX_MIGRN_TARGET_CPUS];
+
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
@@ -153,6 +153,54 @@ static bool __init has_pv_steal_clock(void)
return (res.a0 == SMCCC_RET_SUCCESS);
}
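+/* Defined in arch/arm64/kernel/cpu_errata.c */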
+extern struct migrn_target errata_migrn_target_cpus[];
+extern u32 errata_migrn_target_num;
+void __init pv_errata_migrn_target_init(void)
+{
+ u32 target_num;
+ struct arm_smccc_res res;
+ u32 curr = 0;
+
+ /* Nothing to do if the errata targets have already been retrieved */
+ if (errata_migrn_target_num)
+ return;
+
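+ /* a0: interface version in the upper 32 bits, target count in the lower 32 */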
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_MIGRN_TARGET_NUM_FUNC_ID, &res);
+ if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
+ return;
+
+ target_num = lower_32_bits(res.a0);
+ if (!target_num || target_num > MAX_MIGRN_TARGET_CPUS ||
+ upper_32_bits(res.a0) != MIGRN_TARGET_CPUS_HYP_CALL_VER_1)
+ return;
+
+ while (curr < target_num) {
+ unsigned long *ptr;
+ int j;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_MIGRN_TARGET_CPUS_FUNC_ID, &res);
+ if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
+ break;
+
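+ /*
+ * Each of a0-a3 packs one target: MIDR in bits [31:0],
+ * REVIDR in bits [63:32]. A zero register ends the list.
+ */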
+ ptr = (unsigned long *)&res;
+ for (j = 0; j < 4 && curr < target_num; j++, ptr++) {
+ if (!(*ptr))
+ goto out;
+ errata_migrn_target_cpus[curr].midr = lower_32_bits(*ptr);
+ errata_migrn_target_cpus[curr].revidr = upper_32_bits(*ptr);
+ curr++;
+ }
+ }
+out:
+ if (curr != target_num) {
+ pr_warn("Failed to retrieve all migration target CPUs\n");
+ return;
+ }
+
+ errata_migrn_target_num = target_num;
+ pr_info("Migration target CPUs set to %u\n", target_num);
+}
+
int __init pv_time_init(void)
{
int ret;
Provide a paravirt helper function to retrieve any migration target
CPUs, if available, and store them. This will be used in a subsequent
patch to enable any errata associated with those targets.

Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
---
ToDo: Not sure it makes sense to use kvm_arm_hyp_service_available()
here, as these hypercalls will be handled by the userspace VMM.
---
 arch/arm64/include/asm/cpufeature.h |  7 +++++
 arch/arm64/include/asm/paravirt.h   |  3 ++
 arch/arm64/kernel/cpu_errata.c      |  3 ++
 arch/arm64/kernel/paravirt.c        | 48 +++++++++++++++++++++++++++++
 4 files changed, 61 insertions(+)
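
For illustration, a minimal sketch of how a follow-up patch might consult
the stored targets when deciding whether to enable an erratum. The helper
name and the exact-MIDR matching policy (REVIDR ignored) are assumptions
for this example, not part of this series:

static bool is_midr_in_migrn_targets(u32 midr)
{
	u32 i;

	/* Hypothetical: enable the erratum if any migration target matches */
	for (i = 0; i < errata_migrn_target_num; i++) {
		if (errata_migrn_target_cpus[i].midr == midr)
			return true;
	}

	return false;
}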