@@ -19,11 +19,15 @@ static inline u64 paravirt_steal_clock(int cpu)
}
int __init pv_time_init(void);
+int __init pv_update_migrn_errata(unsigned long *errata_map);
#else
#define pv_time_init() do {} while (0)
+static inline int pv_update_migrn_errata(unsigned long *errata_map)
+{ return -EINVAL; }
+
#endif // CONFIG_PARAVIRT
#endif
@@ -85,6 +85,7 @@
#include <asm/kvm_host.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
+#include <asm/paravirt.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
@@ -111,6 +112,8 @@ static struct arm64_cpu_capabilities const __ro_after_init *cpucap_ptrs[ARM64_NC
DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS);
+DECLARE_BITMAP(migrn_safe_errata_map, ARM64_NCAPS);
+
bool arm64_use_ng_mappings = false;
EXPORT_SYMBOL(arm64_use_ng_mappings);
@@ -3189,6 +3192,29 @@ void arm_get_migrn_errata_map(void *target, unsigned long *errata_map)
}
}
+static void update_migrn_errata(void)
+{
+ int i, j;
+ const struct arm64_cpu_capabilities *caps;
+ u16 scope = ARM64_CPUCAP_LOCAL_CPU_ERRATUM;
+
+ if (pv_update_migrn_errata(migrn_safe_errata_map))
+ return;
+
+ for_each_set_bit(i, migrn_safe_errata_map, ARM64_NCAPS) {
+ for (j = 0; j < ARM64_NCAPS; j++) {
+ caps = cpucap_ptrs[j];
+ if (!caps || !(caps->type & scope) ||
+ caps->migration_safe_cap != i ||
+ cpus_have_cap(caps->capability))
+ continue;
+ if (caps->desc && !caps->cpus)
+ pr_info("Updated with migration errata: %s\n", caps->desc);
+ __set_bit(caps->capability, system_cpucaps);
+ }
+ }
+}
+
static void update_cpu_capabilities(u16 scope_mask)
{
int i;
@@ -3566,6 +3592,7 @@ static void __init setup_system_capabilities(void)
* cpucaps.
*/
update_cpu_capabilities(SCOPE_SYSTEM);
+ update_migrn_errata();
enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
apply_alternatives_all();
@@ -23,6 +23,7 @@
#include <asm/paravirt.h>
#include <asm/pvclock-abi.h>
#include <asm/smp_plat.h>
+#include <asm/hypervisor.h>
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
@@ -153,6 +154,23 @@ static bool __init has_pv_steal_clock(void)
return (res.a0 == SMCCC_RET_SUCCESS);
}
+int __init pv_update_migrn_errata(unsigned long *errata_map)
+{
+ int ret;
+ struct arm_smccc_res res;
+
+ ret = kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_MIGRN_ERRATA);
+ if (ret <= 0)
+ return -EOPNOTSUPP;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_MIGRN_ERRATA, &res);
+ if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
+ return -EINVAL;
+
+ bitmap_from_arr64(errata_map, &res, ARM64_NCAPS);
+ return 0;
+}
+
int __init pv_time_init(void)
{
int ret;
We now have a hypercall to retrieve any migration-related errata for guest kernels. Use that and update system_cpucaps based on any set errata. Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com> --- arch/arm64/include/asm/paravirt.h | 4 ++++ arch/arm64/kernel/cpufeature.c | 27 +++++++++++++++++++++++++++ arch/arm64/kernel/paravirt.c | 18 ++++++++++++++++++ 3 files changed, 49 insertions(+)