@@ -530,6 +530,22 @@ void arm64_set_ssbd_mitigation(bool state);
static inline void arm64_set_ssbd_mitigation(bool state) {}
#endif
+static inline bool is_cpu_meltdown_safe(void)
+{
+ /* List of CPUs that are not vulnerable and don't need KPTI */
+ static const struct midr_range kpti_safe_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+ MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+ { /* sentinel */ }
+ };
+
+ /* Don't force KPTI for CPUs that are not vulnerable */
+ if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+ return true;
+
+ return false;
+}
+
#endif /* __ASSEMBLY__ */
#endif
@@ -865,12 +865,6 @@ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
int scope)
{
- /* List of CPUs that are not vulnerable and don't need KPTI */
- static const struct midr_range kpti_safe_list[] = {
- MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
- MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
- { /* sentinel */ }
- };
char const *str = "command line option";
/*
@@ -894,8 +888,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
return true;
- /* Don't force KPTI for CPUs that are not vulnerable */
- if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+ if (is_cpu_meltdown_safe())
return false;
/* Defer to CPU feature registers */
Move the KPTI safe-list check out of unmap_kernel_at_el0() into a new
is_cpu_meltdown_safe() helper, in preparation for calling it from the
generic CPU vulnerabilities support.

Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
---
 arch/arm64/include/asm/cpufeature.h | 16 ++++++++++++++++
 arch/arm64/kernel/cpufeature.c      |  9 +--------
 2 files changed, 17 insertions(+), 8 deletions(-)
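
Note (not part of this patch): a minimal sketch of how a later patch in the
series might consume the new helper from the generic CPU vulnerabilities
sysfs hook. The use of cpu_show_meltdown() and arm64_kernel_unmapped_at_el0()
here is an assumption about the follow-up wiring, and the exact strings and
logic may differ from what eventually lands.

	/*
	 * Hypothetical sketch only: report Meltdown status via the
	 * generic cpu_show_meltdown() sysfs hook using the new helper.
	 */
	#include <linux/device.h>
	#include <asm/cpufeature.h>
	#include <asm/mmu.h>

	ssize_t cpu_show_meltdown(struct device *dev,
				  struct device_attribute *attr, char *buf)
	{
		/* CPUs on the MIDR safe list are not affected at all */
		if (is_cpu_meltdown_safe())
			return sprintf(buf, "Not affected\n");

		/* KPTI enabled: the kernel is unmapped at EL0 */
		if (arm64_kernel_unmapped_at_el0())
			return sprintf(buf, "Mitigation: PTI\n");

		return sprintf(buf, "Vulnerable\n");
	}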