idmap_kpti_install_ng_mappings() traverses all kernel page tables with
the caches off to replace global with non-global attributes, so that
KPTI may be enabled safely. This is costly, and can be avoided in cases
where we know we will be enabling KPTI regardless of whether any cores
are present that are susceptible to Meltdown. So add a helper that
tells us whether KPTI was forced on or off, which will help us decide
whether to use nG mappings when creating the mappings of the kernel
address space.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
 arch/arm64/include/asm/cpufeature.h |  7 ++++
 arch/arm64/kernel/cpufeature.c      | 38 ++++++++++++++------
 2 files changed, 35 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -558,6 +558,13 @@ static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
default: return CONFIG_ARM64_PA_BITS;
}
}
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+extern bool kpti_is_forced(bool *enabled);
+#else
+static inline bool kpti_is_forced(bool *enabled) { return false; }
+#endif
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -908,13 +908,11 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
-static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
- int scope)
+bool kpti_is_forced(bool *enabled)
{
- /* List of CPUs that are not vulnerable and don't need KPTI */
- static const struct midr_range kpti_safe_list[] = {
- MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
- MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+ static const struct midr_range kpti_blacklist[] = {
+ MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
+ MIDR_RANGE(MIDR_THUNDERX_81XX, 0, 0, 0, 0),
{ /* sentinel */ }
};
char const *str = "command line option";
@@ -924,8 +922,9 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
* ThunderX leads to apparent I-cache corruption of kernel text, which
* ends as well as you might imagine. Don't even try.
*/
- if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
- str = "ARM64_WORKAROUND_CAVIUM_27456";
+	if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456) &&
+	    is_midr_in_range_list(read_cpuid_id(), kpti_blacklist)) {
+		str = "CAVIUM_ERRATUM_27456";
__kpti_forced = -1;
}
@@ -933,12 +932,31 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
if (__kpti_forced) {
pr_info_once("kernel page table isolation forced %s by %s\n",
__kpti_forced > 0 ? "ON" : "OFF", str);
- return __kpti_forced > 0;
+ *enabled = __kpti_forced > 0;
+ return true;
}
/* Useful for KASLR robustness */
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0)
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
+ *enabled = true;
return true;
+ }
+ return false;
+}
+
+static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ /* List of CPUs that are not vulnerable and don't need KPTI */
+ static const struct midr_range kpti_safe_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+ MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+ { /* sentinel */ }
+ };
+ bool enabled;
+
+ if (kpti_is_forced(&enabled))
+ return enabled;
/* Don't force KPTI for CPUs that are not vulnerable */
if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
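
A note on intended usage, for reviewers: the sketch below is an
illustration of how the early mapping code might consume the new
helper, not code from this patch or from mainline. The function name
kernel_segment_prot() is invented, and the exact plumbing of the
pgprot argument is an assumption; only kpti_is_forced() (added above),
PTE_NG, and the pgprot helpers are existing interfaces.

#include <asm/cpufeature.h>	/* kpti_is_forced(), added by this patch */
#include <asm/pgtable-hwdef.h>	/* PTE_NG */
#include <asm/pgtable-types.h>	/* pgprot_t, pgprot_val(), __pgprot() */

/* Hypothetical helper for the early kernel mapping code. */
static pgprot_t kernel_segment_prot(pgprot_t prot)
{
	bool kpti_enabled;

	/*
	 * If KPTI was forced on, create the kernel mappings with the
	 * nG attribute from the start, so the costly G-to-nG rewrite
	 * in idmap_kpti_install_ng_mappings() has nothing left to do.
	 * Otherwise (forced off, or not decided yet), keep the
	 * mappings global.
	 */
	if (kpti_is_forced(&kpti_enabled) && kpti_enabled)
		return __pgprot(pgprot_val(prot) | PTE_NG);

	return prot;
}

When the helper returns false, nothing changes: unmap_kernel_at_el0()
still makes the call at capability-detection time, and the G-to-nG
rewrite still runs if KPTI ends up enabled.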