@@ -112,6 +112,7 @@ void native_play_dead(void);
void play_dead_common(void);
void wbinvd_on_cpu(int cpu);
void wbinvd_on_all_cpus(void);
+void wbnoinvd_on_all_cpus(void);
void smp_kick_mwait_play_dead(void);
@@ -159,6 +160,11 @@ static inline void wbinvd_on_all_cpus(void)
wbinvd();
}
+static inline void wbnoinvd_on_all_cpus(void)
+{
+ wbnoinvd();
+}
+
static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
return (struct cpumask *)cpumask_of(0);
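The !CONFIG_SMP stub above mirrors the existing wbinvd_on_all_cpus() pattern: on a uniprocessor build there is no remote CPU to interrupt, so the "all CPUs" helper collapses to executing the instruction locally. A minimal caller sketch, assuming a writeback-before-reuse use case (the function name is illustrative, not part of this patch):

static void example_writeback_all(void)
{
	/*
	 * Broadcast WBNOINVD to every online CPU; on hardware without
	 * X86_FEATURE_WBNOINVD this transparently degrades to WBINVD,
	 * so the call is correct (merely slower) everywhere.
	 */
	wbnoinvd_on_all_cpus();
}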
@@ -117,7 +117,24 @@ static inline void wrpkru(u32 pkru)
static __always_inline void wbinvd(void)
{
- asm volatile("wbinvd": : :"memory");
+ asm volatile("wbinvd" : : : "memory");
+}
+
+/* Raw encoding of WBNOINVD (F3 0F 09); older binutils lack the mnemonic. */
+#define ASM_WBNOINVD _ASM_BYTES(0xf3,0x0f,0x09)
+
+/*
+ * Write back all modified cache lines without invalidating them: cheaper
+ * than wbinvd() when the cached contents can remain resident.
+ */
+static __always_inline void wbnoinvd(void)
+{
+ /*
+ * If WBNOINVD is unavailable, fall back to the compatible but
+ * more destructive WBINVD (which still writes the caches back
+ * but also invalidates them).
+ */
+ alternative("wbinvd", ASM_WBNOINVD, X86_FEATURE_WBNOINVD);
}
static inline unsigned long __read_cr4(void)
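The alternative() macro emits WBINVD (0F 09) as the default instruction and, during boot-time alternatives patching, rewrites it in place with the 3-byte WBNOINVD encoding on CPUs that advertise X86_FEATURE_WBNOINVD. A rough open-coded equivalent, shown only to make the patching explicit (the patch's one-liner is the idiomatic form, and alternative() already supplies the "memory" clobber itself):

static __always_inline void wbnoinvd_open_coded(void)
{
	/* Runtime-patched: WBINVD by default, WBNOINVD when supported. */
	asm volatile(ALTERNATIVE("wbinvd", ASM_WBNOINVD,
				 X86_FEATURE_WBNOINVD)
		     : : : "memory");
}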
@@ -19,3 +19,14 @@ void wbinvd_on_all_cpus(void)
on_each_cpu(__wbinvd, NULL, 1);
}
EXPORT_SYMBOL(wbinvd_on_all_cpus);
+
+static void __wbnoinvd(void *dummy)
+{
+ wbnoinvd();
+}
+
+void wbnoinvd_on_all_cpus(void)
+{
+ on_each_cpu(__wbnoinvd, NULL, 1);
+}
+EXPORT_SYMBOL(wbnoinvd_on_all_cpus);
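on_each_cpu(func, info, wait) runs func on every online CPU, via IPI on remote CPUs and directly on the local one; with wait=1 it does not return until every callback has finished, so once wbnoinvd_on_all_cpus() returns, every CPU has written its caches back. A hypothetical module-level usage sketch (names are illustrative; the helper is exported with plain EXPORT_SYMBOL, so non-GPL modules may call it as well):

#include <linux/module.h>
#include <asm/smp.h>

static int __init wbnoinvd_demo_init(void)
{
	/* Blocks until all online CPUs have completed the writeback. */
	wbnoinvd_on_all_cpus();
	return 0;
}
module_init(wbnoinvd_demo_init);

MODULE_LICENSE("GPL");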