@@ -1326,6 +1326,24 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
put_cpu();
}
+static inline bool cr3_matches_current_mm(void)
+{
+ struct asi *asi = asi_get_current();
+ pgd_t *cr3_pgd;
+
+ /*
+ * Prevent read_cr3_pa -> [NMI, asi_exit] -> asi_get_current,
+ * otherwise we might find CR3 pointing to the ASI PGD but not
+ * find a current ASI domain.
+ */
+ barrier();
+ cr3_pgd = __va(read_cr3_pa());
+
+ if (cr3_pgd == current->mm->pgd)
+ return true;
+ return asi && (cr3_pgd == asi_pgd(asi));
+}
+
/*
* Blindly accessing user memory from NMI context can be dangerous
* if we're in the middle of switching the current user task or
@@ -1341,10 +1359,10 @@ bool nmi_uaccess_okay(void)
VM_WARN_ON_ONCE(!loaded_mm);
/*
- * The condition we want to check is
- * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
- * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
- * is supposed to be reasonably fast.
+ * The condition we want to check is that CR3 points to either
+ * current_mm->pgd or an appropriate ASI PGD. Reading CR3 may be slow,
+ * though, if we're running in a VM with shadow paging, and
+ * nmi_uaccess_okay() is supposed to be reasonably fast.
*
* Instead, we check the almost equivalent but somewhat conservative
* condition below, and we rely on the fact that switch_mm_irqs_off()
@@ -1353,7 +1371,7 @@ bool nmi_uaccess_okay(void)
if (loaded_mm != current_mm)
return false;
- VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
+ VM_WARN_ON_ONCE(!cr3_matches_current_mm());
return true;
}
nmi_uaccess_okay() emits a warning if current CR3 != mm->pgd. Extend the
check so the warning is not emitted when CR3 instead points at the PGD of
the current ASI domain.

Co-developed-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Brendan Jackman <jackmanb@google.com>
---
 arch/x86/mm/tlb.c | 28 +++++++++++++++++++++++-----
 1 file changed, 23 insertions(+), 5 deletions(-)
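(Not part of the patch; a reviewer-side reading aid only.) Below is a minimal,
standalone userspace C model of the cr3_matches_current_mm() check added above.
Everything in it (model_asi, mm_pgd, cur_asi, cr3 and the main() scenarios) is
a stand-in invented for illustration; the real code uses current->mm->pgd,
read_cr3_pa(), asi_get_current() and asi_pgd() exactly as in the diff, and the
NMI/barrier() ordering subtlety is not modelled here.

/*
 * Standalone model of cr3_matches_current_mm() -- illustration only, not
 * kernel code. Pointers stand in for PGDs; "cr3" is whatever the modelled
 * CPU's CR3 would resolve to via __va(read_cr3_pa()).
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct model_asi {
	void *pgd;			/* stands in for asi_pgd(asi) */
};

static void *mm_pgd;			/* stands in for current->mm->pgd */
static struct model_asi *cur_asi;	/* stands in for asi_get_current() */
static void *cr3;			/* stands in for __va(read_cr3_pa()) */

/* Same shape as the patched check: accept either the mm PGD or the ASI PGD. */
static bool model_cr3_matches_current_mm(void)
{
	if (cr3 == mm_pgd)
		return true;
	return cur_asi && cr3 == cur_asi->pgd;
}

int main(void)
{
	static int unrestricted, restricted;	/* two distinct "page tables" */
	struct model_asi asi = { .pgd = &restricted };

	mm_pgd = &unrestricted;

	/* 1: CR3 points at the mm's own PGD (no ASI involved). */
	cr3 = &unrestricted;
	printf("mm pgd loaded:  %d\n", model_cr3_matches_current_mm());

	/* 2: running inside an ASI domain, CR3 points at the ASI PGD. */
	cur_asi = &asi;
	cr3 = &restricted;
	printf("asi pgd loaded: %d\n", model_cr3_matches_current_mm());

	/* 3: CR3 matches neither -> nmi_uaccess_okay() would warn. */
	cur_asi = NULL;
	printf("neither loaded: %d\n", model_cr3_matches_current_mm());

	return 0;
}

The third scenario is the one the VM_WARN_ON_ONCE() in nmi_uaccess_okay() is
meant to catch: CR3 matches neither current->mm->pgd nor the PGD of a current
ASI domain.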