@@ -1340,6 +1340,22 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
put_cpu();
}
+static inline bool cr3_matches_current_mm(void)
+{
+ struct asi *asi = asi_get_current();
+ pgd_t *pgd_asi = asi_pgd(asi);
+ pgd_t *pgd_cr3;
+
+ /*
+ * Prevent read_cr3_pa -> [NMI, asi_exit] -> asi_get_current,
+ * otherwise we might find CR3 pointing to the ASI PGD but not
+ * find a current ASI domain.
+ */
+ barrier();
+ pgd_cr3 = __va(read_cr3_pa());
+ return pgd_cr3 == current->mm->pgd || pgd_cr3 == pgd_asi;
+}
+
/*
* Blindly accessing user memory from NMI context can be dangerous
* if we're in the middle of switching the current user task or
@@ -1355,10 +1371,10 @@ bool nmi_uaccess_okay(void)
VM_WARN_ON_ONCE(!loaded_mm);
/*
- * The condition we want to check is
- * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
- * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
- * is supposed to be reasonably fast.
+ * The condition we want to check is that CR3 points to either
+ * current_mm->pgd or an appropriate ASI PGD. Reading CR3 may be slow,
+ * though, if we're running in a VM with shadow paging, and
+ * nmi_uaccess_okay() is supposed to be reasonably fast.
*
* Instead, we check the almost equivalent but somewhat conservative
* condition below, and we rely on the fact that switch_mm_irqs_off()
@@ -1367,7 +1383,7 @@ bool nmi_uaccess_okay(void)
if (loaded_mm != current_mm)
return false;
- VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
+ VM_WARN_ON_ONCE(!cr3_matches_current_mm());
return true;
}
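
For reference, here is a minimal sketch of how a caller might consult
nmi_uaccess_okay() before touching user memory from NMI context, loosely
modeled on the existing copy_from_user_nmi() path. The helper name and its
bail-out convention are illustrative assumptions, not part of this patch;
it assumes <linux/uaccess.h> for the inatomic copy helpers.

/*
 * Illustrative sketch only: an NMI-context copy helper refuses to touch
 * user memory unless nmi_uaccess_okay() reports that the loaded mm and
 * CR3 are in a consistent state.
 */
static unsigned long nmi_copy_from_user(void *to, const void __user *from,
					unsigned long n)
{
	unsigned long ret;

	if (!nmi_uaccess_okay())
		return n;	/* report every byte as uncopied */

	pagefault_disable();
	ret = __copy_from_user_inatomic(to, from, n);
	pagefault_enable();

	return ret;
}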