@@ -345,6 +345,8 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
+ might_sleep();
+
lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
if (mm_has_notifiers(range->mm)) {
range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
@@ -368,6 +370,9 @@ mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
+ if (mmu_notifier_range_blockable(range))
+ might_sleep();
+
if (mm_has_notifiers(range->mm))
__mmu_notifier_invalidate_range_end(range, false);
}
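
For illustration only (not part of the diff above): the kind of caller these annotations are meant to catch is one that starts an invalidation from atomic context. With CONFIG_DEBUG_ATOMIC_SLEEP enabled, a hypothetical sequence like the following would now splat even when the mm has no notifiers registered, because the might_sleep() sits before the mm_has_notifiers() check:

    /*
     * Hypothetical mis-use, for illustration only: invalidation
     * started while holding a spinlock.  With the annotation above
     * this triggers "BUG: sleeping function called from invalid
     * context" even if no notifiers are registered on this mm.
     */
    spin_lock(&mm->page_table_lock);              /* atomic context */
    mmu_notifier_invalidate_range_start(&range);  /* splats here */
    /* ... pte updates ... */
    mmu_notifier_invalidate_range_end(&range);
    spin_unlock(&mm->page_table_lock);
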
Since mmu notifiers don't exist for most processes, but could block in interesting places, add some annotations. This should help make sure core mm keeps up its end of the mmu notifier contract. Because of that, the checks here are placed outside of all the notifier presence checks, so they fire even when no notifiers are registered. They compile away without CONFIG_DEBUG_ATOMIC_SLEEP.

Suggested by Jason.

Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: David Rientjes <rientjes@google.com>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: linux-mm@kvack.org
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
---
 include/linux/mmu_notifier.h | 5 +++++
 1 file changed, 5 insertions(+)
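
For reference on the "compile away" remark, a simplified sketch of the might_sleep() definition (approximating include/linux/kernel.h, not part of this patch): without CONFIG_DEBUG_ATOMIC_SLEEP the __might_sleep() debug check drops out and only the (possibly no-op) might_resched() remains.

    /* Simplified sketch, not part of this patch: */
    #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
    # define might_sleep() \
            do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
    #else
    # define might_sleep() do { might_resched(); } while (0)
    #endif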