@@ -14,6 +14,9 @@ static inline void mm_init_lock(struct mm_struct *mm)
init_rwsem(&mm->mmap_sem);
}
+static inline void mm_vma_lock(struct mm_struct *mm) {}
+static inline void mm_vma_unlock(struct mm_struct *mm) {}
+
static inline void mm_init_coarse_lock_range(struct mm_lock_range *range) {}
static inline void mm_init_lock_range(struct mm_lock_range *range,
unsigned long start, unsigned long end) {}
@@ -107,6 +110,9 @@ static inline void mm_init_lock(struct mm_struct *mm)
init_rwsem(&mm->mmap_sem);
}
+static inline void mm_vma_lock(struct mm_struct *mm) {}
+static inline void mm_vma_unlock(struct mm_struct *mm) {}
+
static inline void mm_init_coarse_lock_range(struct mm_lock_range *range)
{
range->mm = NULL;
@@ -131,10 +137,11 @@ static inline bool mm_range_is_coarse(struct mm_lock_range *range)
#define __DEP_MAP_MM_LOCK_INITIALIZER(lockname)
#endif
-#define MM_LOCK_INITIALIZER(name) { \
- .mutex = __MUTEX_INITIALIZER(name.mutex), \
- .rb_root = RB_ROOT, \
- __DEP_MAP_MM_LOCK_INITIALIZER(name) \
+#define MM_LOCK_INITIALIZER(name) { \
+ .mutex = __MUTEX_INITIALIZER(name.mutex), \
+ .rb_root = RB_ROOT, \
+ .vma_mutex = __MUTEX_INITIALIZER(name.vma_mutex), \
+ __DEP_MAP_MM_LOCK_INITIALIZER(name) \
}
#define MM_COARSE_LOCK_RANGE_INITIALIZER { \
@@ -148,9 +155,18 @@ static inline void mm_init_lock(struct mm_struct *mm)
mutex_init(&mm->mmap_sem.mutex);
mm->mmap_sem.rb_root = RB_ROOT;
+ mutex_init(&mm->mmap_sem.vma_mutex);
lockdep_init_map(&mm->mmap_sem.dep_map, "&mm->mmap_sem", &__key, 0);
}
+static inline void mm_vma_lock(struct mm_struct *mm) {
+ mutex_lock(&mm->mmap_sem.vma_mutex);
+}
+
+static inline void mm_vma_unlock(struct mm_struct *mm) {
+ mutex_unlock(&mm->mmap_sem.vma_mutex);
+}
+
static inline void mm_init_lock_range(struct mm_lock_range *range,
unsigned long start, unsigned long end) {
range->start = start;
@@ -292,9 +292,10 @@ struct mm_lock {
struct mutex mutex;
struct rb_root rb_root;
unsigned long seq;
+ struct mutex vma_mutex;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
};
#endif
This change adds the mm_vma_lock() and mm_vma_unlock() functions, which are to be used to protect per-mm global structures (such as the vma rbtree) when writers only hold a range lock. The functions are no-ops when CONFIG_MM_LOCK_RANGE is not enabled, as mmap_sem already protects such structures in that case. Signed-off-by: Michel Lespinasse <walken@google.com> --- include/linux/mm_lock.h | 24 ++++++++++++++++++++---- include/linux/mm_types.h | 1 + 2 files changed, 21 insertions(+), 4 deletions(-)