diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -15,6 +15,7 @@
 #include <linux/atomic.h>
 #include <linux/debug_locks.h>
 #include <linux/mm_types.h>
+#include <linux/mm_lock.h>
 #include <linux/range.h>
 #include <linux/pfn.h>
 #include <linux/percpu-refcount.h>
diff --git a/include/linux/mm_lock.h b/include/linux/mm_lock.h
new file mode 100644
--- /dev/null
+++ b/include/linux/mm_lock.h
@@ -0,0 +1,59 @@
+#ifndef _LINUX_MM_LOCK_H
+#define _LINUX_MM_LOCK_H
+
+static inline void mm_init_lock(struct mm_struct *mm)
+{
+ init_rwsem(&mm->mmap_sem);
+}
+
+static inline void mm_write_lock(struct mm_struct *mm)
+{
+ down_write(&mm->mmap_sem);
+}
+
+static inline int mm_write_lock_killable(struct mm_struct *mm)
+{
+ return down_write_killable(&mm->mmap_sem);
+}
+
+static inline bool mm_write_trylock(struct mm_struct *mm)
+{
+ return down_write_trylock(&mm->mmap_sem) != 0;
+}
+
+static inline void mm_write_unlock(struct mm_struct *mm)
+{
+ up_write(&mm->mmap_sem);
+}
+
+static inline void mm_downgrade_write_lock(struct mm_struct *mm)
+{
+ downgrade_write(&mm->mmap_sem);
+}
+
+static inline void mm_read_lock(struct mm_struct *mm)
+{
+ down_read(&mm->mmap_sem);
+}
+
+static inline int mm_read_lock_killable(struct mm_struct *mm)
+{
+ return down_read_killable(&mm->mmap_sem);
+}
+
+static inline bool mm_read_trylock(struct mm_struct *mm)
+{
+ return down_read_trylock(&mm->mmap_sem) != 0;
+}
+
+static inline void mm_read_unlock(struct mm_struct *mm)
+{
+ up_read(&mm->mmap_sem);
+}
+
+static inline bool mm_is_locked(struct mm_struct *mm)
+{
+ return rwsem_is_locked(&mm->mmap_sem) != 0;
+}
+
+#endif /* _LINUX_MM_LOCK_H */
This change wraps the existing mmap_sem related rwsem calls into a new
MM locking API. This is in preparation for extending that API to support
locking fine grained memory ranges.

Signed-off-by: Michel Lespinasse <walken@google.com>
---
 include/linux/mm.h      |  1 +
 include/linux/mm_lock.h | 59 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 60 insertions(+)
 create mode 100644 include/linux/mm_lock.h
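
As a usage sketch, not part of the patch itself: the hypothetical helper
below shows how a call site that used to take mmap_sem directly might look
once converted to the wrappers. The function name frob_address_space and
its body are made up purely for illustration.

#include <linux/errno.h>
#include <linux/mm.h>		/* pulls in mm_lock.h after this patch */

static int frob_address_space(struct mm_struct *mm)
{
	/* Was: if (down_write_killable(&mm->mmap_sem)) ... */
	if (mm_write_lock_killable(mm))
		return -EINTR;	/* interrupted by a fatal signal */

	/* ... modify the mm's VMAs under the exclusive lock ... */

	/* Only lookups remain, so drop to a shared lock. */
	mm_downgrade_write_lock(mm);

	/* ... read-only walks of the address space ... */

	mm_read_unlock(mm);	/* downgrade left us holding the read lock */
	return 0;
}

Because call sites go through these wrappers instead of touching
mm->mmap_sem directly, the rwsem can later be replaced by a range lock
without requiring another tree-wide conversion.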