
[RFC,v3,09/11] mseal: add MAP_SEALABLE to mmap()

Message ID 20231212231706.2680890-10-jeffxu@chromium.org (mailing list archive)
State New
Series Introduce mseal()

Commit Message

Jeff Xu Dec. 12, 2023, 11:17 p.m. UTC
From: Jeff Xu <jeffxu@chromium.org>

The MAP_SEALABLE flag is added to the flags field of mmap().
When present, it marks the map as sealable. A map created
without MAP_SEALABLE will not support sealing; in other words,
mseal() will fail for such a map.

Applications that don't care about sealing will see no change
in behavior. Those that need sealing support opt in by adding
MAP_SEALABLE when creating the map.
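
For illustration, a minimal user-space sketch of the opt-in flow
(error handling trimmed; the MAP_SEALABLE value is the one defined
by this patch, and sealing itself uses the mseal() interface
introduced earlier in this series):

#include <sys/mman.h>
#include <unistd.h>
#include <err.h>

#ifndef MAP_SEALABLE
#define MAP_SEALABLE 0x8000000	/* value defined by this patch */
#endif

int main(void)
{
	size_t len = getpagesize();

	/* Opt in to sealing at map creation time. */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_SEALABLE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");

	/*
	 * The mapping can now be sealed via mseal(), using the seal
	 * types and calling convention defined earlier in this series.
	 * Had the map been created without MAP_SEALABLE, that mseal()
	 * call would fail.
	 */
	return 0;
}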

Signed-off-by: Jeff Xu <jeffxu@chromium.org>
---
 include/linux/mm.h                     | 52 ++++++++++++++++++++++++--
 include/linux/mm_types.h               |  1 +
 include/uapi/asm-generic/mman-common.h |  1 +
 mm/mmap.c                              |  2 +-
 mm/mseal.c                             |  7 +++-
 5 files changed, 57 insertions(+), 6 deletions(-)

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 50dda474acc2..6f5dba9fbe21 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -267,6 +267,17 @@  extern unsigned int kobjsize(const void *objp);
 	MM_SEAL_PROT_PKEY | \
 	MM_SEAL_DISCARD_RO_ANON)
 
+/* define VM_SEALABLE in vm_seals of vm_area_struct. */
+#define VM_SEALABLE	_BITUL(31)
+
+/*
+ * VM_SEALS_BITS_ALL marks the bits used for
+ * sealing in vm_seals of vm_area_struct.
+ */
+#define VM_SEALS_BITS_ALL ( \
+	MM_SEAL_ALL | \
+	VM_SEALABLE)
+
 /*
  * PROT_SEAL_ALL is all supported flags in mmap().
  * See include/uapi/asm-generic/mman-common.h.
@@ -3330,9 +3341,17 @@  static inline void mm_populate(unsigned long addr, unsigned long len) {}
 
 #ifdef CONFIG_MSEAL
 /*
- * return the valid sealing (after mask).
+ * return the valid sealing bits (after mask); this includes the sealable bit.
  */
 static inline unsigned long vma_seals(struct vm_area_struct *vma)
+{
+	return (vma->vm_seals & VM_SEALS_BITS_ALL);
+}
+
+/*
+ * return the enabled sealing types (after mask), without the sealable bit.
+ */
+static inline unsigned long vma_enabled_seals(struct vm_area_struct *vma)
 {
 	return (vma->vm_seals & MM_SEAL_ALL);
 }
@@ -3342,9 +3361,14 @@  static inline void update_vma_seals(struct vm_area_struct *vma, unsigned long vm
 	vma->vm_seals |= vm_seals;
 }
 
+static inline bool is_vma_sealable(struct vm_area_struct *vma)
+{
+	return vma->vm_seals & VM_SEALABLE;
+}
+
 static inline bool check_vma_seals_mergeable(unsigned long vm_seals1, unsigned long vm_seals2)
 {
-	if ((vm_seals1 & MM_SEAL_ALL) != (vm_seals2 & MM_SEAL_ALL))
+	if ((vm_seals1 & VM_SEALS_BITS_ALL) != (vm_seals2 & VM_SEALS_BITS_ALL))
 		return false;
 
 	return true;
@@ -3384,9 +3408,15 @@  static inline unsigned long convert_mmap_seals(unsigned long prot)
  * check input sealing type from the "prot" field of mmap().
  * for CONFIG_MSEAL case, this always return 0 (successful).
  */
-static inline int check_mmap_seals(unsigned long prot, unsigned long *vm_seals)
+static inline int check_mmap_seals(unsigned long prot, unsigned long *vm_seals,
+	unsigned long flags)
 {
 	*vm_seals = convert_mmap_seals(prot);
+	/* Setting one of MM_SEAL_XX means the map is sealable. */
+	if (*vm_seals)
+		*vm_seals |= VM_SEALABLE;
+	else
+		*vm_seals |= (flags & MAP_SEALABLE) ? VM_SEALABLE : 0;
 	return 0;
 }
 #else
@@ -3395,6 +3425,16 @@  static inline unsigned long vma_seals(struct vm_area_struct *vma)
 	return 0;
 }
 
+static inline unsigned long vma_enabled_seals(struct vm_area_struct *vma)
+{
+	return 0;
+}
+
+static inline bool is_vma_sealable(struct vm_area_struct *vma)
+{
+	return false;
+}
+
 static inline bool check_vma_seals_mergeable(unsigned long vm_seals1, unsigned long vm_seals2)
 {
 	return true;
@@ -3426,11 +3466,15 @@  static inline void update_vma_seals(struct vm_area_struct *vma, unsigned long vm
  * check input sealing type from the "prot" field of mmap().
  * For not CONFIG_MSEAL, if SEAL flag is set, it will return failure.
  */
-static inline int check_mmap_seals(unsigned long prot, unsigned long *vm_seals)
+static inline int check_mmap_seals(unsigned long prot, unsigned long *vm_seals,
+	unsigned long flags)
 {
 	if (prot & PROT_SEAL_ALL)
 		return -EINVAL;
 
+	if (flags & MAP_SEALABLE)
+		return -EINVAL;
+
 	return 0;
 }
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 052799173c86..c9b04c545f39 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -691,6 +691,7 @@  struct vm_area_struct {
 	/*
 	 * bit masks for seal.
 	 * need this since vm_flags is full.
+	 * We could merge this into vm_flags if vm_flags is ever expanded.
 	 */
 	unsigned long vm_seals;		/* seal flags, see mm.h. */
 #endif
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index bf503962409a..57ef4507c00b 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -47,6 +47,7 @@ 
 
 #define MAP_UNINITIALIZED 0x4000000	/* For anonymous mmap, memory could be
 					 * uninitialized */
+#define MAP_SEALABLE	0x8000000	/* map is sealable. */
 
 /*
  * Flags for mlock
diff --git a/mm/mmap.c b/mm/mmap.c
index 6da8d83f2e66..6e35e2070060 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1235,7 +1235,7 @@  unsigned long do_mmap(struct file *file, unsigned long addr,
 	if (flags & MAP_FIXED_NOREPLACE)
 		flags |= MAP_FIXED;
 
-	if (check_mmap_seals(prot, &vm_seals) < 0)
+	if (check_mmap_seals(prot, &vm_seals, flags) < 0)
 		return -EINVAL;
 
 	if (!(flags & MAP_FIXED))
diff --git a/mm/mseal.c b/mm/mseal.c
index 294f48d33db6..5d4cf71b497e 100644
--- a/mm/mseal.c
+++ b/mm/mseal.c
@@ -121,9 +121,13 @@  bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start, unsigned long
  */
 static bool can_add_vma_seals(struct vm_area_struct *vma, unsigned long newSeals)
 {
+	/* if map is not sealable, reject. */
+	if (!is_vma_sealable(vma))
+		return false;
+
 	/* When SEAL_MSEAL is set, reject if a new type of seal is added. */
 	if ((vma->vm_seals & MM_SEAL_SEAL) &&
-	    (newSeals & ~(vma_seals(vma))))
+	    (newSeals & ~(vma_enabled_seals(vma))))
 		return false;
 
 	/*
@@ -185,6 +189,7 @@  static int mseal_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma,
  * 2> end is part of a valid vma.
  * 3> No gap (unallocated address) between start and end.
  * 4> requested seal type can be added in given address range.
+ * 5> map is sealable.
  */
 static int check_mm_seal(unsigned long start, unsigned long end,
 			 unsigned long newtypes)