
[RFC,31/37] mm: arm64: Set PAGE_METADATA_NONE in set_pte_at() if missing metadata storage

Message ID 20230823131350.114942-32-alexandru.elisei@arm.com (mailing list archive)
State New, archived
Series [RFC,01/37] mm: page_alloc: Rename gfp_to_alloc_flags_cma -> gfp_to_alloc_flags_fast

Commit Message

Alexandru Elisei Aug. 23, 2023, 1:13 p.m. UTC
When a metadata page is mapped in a process' address space and
mprotect(PROT_MTE) later changes the VMA flags to allow the use of tags,
the metadata page is migrated out when it is first accessed.
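
For reference, the userspace sequence that triggers this path looks
roughly like the sketch below (illustrative only, not part of this patch;
PROT_MTE is the arm64 uapi flag from <asm/mman.h>, and whether the mapping
happens to be backed by metadata pages is up to the page allocator):

  #define _GNU_SOURCE
  #include <sys/mman.h>

  #ifndef PROT_MTE
  #define PROT_MTE 0x20    /* arm64 uapi value */
  #endif

  int main(void)
  {
      size_t len = 4096;
      /* Plain anonymous mapping: the VMA starts out without VM_MTE. */
      char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

      if (p == MAP_FAILED)
          return 1;
      p[0] = 1;    /* the page is mapped, untagged */

      /* Allow tags on the VMA; existing pages must now get tag storage. */
      if (mprotect(p, len, PROT_READ | PROT_WRITE | PROT_MTE))
          return 1;

      p[0] = 2;    /* first access after mprotect(PROT_MTE) */
      return 0;
  }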

But this creates an interesting corner case. Let's consider the scenario:

Initial conditions: metadata page M1 and page P1 are mapped in a VMA
without VM_MTE. The metadata storage for page P1 is **metadata page M1**.

1. mprotect(PROT_MTE) changes the VMA, so now all pages must have the
   associated metadata storage reserved. The to-be-tagged pages are marked
   as PAGE_METADATA_NONE.
2. Page P1 is accessed, so its metadata page M1 must be reserved.
3. Because metadata page M1 is itself mapped, the metadata storage code
   migrates it. The replacement page for M1, page P2, is allocated without
   metadata storage (__GFP_TAGGED is not set); this is done intentionally
   in reserve_metadata_storage() to avoid recursion and deadlock.
4. Migration finishes and page P2 replaces M1 in the VMA, which now has
   VM_MTE set.

The result: P2 is mapped in a VM_MTE VMA, but the associated metadata
storage is not reserved.
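
Put differently, the sequence above ends with the invariant sketched below
being broken (illustrative only, not part of this patch;
page_tag_storage_reserved() is the helper used further down in the diff):

  /*
   * Illustrative only: a pte that maps a page as tagged should only be
   * installed once the page's tag storage has been reserved. After
   * step 4, the pte for page P2 violates this.
   */
  static void assert_tag_storage_reserved(pte_t pte)
  {
      if (system_supports_mte() && pte_tagged(pte))
          WARN_ON_ONCE(!page_tag_storage_reserved(pte_page(pte)));
  }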

Fix this by teaching set_pte_at() -> mte_sync_tags() to change the PTE
protection to PAGE_METADATA_NONE when the associated metadata storage is
not reserved.
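
For clarity, a condensed sketch of what mte_sync_tags() looks like after
this change (it restates the diff below; metadata_storage_enabled(),
page_tag_storage_reserved() and PAGE_METADATA_NONE are introduced by
earlier patches in this series):

  void mte_sync_tags(pte_t *pteval)
  {
      struct page *page = pte_page(*pteval);
      long i, nr_pages = compound_nr(page);

      for (i = 0; i < nr_pages; i++, page++) {
          /*
           * Tag storage is missing: downgrade the protection so the
           * next access faults and the storage can be reserved then,
           * instead of mapping the page as tagged.
           */
          if (metadata_storage_enabled() &&
              unlikely(!page_tag_storage_reserved(page))) {
              *pteval = pte_modify(*pteval, PAGE_METADATA_NONE);
              continue;
          }

          /* if PG_mte_tagged is set, tags have already been initialised */
          if (try_page_mte_tagging(page)) {
              mte_clear_page_tags(page_address(page));
              set_page_mte_tagged(page);
          }
      }
  }

__set_pte_at() now passes the pte by pointer, mte_sync_tags(&pte), so the
downgraded value is what actually gets installed in the page table.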

Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
---
 arch/arm64/include/asm/mte.h     |  4 ++--
 arch/arm64/include/asm/pgtable.h |  2 +-
 arch/arm64/kernel/mte.c          | 14 +++++++++++---
 3 files changed, 14 insertions(+), 6 deletions(-)

Patch

diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 70cfd09b4a11..e89d1fa3f410 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -108,7 +108,7 @@  static inline bool try_page_mte_tagging(struct page *page)
 }
 
 void mte_zero_clear_page_tags(void *addr);
-void mte_sync_tags(pte_t pte);
+void mte_sync_tags(pte_t *pteval);
 void mte_copy_page_tags(void *kto, const void *kfrom);
 void mte_thread_init_user(void);
 void mte_thread_switch(struct task_struct *next);
@@ -140,7 +140,7 @@  static inline bool try_page_mte_tagging(struct page *page)
 static inline void mte_zero_clear_page_tags(void *addr)
 {
 }
-static inline void mte_sync_tags(pte_t pte)
+static inline void mte_sync_tags(pte_t *pteval)
 {
 }
 static inline void mte_copy_page_tags(void *kto, const void *kfrom)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 2e42f7713425..e5e1c23afb14 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -338,7 +338,7 @@  static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 */
 	if (system_supports_mte() && pte_access_permitted(pte, false) &&
 	    !pte_special(pte) && pte_tagged(pte))
-		mte_sync_tags(pte);
+		mte_sync_tags(&pte);
 
 	__check_safe_pte_update(mm, ptep, pte);
 
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 4edecaac8f91..4556989f0b9e 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -20,7 +20,9 @@ 
 
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
+#include <asm/memory_metadata.h>
 #include <asm/mte.h>
+#include <asm/mte_tag_storage.h>
 #include <asm/ptrace.h>
 #include <asm/sysreg.h>
 
@@ -35,13 +37,19 @@  DEFINE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
 EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode);
 #endif
 
-void mte_sync_tags(pte_t pte)
+void mte_sync_tags(pte_t *pteval)
 {
-	struct page *page = pte_page(pte);
+	struct page *page = pte_page(*pteval);
 	long i, nr_pages = compound_nr(page);
 
-	/* if PG_mte_tagged is set, tags have already been initialised */
 	for (i = 0; i < nr_pages; i++, page++) {
+		if (metadata_storage_enabled() &&
+		    unlikely(!page_tag_storage_reserved(page))) {
+			*pteval = pte_modify(*pteval, PAGE_METADATA_NONE);
+			continue;
+		}
+
+		/* if PG_mte_tagged is set, tags have already been initialised */
 		if (try_page_mte_tagging(page)) {
 			mte_clear_page_tags(page_address(page));
 			set_page_mte_tagged(page);