@@ -36,6 +36,8 @@ void mte_free_tag_storage(char *storage);
/* track which pages have valid allocation tags */
#define PG_mte_tagged PG_arch_2
+/* simple lock to avoid multiple threads tagging the same page */
+#define PG_mte_lock PG_arch_3
static inline void set_page_mte_tagged(struct page *page)
{
@@ -60,6 +62,32 @@ static inline bool page_mte_tagged(struct page *page)
return ret;
}
+/*
+ * Lock the page for tagging and return 'true' if the page can be tagged,
+ * 'false' if already tagged. PG_mte_tagged is never cleared, so this locking
+ * only ever happens once, when a page's tags are first initialised.
+ *
+ * The page MTE lock state:
+ *
+ * Locked: PG_mte_lock && !PG_mte_tagged
+ * Unlocked: !PG_mte_lock || PG_mte_tagged
+ *
+ * Acquire semantics only if the page is tagged (returning 'false').
+ */
+static inline bool try_page_mte_tagging(struct page *page)
+{
+ if (!test_and_set_bit(PG_mte_lock, &page->flags))
+ return true;
+
+	/*
+	 * The tags are either being initialised or may already have been
+	 * initialised; wait for the PG_mte_tagged flag to be set.
+	 */
+ smp_cond_load_acquire(&page->flags, VAL & (1UL << PG_mte_tagged));
+
+ return false;
+}
+
void mte_zero_clear_page_tags(void *addr);
void mte_sync_tags(pte_t old_pte, pte_t pte);
void mte_copy_page_tags(void *kto, const void *kfrom);
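Every site converted in the rest of this patch follows the same shape. A
minimal sketch of the intended calling convention (not part of the diff;
"page" stands for whatever struct page is being initialised):

	if (try_page_mte_tagging(page)) {
		/* we won PG_mte_lock: initialise the tags... */
		mte_clear_page_tags(page_address(page));
		/* ...then publish them, releasing any waiters */
		set_page_mte_tagged(page);
	}
	/* on every path, page_mte_tagged(page) is now true */

Losers of the lock wait inside try_page_mte_tagging() until the winner sets
PG_mte_tagged, so the tags are stable as soon as the helper returns 'false'.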
@@ -84,6 +112,10 @@ static inline bool page_mte_tagged(struct page *page)
{
return false;
}
+static inline bool try_page_mte_tagging(struct page *page)
+{
+ return false;
+}
static inline void mte_zero_clear_page_tags(void *addr)
{
}
@@ -1049,6 +1049,7 @@ static inline void arch_swap_invalidate_area(int type)
#define __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
{
+ /* mte_restore_tags() takes the PG_mte_lock */
if (system_supports_mte() && mte_restore_tags(entry, &folio->page))
set_page_mte_tagged(&folio->page);
}
@@ -2020,7 +2020,7 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
* Clear the tags in the zero page. This needs to be done via the
* linear map which has the Tagged attribute.
*/
- if (!page_mte_tagged(ZERO_PAGE(0))) {
+ if (try_page_mte_tagging(ZERO_PAGE(0))) {
mte_clear_page_tags(lm_alias(empty_zero_page));
set_page_mte_tagged(ZERO_PAGE(0));
}
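cpu_enable_mte() runs on every CPU being brought online, so this block can be
reached concurrently. With the old page_mte_tagged() check two CPUs could both
see the flag clear and race to clear the zero page's tags; now exactly one
wins the lock and the rest wait inside the helper. A hypothetical
interleaving:

	/*
	 * CPU0: try_page_mte_tagging(ZERO_PAGE(0)) -> true, owns PG_mte_lock
	 * CPU1: try_page_mte_tagging(ZERO_PAGE(0)) -> spins in
	 *       smp_cond_load_acquire() until PG_mte_tagged appears
	 * CPU0: mte_clear_page_tags(); set_page_mte_tagged()
	 * CPU1: observes PG_mte_tagged -> returns false, skips the clear
	 */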
@@ -41,6 +41,7 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte,
if (check_swap && is_swap_pte(old_pte)) {
swp_entry_t entry = pte_to_swp_entry(old_pte);
+ /* mte_restore_tags() takes the PG_mte_lock */
if (!non_swap_entry(entry) && mte_restore_tags(entry, page)) {
set_page_mte_tagged(page);
return;
@@ -50,8 +51,10 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte,
if (!pte_is_tagged)
return;
- mte_clear_page_tags(page_address(page));
- set_page_mte_tagged(page);
+ if (try_page_mte_tagging(page)) {
+ mte_clear_page_tags(page_address(page));
+ set_page_mte_tagged(page);
+ }
}
void mte_sync_tags(pte_t old_pte, pte_t pte)
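mte_sync_page_tags() can run for the same page from multiple threads doing
set_pte_at() on a shared mapping. A sketch of the pre-patch race that the
try_page_mte_tagging() guard closes (hypothetical interleaving):

	/*
	 * Thread A: mte_clear_page_tags(page_address(page))
	 * Thread A: set_page_mte_tagged(page)
	 *   ...userspace may now store tags to the page...
	 * Thread B: mte_clear_page_tags(page_address(page))  <- wipes them
	 * Thread B: set_page_mte_tagged(page)
	 *
	 * With the lock, thread B never reaches the clear: it waits in
	 * try_page_mte_tagging() and gets 'false' once A has published.
	 */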
@@ -1067,15 +1067,19 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
clear_user(tags, MTE_GRANULES_PER_PAGE);
kvm_release_pfn_clean(pfn);
} else {
+ /*
+ * Only locking to serialise with a concurrent
+ * set_pte_at() in the VMM but still overriding the
+ * tags, hence ignoring the return value.
+ */
+ try_page_mte_tagging(page);
num_tags = mte_copy_tags_from_user(maddr, tags,
MTE_GRANULES_PER_PAGE);
- /*
- * Set the flag after checking the write
- * completed fully
- */
- if (num_tags == MTE_GRANULES_PER_PAGE)
- set_page_mte_tagged(page);
+ /* uaccess failed, don't leave stale tags */
+ if (num_tags != MTE_GRANULES_PER_PAGE)
+			mte_clear_page_tags(maddr);
+ set_page_mte_tagged(page);
kvm_release_pfn_dirty(pfn);
}
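The return value is ignored here because the ioctl is entitled to override any
existing tags; the call is only needed so that a concurrent set_pte_at() in
the VMM cannot be clearing the page's tags while the ioctl writes them. A
sketch of the interleaving this serialises (hypothetical):

	/*
	 * VMM thread: set_pte_at() -> mte_sync_tags()
	 *               -> try_page_mte_tagging() == true, clearing tags...
	 * ioctl:      try_page_mte_tagging()  -> waits on PG_mte_tagged
	 * VMM thread: set_page_mte_tagged()
	 * ioctl:      ...returns false; the clear has finished, so it is
	 *             now safe to override the tags from user space.
	 */

Note also that mte_clear_page_tags() takes a linear-map address, hence maddr
(i.e. page_address(page)) rather than the struct page pointer on the
short-copy path above.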
@@ -1066,7 +1066,7 @@ static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
return;
for (i = 0; i < nr_pages; i++, page++) {
- if (!page_mte_tagged(page)) {
+ if (try_page_mte_tagging(page)) {
mte_clear_page_tags(page_address(page));
set_page_mte_tagged(page);
}
@@ -22,6 +22,8 @@ void copy_highpage(struct page *to, struct page *from)
copy_page(kto, kfrom);
if (system_supports_mte() && page_mte_tagged(from)) {
+ /* It's a new page, shouldn't have been tagged yet */
+ WARN_ON_ONCE(!try_page_mte_tagging(to));
mte_copy_page_tags(kto, kfrom);
set_page_mte_tagged(to);
}
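Here 'to' has just come from the page allocator, so the lock must be
uncontended; the WARN_ON_ONCE() documents that assumption while still moving
the page through the normal lock/publish protocol. In terms of the state
encoding defined earlier, the destination page transitions as follows
(sketch):

	/*
	 * before:                      !PG_mte_lock && !PG_mte_tagged
	 * after try_page_mte_tagging:   PG_mte_lock && !PG_mte_tagged (locked)
	 * after set_page_mte_tagged:    PG_mte_tagged set   (unlocked again)
	 */

tag_clear_highpage() below relies on the same freshly-allocated invariant.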
@@ -930,6 +930,8 @@ struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
void tag_clear_highpage(struct page *page)
{
+ /* Newly allocated page, shouldn't have been tagged yet */
+ WARN_ON_ONCE(!try_page_mte_tagging(page));
mte_zero_clear_page_tags(page_address(page));
set_page_mte_tagged(page);
}
@@ -53,6 +53,9 @@ bool mte_restore_tags(swp_entry_t entry, struct page *page)
if (!tags)
return false;
+	/* did we lose the race with another tag restore? */
+ if (!try_page_mte_tagging(page))
+ return false;
mte_restore_page_tags(page_address(page), tags);
return true;
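Taken together with the arch_swap_restore() and mte_sync_page_tags() hunks
above, the swap-in path now looks like this (a sketch of the assumed call
chain; the lock is taken inside mte_restore_tags() while the caller publishes
the flag):

	/*
	 * arch_swap_restore() / mte_sync_page_tags()
	 *   -> mte_restore_tags(entry, page)
	 *        -> try_page_mte_tagging(page)  false => lost the race, but
	 *                                       only after the winner has
	 *                                       set PG_mte_tagged
	 *        -> mte_restore_page_tags(...)  winner restores the tags
	 *   -> set_page_mte_tagged(page)        caller publishes the result
	 */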