@@ -1032,6 +1032,9 @@ struct mm_struct {
#ifdef CONFIG_RMAP_ID
int mm_rmap_id;
+ unsigned long mm_rmap_subid_1;
+ unsigned long mm_rmap_subid_2[2];
+ unsigned long mm_rmap_subid_3[3];
#endif /* CONFIG_RMAP_ID */
} __randomize_layout;
@@ -180,12 +180,54 @@ struct anon_vma *folio_get_anon_vma(struct folio *folio);
void free_rmap_id(int id);
int alloc_rmap_id(void);
+#define RMAP_SUBID_1_MAX_ORDER 2
+#define RMAP_SUBID_2_MIN_ORDER 3
+#define RMAP_SUBID_2_MAX_ORDER 5
+#define RMAP_SUBID_3_MIN_ORDER 6
+#define RMAP_SUBID_3_MAX_ORDER 9
+#define RMAP_SUBID_4_MIN_ORDER 10
#define RMAP_SUBID_4_MAX_ORDER 10
#define RMAP_SUBID_5_MIN_ORDER 11
#define RMAP_SUBID_5_MAX_ORDER 12
#define RMAP_SUBID_6_MIN_ORDER 13
#define RMAP_SUBID_6_MAX_ORDER 15
+static inline unsigned long calc_rmap_subid(unsigned int n, unsigned int i)
+{
+ unsigned long nr = 0, mult = 1;
+
+ while (i) {
+ if (i & 1)
+ nr += mult;
+ mult *= (n + 1);
+ i >>= 1;
+ }
+ return nr;
+}
+
+static inline unsigned long calc_rmap_subid_1(int rmap_id)
+{
+ VM_WARN_ON_ONCE(rmap_id < RMAP_ID_MIN || rmap_id > RMAP_ID_MAX);
+
+ return calc_rmap_subid(1u << RMAP_SUBID_1_MAX_ORDER, rmap_id);
+}
+
+static inline unsigned long calc_rmap_subid_2(int rmap_id, int nr)
+{
+ VM_WARN_ON_ONCE(rmap_id < RMAP_ID_MIN || rmap_id > RMAP_ID_MAX || nr > 1);
+
+ return calc_rmap_subid(1u << RMAP_SUBID_2_MAX_ORDER,
+ (rmap_id >> (nr * 12)) & 0xfff);
+}
+
+static inline unsigned long calc_rmap_subid_3(int rmap_id, int nr)
+{
+ VM_WARN_ON_ONCE(rmap_id < RMAP_ID_MIN || rmap_id > RMAP_ID_MAX || nr > 2);
+
+ return calc_rmap_subid(1u << RMAP_SUBID_3_MAX_ORDER,
+ (rmap_id >> (nr * 8)) & 0xff);
+}
+
static inline void __folio_prep_large_rmap(struct folio *folio)
{
const unsigned int order = folio_order(folio);
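(Aside, not part of the patch: calc_rmap_subid() above spreads the bits of
the given rmap_id chunk over the digits of a base-(n+1) number, where n is
the number of possible exclusive mappings for the covered folio orders. For
example, with n = 32 (order-5) and i = 0b101 it returns
1*33^0 + 0*33^1 + 1*33^2 = 1090. Keeping every digit at 0 or 1 means that
scaling a sub-ID by a mapping count of at most n can never carry into a
neighbouring digit, which is what makes the accumulated values comparable
against a single MM's sub-ID times its mapcount.)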
@@ -202,10 +244,16 @@ static inline void __folio_prep_large_rmap(struct folio *folio)
atomic_long_set(&folio->_rmap_val4, 0);
fallthrough;
#endif
- default:
+ case RMAP_SUBID_4_MIN_ORDER ... RMAP_SUBID_4_MAX_ORDER:
atomic_long_set(&folio->_rmap_val3, 0);
+ fallthrough;
+ case RMAP_SUBID_3_MIN_ORDER ... RMAP_SUBID_3_MAX_ORDER:
atomic_long_set(&folio->_rmap_val2, 0);
+ fallthrough;
+ case RMAP_SUBID_2_MIN_ORDER ... RMAP_SUBID_2_MAX_ORDER:
atomic_long_set(&folio->_rmap_val1, 0);
+ fallthrough;
+ default:
atomic_long_set(&folio->_rmap_val0, 0);
break;
}
@@ -227,10 +275,16 @@ static inline void __folio_undo_large_rmap(struct folio *folio)
VM_WARN_ON_ONCE(atomic_long_read(&folio->_rmap_val4));
fallthrough;
#endif
- default:
+ case RMAP_SUBID_4_MIN_ORDER ... RMAP_SUBID_4_MAX_ORDER:
VM_WARN_ON_ONCE(atomic_long_read(&folio->_rmap_val3));
+ fallthrough;
+ case RMAP_SUBID_3_MIN_ORDER ... RMAP_SUBID_3_MAX_ORDER:
VM_WARN_ON_ONCE(atomic_long_read(&folio->_rmap_val2));
+ fallthrough;
+ case RMAP_SUBID_2_MIN_ORDER ... RMAP_SUBID_2_MAX_ORDER:
VM_WARN_ON_ONCE(atomic_long_read(&folio->_rmap_val1));
+ fallthrough;
+ default:
VM_WARN_ON_ONCE(atomic_long_read(&folio->_rmap_val0));
break;
}
@@ -822,6 +822,12 @@ static inline int mm_alloc_rmap_id(struct mm_struct *mm)
if (id < 0)
return id;
mm->mm_rmap_id = id;
+ mm->mm_rmap_subid_1 = calc_rmap_subid_1(id);
+ mm->mm_rmap_subid_2[0] = calc_rmap_subid_2(id, 0);
+ mm->mm_rmap_subid_2[1] = calc_rmap_subid_2(id, 1);
+ mm->mm_rmap_subid_3[0] = calc_rmap_subid_3(id, 0);
+ mm->mm_rmap_subid_3[1] = calc_rmap_subid_3(id, 1);
+ mm->mm_rmap_subid_3[2] = calc_rmap_subid_3(id, 2);
return 0;
}
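(Cost-wise, the pre-calculated sub-IDs add 1 + 2 + 3 == 6 unsigned longs,
i.e. 48 bytes on 64bit, to each mm_struct, in exchange for not having to
recompute them on every rmap operation.)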
@@ -87,6 +87,39 @@ static DEFINE_IDA(rmap_ida);
* involved page tables are locked and stop any page table walkers.
*/
+/*
+ * With 4 (order-2) possible exclusive mappings per folio, we can have
+ * 16777216 = 16M sub-IDs per 64bit value.
+ */
+static unsigned long get_rmap_subid_1(struct mm_struct *mm)
+{
+ return mm->mm_rmap_subid_1;
+}
+
+/*
+ * With 32 (order-5) possible exclusive mappings per folio, we can have
+ * 4096 sub-IDs per 64bit value.
+ *
+ * With 2 such 64bit values, we can support 4096^2 == 16M IDs.
+ */
+static unsigned long get_rmap_subid_2(struct mm_struct *mm, int nr)
+{
+ VM_WARN_ON_ONCE(nr > 1);
+ return mm->mm_rmap_subid_2[nr];
+}
+
+/*
+ * With 512 (order-9) possible exclusive mappings per folio, we can have
+ * 256 sub-IDs per 64bit value.
+ *
+ * With 3 such 64bit values, we can support 256^3 == 16M IDs.
+ */
+static unsigned long get_rmap_subid_3(struct mm_struct *mm, int nr)
+{
+ VM_WARN_ON_ONCE(nr > 2);
+ return mm->mm_rmap_subid_3[nr];
+}
+
/*
* With 1024 (order-10) possible exclusive mappings per folio, we can have 64
* sub-IDs per 64bit value.
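(For reference, the way the 24bit rmap_id is split across the sub-ID chunks,
as implied by the shifts and masks in rmap.h: <= order-2 keeps all 24 bits in
a single value (2^24 == 16M sub-IDs), <= order-5 uses 2 x 12 bits (4096 each),
<= order-9 uses 3 x 8 bits (256 each), and the existing <= order-10 variant
correspondingly uses 64 (2^6) sub-IDs per value across 4 values; every split
multiplies back out to the full 16M ID space.)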
@@ -279,12 +312,24 @@ void __folio_set_large_rmap_val(struct folio *folio, int count,
atomic_long_set(&folio->_rmap_val4, get_rmap_subid_5(mm, 4) * count);
break;
#endif
- default:
+ case RMAP_SUBID_4_MIN_ORDER ... RMAP_SUBID_4_MAX_ORDER:
atomic_long_set(&folio->_rmap_val0, get_rmap_subid_4(mm, 0) * count);
atomic_long_set(&folio->_rmap_val1, get_rmap_subid_4(mm, 1) * count);
atomic_long_set(&folio->_rmap_val2, get_rmap_subid_4(mm, 2) * count);
atomic_long_set(&folio->_rmap_val3, get_rmap_subid_4(mm, 3) * count);
break;
+ case RMAP_SUBID_3_MIN_ORDER ... RMAP_SUBID_3_MAX_ORDER:
+ atomic_long_set(&folio->_rmap_val0, get_rmap_subid_3(mm, 0) * count);
+ atomic_long_set(&folio->_rmap_val1, get_rmap_subid_3(mm, 1) * count);
+ atomic_long_set(&folio->_rmap_val2, get_rmap_subid_3(mm, 2) * count);
+ break;
+ case RMAP_SUBID_2_MIN_ORDER ... RMAP_SUBID_2_MAX_ORDER:
+ atomic_long_set(&folio->_rmap_val0, get_rmap_subid_2(mm, 0) * count);
+ atomic_long_set(&folio->_rmap_val1, get_rmap_subid_2(mm, 1) * count);
+ break;
+ default:
+ atomic_long_set(&folio->_rmap_val0, get_rmap_subid_1(mm) * count);
+ break;
}
}
@@ -313,12 +358,24 @@ void __folio_add_large_rmap_val(struct folio *folio, int count,
atomic_long_add(get_rmap_subid_5(mm, 4) * count, &folio->_rmap_val4);
break;
#endif
- default:
+ case RMAP_SUBID_4_MIN_ORDER ... RMAP_SUBID_4_MAX_ORDER:
atomic_long_add(get_rmap_subid_4(mm, 0) * count, &folio->_rmap_val0);
atomic_long_add(get_rmap_subid_4(mm, 1) * count, &folio->_rmap_val1);
atomic_long_add(get_rmap_subid_4(mm, 2) * count, &folio->_rmap_val2);
atomic_long_add(get_rmap_subid_4(mm, 3) * count, &folio->_rmap_val3);
break;
+ case RMAP_SUBID_3_MIN_ORDER ... RMAP_SUBID_3_MAX_ORDER:
+ atomic_long_add(get_rmap_subid_3(mm, 0) * count, &folio->_rmap_val0);
+ atomic_long_add(get_rmap_subid_3(mm, 1) * count, &folio->_rmap_val1);
+ atomic_long_add(get_rmap_subid_3(mm, 2) * count, &folio->_rmap_val2);
+ break;
+ case RMAP_SUBID_2_MIN_ORDER ... RMAP_SUBID_2_MAX_ORDER:
+ atomic_long_add(get_rmap_subid_2(mm, 0) * count, &folio->_rmap_val0);
+ atomic_long_add(get_rmap_subid_2(mm, 1) * count, &folio->_rmap_val1);
+ break;
+ default:
+ atomic_long_add(get_rmap_subid_1(mm) * count, &folio->_rmap_val0);
+ break;
}
}
@@ -330,7 +387,7 @@ bool __folio_has_large_matching_rmap_val(struct folio *folio, int count,
switch (order) {
#if MAX_ORDER >= RMAP_SUBID_6_MIN_ORDER
- case RMAP_SUBID_6_MIN_ORDER .. RMAP_SUBID_6_MAX_ORDER:
+ case RMAP_SUBID_6_MIN_ORDER ... RMAP_SUBID_6_MAX_ORDER:
diff |= atomic_long_read(&folio->_rmap_val0) ^ (get_rmap_subid_6(mm, 0) * count);
diff |= atomic_long_read(&folio->_rmap_val1) ^ (get_rmap_subid_6(mm, 1) * count);
diff |= atomic_long_read(&folio->_rmap_val2) ^ (get_rmap_subid_6(mm, 2) * count);
@@ -340,7 +397,7 @@ bool __folio_has_large_matching_rmap_val(struct folio *folio, int count,
break;
#endif
#if MAX_ORDER >= RMAP_SUBID_5_MIN_ORDER
- case RMAP_SUBID_5_MIN_ORDER .. RMAP_SUBID_5_MAX_ORDER:
+ case RMAP_SUBID_5_MIN_ORDER ... RMAP_SUBID_5_MAX_ORDER:
diff |= atomic_long_read(&folio->_rmap_val0) ^ (get_rmap_subid_5(mm, 0) * count);
diff |= atomic_long_read(&folio->_rmap_val1) ^ (get_rmap_subid_5(mm, 1) * count);
diff |= atomic_long_read(&folio->_rmap_val2) ^ (get_rmap_subid_5(mm, 2) * count);
@@ -348,12 +405,24 @@ bool __folio_has_large_matching_rmap_val(struct folio *folio, int count,
diff |= atomic_long_read(&folio->_rmap_val4) ^ (get_rmap_subid_5(mm, 4) * count);
break;
#endif
- default:
+ case RMAP_SUBID_4_MIN_ORDER ... RMAP_SUBID_4_MAX_ORDER:
diff |= atomic_long_read(&folio->_rmap_val0) ^ (get_rmap_subid_4(mm, 0) * count);
diff |= atomic_long_read(&folio->_rmap_val1) ^ (get_rmap_subid_4(mm, 1) * count);
diff |= atomic_long_read(&folio->_rmap_val2) ^ (get_rmap_subid_4(mm, 2) * count);
diff |= atomic_long_read(&folio->_rmap_val3) ^ (get_rmap_subid_4(mm, 3) * count);
break;
+ case RMAP_SUBID_3_MIN_ORDER ... RMAP_SUBID_3_MAX_ORDER:
+ diff |= atomic_long_read(&folio->_rmap_val0) ^ (get_rmap_subid_3(mm, 0) * count);
+ diff |= atomic_long_read(&folio->_rmap_val1) ^ (get_rmap_subid_3(mm, 1) * count);
+ diff |= atomic_long_read(&folio->_rmap_val2) ^ (get_rmap_subid_3(mm, 2) * count);
+ break;
+ case RMAP_SUBID_2_MIN_ORDER ... RMAP_SUBID_2_MAX_ORDER:
+ diff |= atomic_long_read(&folio->_rmap_val0) ^ (get_rmap_subid_2(mm, 0) * count);
+ diff |= atomic_long_read(&folio->_rmap_val1) ^ (get_rmap_subid_2(mm, 1) * count);
+ break;
+ default:
+ diff |= atomic_long_read(&folio->_rmap_val0) ^ (get_rmap_subid_1(mm) * count);
+ break;
}
return !diff;
}
For smaller folios, we can use fewer rmap values:

* <= order-2: 1x 64bit value
* <= order-5: 2x 64bit values
* <= order-9: 3x 64bit values

We end up with a lot of subids, so we cannot really use lookup tables.
Pre-calculate the subids per MM.

For order-9 we could think about having a lookup table with 128bit
entries. Further, we could calculate them only when really required.

With 2 MiB THP this now implies only 3 instead of 4 values.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 include/linux/mm_types.h |  3 ++
 include/linux/rmap.h     | 58 ++++++++++++++++++++++++++++-
 kernel/fork.c            |  6 +++
 mm/rmap_id.c             | 79 +++++++++++++++++++++++++++++++++++++---
 4 files changed, 139 insertions(+), 7 deletions(-)
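To see the whole scheme in one place, below is a small standalone userspace
sketch (not kernel code; the helper and constants are local to the example,
a 64bit unsigned long is assumed, and the rmap IDs are made up). It mirrors
the order-5 case: two 12-bit chunks of a 24bit ID are encoded in base 33,
scaled by per-MM mapping counts, and the folio only counts as exclusively
mapped when both accumulated values match a single MM's sub-IDs times its
mapcount.

#include <assert.h>
#include <stdio.h>

/* Mirrors RMAP_SUBID_2_MAX_ORDER from the patch; local to this sketch. */
#define SKETCH_SUBID_2_MAX_ORDER        5

/* Same bit-spreading as calc_rmap_subid() in the patch. */
static unsigned long calc_subid(unsigned int n, unsigned int i)
{
        unsigned long nr = 0, mult = 1;

        while (i) {
                if (i & 1)
                        nr += mult;
                mult *= (n + 1);
                i >>= 1;
        }
        return nr;
}

int main(void)
{
        const unsigned int n = 1u << SKETCH_SUBID_2_MAX_ORDER;  /* 32 mappings */
        const unsigned int id_a = 0x123456, id_b = 0x654321;    /* fake rmap IDs */
        unsigned long val0 = 0, val1 = 0;

        /* "MM A" maps the folio 20 times, "MM B" maps it 12 times. */
        val0 += calc_subid(n, id_a & 0xfff) * 20;
        val1 += calc_subid(n, (id_a >> 12) & 0xfff) * 20;
        val0 += calc_subid(n, id_b & 0xfff) * 12;
        val1 += calc_subid(n, (id_b >> 12) & 0xfff) * 12;

        /* Shared: the values do not match MM A's sub-IDs times the total mapcount. */
        assert(val0 != calc_subid(n, id_a & 0xfff) * 32 ||
               val1 != calc_subid(n, (id_a >> 12) & 0xfff) * 32);

        /* Unmap MM B again: now MM A (20 mappings) is detected as exclusive. */
        val0 -= calc_subid(n, id_b & 0xfff) * 12;
        val1 -= calc_subid(n, (id_b >> 12) & 0xfff) * 12;
        assert(val0 == calc_subid(n, id_a & 0xfff) * 20 &&
               val1 == calc_subid(n, (id_a >> 12) & 0xfff) * 20);

        printf("order-5 sub-ID sketch: OK\n");
        return 0;
}

The same comparison is what __folio_has_large_matching_rmap_val() performs
with three, four, five or six values for the larger orders.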