diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -1207,6 +1207,20 @@ static inline void hugetlb_unregister_node(struct node *node)
}
#endif /* CONFIG_HUGETLB_PAGE */
+#ifdef CONFIG_HUGETLB_HIGH_GRANULARITY_MAPPING
+bool hugetlb_hgm_enabled(struct vm_area_struct *vma);
+bool hugetlb_hgm_eligible(struct vm_area_struct *vma);
+#else
+static inline bool hugetlb_hgm_enabled(struct vm_area_struct *vma)
+{
+	return false;
+}
+static inline bool hugetlb_hgm_eligible(struct vm_area_struct *vma)
+{
+	return false;
+}
+#endif
+
static inline spinlock_t *huge_pte_lock(struct hstate *h,
struct mm_struct *mm, pte_t *pte)
{
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6966,6 +6966,10 @@ static bool pmd_sharing_possible(struct vm_area_struct *vma)
#ifdef CONFIG_USERFAULTFD
if (uffd_disable_huge_pmd_share(vma))
return false;
+#endif
+#ifdef CONFIG_HUGETLB_HIGH_GRANULARITY_MAPPING
+	if (hugetlb_hgm_enabled(vma))
+		return false;
#endif
/*
* Only shared VMAs can share PMDs.
@@ -7229,6 +7233,25 @@ __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
+#ifdef CONFIG_HUGETLB_HIGH_GRANULARITY_MAPPING
+bool hugetlb_hgm_eligible(struct vm_area_struct *vma)
+{
+	/*
+	 * All shared VMAs may have HGM.
+	 *
+	 * HGM requires using the VMA lock, which only exists for shared VMAs.
+	 * To make HGM work for private VMAs, we would need to use another
+	 * scheme to prevent collapsing/splitting from invalidating other
+	 * threads' page table walks.
+	 */
+	return vma && (vma->vm_flags & VM_MAYSHARE);
+}
+bool hugetlb_hgm_enabled(struct vm_area_struct *vma)
+{
+	return vma && (vma->vm_flags & VM_HUGETLB_HGM);
+}
+#endif /* CONFIG_HUGETLB_HIGH_GRANULARITY_MAPPING */
+
/*
* These functions are overwritable if your architecture needs its own
* behavior.
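
Not part of this patch, for illustration only: a minimal sketch of how a
later caller in the series might consult these helpers when picking a
mapping granularity. fault_mapping_size() is a hypothetical name;
huge_page_size() and hstate_vma() are the existing hugetlb accessors.

/*
 * Hypothetical example: choose the size to map at fault time. With HGM
 * enabled on the VMA, later patches can map at PAGE_SIZE granularity;
 * otherwise the full hstate-sized page is mapped.
 */
static unsigned long fault_mapping_size(struct vm_area_struct *vma)
{
	if (hugetlb_hgm_enabled(vma))
		return PAGE_SIZE;
	return huge_page_size(hstate_vma(vma));
}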
hugetlb_hgm_eligible indicates that a VMA is eligible to have HGM
explicitly enabled via MADV_SPLIT, and hugetlb_hgm_enabled indicates
that HGM has been enabled.

Signed-off-by: James Houghton <jthoughton@google.com>
---
 include/linux/hugetlb.h | 14 ++++++++++++++
 mm/hugetlb.c            | 23 +++++++++++++++++++++++
 2 files changed, 37 insertions(+)
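
As a usage note, per the commit message HGM is enabled explicitly via
MADV_SPLIT on an eligible (shared) hugetlb VMA. A hedged userspace
sketch follows; MADV_SPLIT comes from earlier in this series (it is not
in mainline madvise(2)), and the numeric value used here is an
assumption.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

#ifndef MADV_SPLIT
#define MADV_SPLIT 26		/* from this series; value is an assumption */
#endif

#define MAP_LEN (2UL << 20)	/* one 2MiB hugepage */

int main(void)
{
	/* Only shared mappings are HGM-eligible (VM_MAYSHARE). */
	void *p = mmap(NULL, MAP_LEN, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Mark the VMA for HGM; hugetlb_hgm_enabled() is then true. */
	if (madvise(p, MAP_LEN, MADV_SPLIT))
		perror("madvise(MADV_SPLIT)");

	munmap(p, MAP_LEN);
	return 0;
}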