@@ -69,6 +69,18 @@
#define MADV_MEMDEFRAG 20 /* Worth backing with hugepages */
#define MADV_NOMEMDEFRAG 21 /* Not worth backing with hugepages */
+#define MADV_SPLITHUGEPAGE 24 /* Split PMD-mapped huge pages in range once */
+#define MADV_PROMOTEHUGEPAGE 25 /* Promote range into PMD-sized huge pages */
+
+#define MADV_SPLITHUGEMAP 26 /* Split huge PMD mappings in range once */
+#define MADV_PROMOTEHUGEMAP 27 /* Promote range into huge PMD mappings */
+
+#define MADV_SPLITHUGEPUDPAGE 28 /* Split PUD-mapped huge pages in range once */
+#define MADV_PROMOTEHUGEPUDPAGE 29 /* Promote range into PUD-sized huge pages */
+
+#define MADV_SPLITHUGEPUDMAP 30 /* Split huge PUD mappings in range once */
+#define MADV_PROMOTEHUGEPUDMAP 31 /* Promote range into huge PUD mappings */
+
/* compatibility flags */
#define MAP_FILE 0
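
For reference, a minimal userspace sketch of how the new advice values could be exercised once this patch is applied. The local MADV_* defines, the 2MB huge page size, and the aligned allocation are illustrative assumptions rather than part of the patch; the alignment matters because the promote paths below only act on ranges that fully cover an aligned PMD (or PUD) region.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <sys/mman.h>

/* Advice values from the uapi hunk above; defined locally because they
 * are not yet in the libc headers (assumed numbering). */
#define MADV_SPLITHUGEPAGE	24
#define MADV_PROMOTEHUGEPAGE	25

#define HPAGE_SIZE	(2UL << 20)	/* assumed PMD huge page size (x86_64) */

int main(void)
{
	size_t len = 2 * HPAGE_SIZE;
	void *buf;

	/* The promote path rejects ranges that do not cover a PMD-aligned
	 * huge page region, so allocate PMD-aligned anonymous memory. */
	if (posix_memalign(&buf, HPAGE_SIZE, len))
		return 1;
	memset(buf, 0, len);		/* fault the range in */

	/* Ask the kernel to assemble the range into PMD-sized huge pages. */
	if (madvise(buf, len, MADV_PROMOTEHUGEPAGE))
		perror("MADV_PROMOTEHUGEPAGE");

	/* Later, split the same range back into base pages. */
	if (madvise(buf, len, MADV_SPLITHUGEPAGE))
		perror("MADV_SPLITHUGEPAGE");

	free(buf);
	return 0;
}
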
@@ -624,6 +624,95 @@ static long madvise_memdefrag(struct vm_area_struct *vma,
*prev = vma;
return memdefrag_madvise(vma, &vma->vm_flags, behavior);
}
+
+static long madvise_split_promote_hugepage(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start, unsigned long end, int behavior)
+{
+ struct page *page;
+ unsigned long addr = start, haddr;
+	int ret = 0;
+
+	*prev = vma;
+ while (addr < end && !ret) {
+ switch (behavior) {
+ case MADV_SPLITHUGEMAP:
+ split_huge_pmd_address(vma, addr, false, NULL);
+ addr += HPAGE_PMD_SIZE;
+ break;
+ case MADV_SPLITHUGEPUDMAP:
+ split_huge_pud_address(vma, addr, false, NULL);
+ addr += HPAGE_PUD_SIZE;
+ break;
+ case MADV_SPLITHUGEPAGE:
+ page = follow_page(vma, addr, FOLL_GET);
+ if (page) {
+ lock_page(page);
+ if (split_huge_page(page)) {
+					pr_debug("%s: failed to split huge page\n", __func__);
+ ret = -EBUSY;
+ }
+ unlock_page(page);
+ put_page(page);
+ } else
+ ret = -ENODEV;
+ addr += HPAGE_PMD_SIZE;
+ break;
+ case MADV_SPLITHUGEPUDPAGE:
+ page = follow_page(vma, addr, FOLL_GET);
+ if (page) {
+ lock_page(page);
+ if (split_huge_pud_page(page)) {
+					pr_debug("%s: failed to split PUD huge page\n", __func__);
+ ret = -EBUSY;
+ }
+ unlock_page(page);
+ put_page(page);
+ } else
+ ret = -ENODEV;
+ addr += HPAGE_PUD_SIZE;
+ break;
+ case MADV_PROMOTEHUGEMAP:
+ haddr = addr & HPAGE_PMD_MASK;
+ if (haddr >= start && (haddr + HPAGE_PMD_SIZE) <= end)
+ promote_huge_pmd_address(vma, haddr);
+ else
+ ret = -ENODEV;
+ addr += HPAGE_PMD_SIZE;
+ break;
+ case MADV_PROMOTEHUGEPUDMAP:
+ haddr = addr & HPAGE_PUD_MASK;
+ if (haddr >= start && (haddr + HPAGE_PUD_SIZE) <= end)
+ promote_huge_pud_address(vma, haddr);
+ else
+ ret = -ENODEV;
+ addr += HPAGE_PUD_SIZE;
+ break;
+ case MADV_PROMOTEHUGEPAGE:
+ haddr = addr & HPAGE_PMD_MASK;
+ if (haddr >= start && (haddr + HPAGE_PMD_SIZE) <= end)
+ promote_huge_page_address(vma, haddr);
+ else
+ ret = -ENODEV;
+ addr += HPAGE_PMD_SIZE;
+ break;
+ case MADV_PROMOTEHUGEPUDPAGE:
+ haddr = addr & HPAGE_PUD_MASK;
+ if (haddr >= start && (haddr + HPAGE_PUD_SIZE) <= end)
+ promote_huge_pud_page_address(vma, haddr);
+ else
+ ret = -ENODEV;
+ addr += HPAGE_PUD_SIZE;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ return ret;
+}
+
#ifdef CONFIG_MEMORY_FAILURE
/*
* Error injection support for memory error handling.
@@ -708,6 +797,15 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
case MADV_MEMDEFRAG:
case MADV_NOMEMDEFRAG:
return madvise_memdefrag(vma, prev, start, end, behavior);
+ case MADV_SPLITHUGEPAGE:
+ case MADV_PROMOTEHUGEPAGE:
+ case MADV_SPLITHUGEMAP:
+ case MADV_PROMOTEHUGEMAP:
+ case MADV_SPLITHUGEPUDPAGE:
+ case MADV_PROMOTEHUGEPUDPAGE:
+ case MADV_SPLITHUGEPUDMAP:
+ case MADV_PROMOTEHUGEPUDMAP:
+ return madvise_split_promote_hugepage(vma, prev, start, end, behavior);
default:
return madvise_behavior(vma, prev, start, end, behavior);
}
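
Because madvise_behavior_valid() rejects unrecognized advice with -EINVAL before any range is touched, an application can probe for these values at runtime. A sketch, assuming the numbering from the uapi hunk above (values otherwise unused on the base kernel this patch targets) and the existing madvise() behavior that a zero-length range is validated but is otherwise a no-op:

#include <errno.h>
#include <sys/mman.h>

#define MADV_SPLITHUGEPAGE	24	/* assumed value, see the uapi hunk above */

/* Returns nonzero if the running kernel recognizes MADV_SPLITHUGEPAGE.
 * A zero-length madvise() call still goes through the behavior check,
 * so an unpatched kernel fails it with EINVAL. */
static int supports_split_hugepage(void)
{
	if (madvise(NULL, 0, MADV_SPLITHUGEPAGE) == 0)
		return 1;
	return errno != EINVAL;
}
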
@@ -744,6 +842,14 @@ madvise_behavior_valid(int behavior)
#endif
case MADV_MEMDEFRAG:
case MADV_NOMEMDEFRAG:
+ case MADV_SPLITHUGEPAGE:
+ case MADV_PROMOTEHUGEPAGE:
+ case MADV_SPLITHUGEMAP:
+ case MADV_PROMOTEHUGEMAP:
+ case MADV_SPLITHUGEPUDPAGE:
+ case MADV_PROMOTEHUGEPUDPAGE:
+ case MADV_SPLITHUGEPUDMAP:
+ case MADV_PROMOTEHUGEPUDMAP:
return true;
default: