@@ -791,7 +791,8 @@ extern bool hugetlb_free_vmemmap_enabled;
static inline bool is_hugetlb_free_vmemmap_enabled(void)
{
- return hugetlb_free_vmemmap_enabled;
+ return hugetlb_free_vmemmap_enabled &&
+ is_power_of_2(sizeof(struct page));
}
#else
static inline bool is_hugetlb_free_vmemmap_enabled(void)
@@ -250,6 +250,13 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
BUILD_BUG_ON(NR_USED_SUBPAGE >=
RESERVE_VMEMMAP_SIZE / sizeof(struct page));
+ /*
+ * The compiler can help us to optimize this function to null
+ * when the size of struct page is not a power of 2.
+ */
+ if (!is_power_of_2(sizeof(struct page)))
+ return;
+
if (!hugetlb_free_vmemmap_enabled)
return;
@@ -17,11 +17,12 @@ void hugetlb_vmemmap_init(struct hstate *h);
/*
* How many vmemmap pages associated with a HugeTLB page that can be freed
- * to the buddy allocator.
+ * to the buddy allocator. The is_power_of_2() check lets the compiler
+ * optimize the code away when the size of struct page is not a power of 2.
*/
static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
{
- return h->nr_free_vmemmap_pages;
+ return is_power_of_2(sizeof(struct page)) ? h->nr_free_vmemmap_pages : 0;
}
#else
static inline void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
We cannot optimize the code if a "struct page" crosses page boundaries. If the size of struct page is a power of 2, a struct page never crosses page boundaries and we can optimize the code with the help of a compiler: when free_vmemmap_pages_per_hpage() returns zero, most of the functions are optimized away by the compiler. Signed-off-by: Muchun Song <songmuchun@bytedance.com> --- include/linux/hugetlb.h | 3 ++- mm/hugetlb_vmemmap.c | 7 +++++++ mm/hugetlb_vmemmap.h | 5 +++-- 3 files changed, 12 insertions(+), 3 deletions(-)