[2/6] mm/huge_memory.c: make get_huge_zero_page() return bool

Message ID 20210313103225.16607-4-linmiaohe@huawei.com
State New, archived
Series Some cleanups for huge_memory

Commit Message

Miaohe Lin March 13, 2021, 10:32 a.m. UTC
It's guaranteed that huge_zero_page will not be NULL if huge_zero_refcount
is successfully increased, so whenever READ_ONCE(huge_zero_page) is
returned here there must be a valid huge zero page. Since the return value
is only used to check whether the huge zero page exists, not to access the
page itself, make get_huge_zero_page() return bool and drop the
unnecessary READ_ONCE(huge_zero_page) loads.

Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
---
 mm/huge_memory.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

Patch

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 3303576489d5..6d13ca5441e2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -77,18 +77,18 @@  bool transparent_hugepage_enabled(struct vm_area_struct *vma)
 	return false;
 }
 
-static struct page *get_huge_zero_page(void)
+static bool get_huge_zero_page(void)
 {
 	struct page *zero_page;
 retry:
 	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
-		return READ_ONCE(huge_zero_page);
+		return true;
 
 	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
 			HPAGE_PMD_ORDER);
 	if (!zero_page) {
 		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
-		return NULL;
+		return false;
 	}
 	count_vm_event(THP_ZERO_PAGE_ALLOC);
 	preempt_disable();
@@ -101,7 +101,7 @@  static struct page *get_huge_zero_page(void)
 	/* We take additional reference here. It will be put back by shrinker */
 	atomic_set(&huge_zero_refcount, 2);
 	preempt_enable();
-	return READ_ONCE(huge_zero_page);
+	return true;
 }
 
 static void put_huge_zero_page(void)
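
For context, the caller of this helper is mm_get_huge_zero_page(), which
only needs to know whether a huge zero page exists before taking its
per-mm reference, and reads the page pointer separately via
READ_ONCE(huge_zero_page). A simplified sketch of that caller with the
bool return, based on mm/huge_memory.c around this series (not part of
this patch):

	struct page *mm_get_huge_zero_page(struct mm_struct *mm)
	{
		/* This mm already holds a reference; the page cannot go away. */
		if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
			return READ_ONCE(huge_zero_page);

		/* Only the existence of the page matters here, hence bool. */
		if (!get_huge_zero_page())
			return NULL;

		/*
		 * Another thread raced us and set the bit first; drop the
		 * reference we just took.
		 */
		if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
			put_huge_zero_page();

		return READ_ONCE(huge_zero_page);
	}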