@@ -505,6 +505,18 @@ static inline void get_page(struct page *page)
 	atomic_inc(&page->_count);
 }
 
+static inline __must_check bool try_get_page(struct page *page)
+{
+	if (unlikely(PageTail(page)))
+		if (likely(__get_page_tail(page)))
+			return true;
+
+	if (WARN_ON_ONCE(atomic_read(&page->_count) <= 0))
+		return false;
+	atomic_inc(&page->_count);
+	return true;
+}
+
 static inline struct page *virt_to_head_page(const void *x)
 {
 	struct page *page = virt_to_page(x);
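
Note (not part of the patch itself): a minimal caller sketch, assuming a hypothetical GUP-style helper named pin_one_page(). Because try_get_page() is __must_check and returns false when page->_count is zero or negative, a caller that previously did an unconditional get_page() now has to refuse the reference instead of incrementing an overflowed or already-freed count.

/* Hypothetical caller, for illustration only; not taken from the patch. */
static int pin_one_page(struct page *page, struct page **pages, int *nr)
{
	if (!try_get_page(page))
		return 0;	/* _count was <= 0: overflowed or freed, back off */
	pages[(*nr)++] = page;
	return 1;
}
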
@@ -112,6 +112,29 @@ static inline void get_page_foll(struct page *page)
 	}
 }
 
+static inline __must_check bool try_get_page_foll(struct page *page)
+{
+	if (unlikely(PageTail(page))) {
+		if (WARN_ON_ONCE(atomic_read(&compound_head(page)->_count) <= 0))
+			return false;
+		/*
+		 * This is safe only because
+		 * __split_huge_page_refcount() can't run under
+		 * get_page_foll() because we hold the proper PT lock.
+		 */
+		__get_page_tail_foll(page, true);
+	} else {
+		/*
+		 * Getting a normal page or the head of a compound page
+		 * requires to already have an elevated page->_count.
+		 */
+		if (WARN_ON_ONCE(atomic_read(&page->_count) <= 0))
+			return false;
+		atomic_inc(&page->_count);
+	}
+	return true;
+}
+
 extern unsigned long highest_memmap_pfn;
 
 /*
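
Again for illustration only, not part of the patch: try_get_page_foll() is meant for follow_page()-style paths that run with the page table lock held, which is what makes the tail-page branch above safe. A sketch of such a call site, using the real FOLL_GET flag and ERR_PTR() convention but a hypothetical helper name, turns a failed reference grab into an error instead of blindly bumping page->_count:

/* Hypothetical call-site sketch; the helper name is illustrative. */
static struct page *grab_followed_page(struct page *page, unsigned int flags)
{
	/* Caller holds the PT lock that keeps the compound page stable. */
	if ((flags & FOLL_GET) && !try_get_page_foll(page))
		return ERR_PTR(-ENOMEM);	/* refcount unusable, fail the lookup */
	return page;
}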