@@ -8,8 +8,6 @@
#include <linux/tracepoint-defs.h>
DECLARE_TRACEPOINT(page_ref_init);
-DECLARE_TRACEPOINT(page_ref_mod);
-DECLARE_TRACEPOINT(page_ref_mod_and_test);
DECLARE_TRACEPOINT(page_ref_mod_and_return);
DECLARE_TRACEPOINT(page_ref_add_unless);
DECLARE_TRACEPOINT(page_ref_freeze);
@@ -27,8 +25,6 @@ DECLARE_TRACEPOINT(page_ref_unfreeze);
#define page_ref_tracepoint_active(t) tracepoint_enabled(t)
extern void __page_ref_init(struct page *page);
-extern void __page_ref_mod(struct page *page, int v);
-extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_add_unless(struct page *page, int v, int u, int ret);
extern void __page_ref_freeze(struct page *page, int v, int ret);
@@ -41,12 +37,6 @@ extern void __page_ref_unfreeze(struct page *page, int v);
static inline void __page_ref_init(struct page *page)
{
}
-static inline void __page_ref_mod(struct page *page, int v)
-{
-}
-static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
-{
-}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
@@ -127,12 +117,7 @@ static inline int page_ref_add_return(struct page *page, int nr)
static inline void page_ref_add(struct page *page, int nr)
{
- int old_val = atomic_fetch_add(nr, &page->_refcount);
- int new_val = old_val + nr;
-
- VM_BUG_ON_PAGE((unsigned int)new_val < (unsigned int)old_val, page);
- if (page_ref_tracepoint_active(page_ref_mod))
- __page_ref_mod(page, nr);
+ page_ref_add_return(page, nr);
}
static inline void folio_ref_add(struct folio *folio, int nr)
@@ -140,30 +125,25 @@ static inline void folio_ref_add(struct folio *folio, int nr)
page_ref_add(&folio->page, nr);
}
-static inline void page_ref_sub(struct page *page, int nr)
+static inline int page_ref_sub_return(struct page *page, int nr)
{
int old_val = atomic_fetch_sub(nr, &page->_refcount);
int new_val = old_val - nr;
VM_BUG_ON_PAGE((unsigned int)new_val > (unsigned int)old_val, page);
- if (page_ref_tracepoint_active(page_ref_mod))
- __page_ref_mod(page, -nr);
+ if (page_ref_tracepoint_active(page_ref_mod_and_return))
+ __page_ref_mod_and_return(page, -nr, new_val);
+ return new_val;
}
-static inline void folio_ref_sub(struct folio *folio, int nr)
+static inline void page_ref_sub(struct page *page, int nr)
{
- page_ref_sub(&folio->page, nr);
+ page_ref_sub_return(page, nr);
}
-static inline int page_ref_sub_return(struct page *page, int nr)
+static inline void folio_ref_sub(struct folio *folio, int nr)
{
- int old_val = atomic_fetch_sub(nr, &page->_refcount);
- int new_val = old_val - nr;
-
- VM_BUG_ON_PAGE((unsigned int)new_val > (unsigned int)old_val, page);
- if (page_ref_tracepoint_active(page_ref_mod_and_return))
- __page_ref_mod_and_return(page, -nr, new_val);
- return new_val;
+ page_ref_sub(&folio->page, nr);
}
static inline int folio_ref_sub_return(struct folio *folio, int nr)
@@ -171,14 +151,20 @@ static inline int folio_ref_sub_return(struct folio *folio, int nr)
return page_ref_sub_return(&folio->page, nr);
}
-static inline void page_ref_inc(struct page *page)
+static inline int page_ref_inc_return(struct page *page)
{
int old_val = atomic_fetch_inc(&page->_refcount);
int new_val = old_val + 1;
VM_BUG_ON_PAGE((unsigned int)new_val < (unsigned int)old_val, page);
- if (page_ref_tracepoint_active(page_ref_mod))
- __page_ref_mod(page, 1);
+ if (page_ref_tracepoint_active(page_ref_mod_and_return))
+ __page_ref_mod_and_return(page, 1, new_val);
+ return new_val;
+}
+
+static inline void page_ref_inc(struct page *page)
+{
+ page_ref_inc_return(page);
}
static inline void folio_ref_inc(struct folio *folio)
@@ -186,14 +172,20 @@ static inline void folio_ref_inc(struct folio *folio)
page_ref_inc(&folio->page);
}
-static inline void page_ref_dec(struct page *page)
+static inline int page_ref_dec_return(struct page *page)
{
int old_val = atomic_fetch_dec(&page->_refcount);
int new_val = old_val - 1;
VM_BUG_ON_PAGE((unsigned int)new_val > (unsigned int)old_val, page);
- if (page_ref_tracepoint_active(page_ref_mod))
- __page_ref_mod(page, -1);
+ if (page_ref_tracepoint_active(page_ref_mod_and_return))
+ __page_ref_mod_and_return(page, -1, new_val);
+ return new_val;
+}
+
+static inline void page_ref_dec(struct page *page)
+{
+ page_ref_dec_return(page);
}
static inline void folio_ref_dec(struct folio *folio)
@@ -203,14 +195,7 @@ static inline void folio_ref_dec(struct folio *folio)
static inline int page_ref_sub_and_test(struct page *page, int nr)
{
- int old_val = atomic_fetch_sub(nr, &page->_refcount);
- int new_val = old_val - nr;
- int ret = new_val == 0;
-
- VM_BUG_ON_PAGE((unsigned int)new_val > (unsigned int)old_val, page);
- if (page_ref_tracepoint_active(page_ref_mod_and_test))
- __page_ref_mod_and_test(page, -nr, ret);
- return ret;
+ return page_ref_sub_return(page, nr) == 0;
}
static inline int folio_ref_sub_and_test(struct folio *folio, int nr)
@@ -218,17 +203,6 @@ static inline int folio_ref_sub_and_test(struct folio *folio, int nr)
return page_ref_sub_and_test(&folio->page, nr);
}
-static inline int page_ref_inc_return(struct page *page)
-{
- int old_val = atomic_fetch_inc(&page->_refcount);
- int new_val = old_val + 1;
-
- VM_BUG_ON_PAGE((unsigned int)new_val < (unsigned int)old_val, page);
- if (page_ref_tracepoint_active(page_ref_mod_and_return))
- __page_ref_mod_and_return(page, 1, new_val);
- return new_val;
-}
-
static inline int folio_ref_inc_return(struct folio *folio)
{
return page_ref_inc_return(&folio->page);
@@ -236,14 +210,7 @@ static inline int folio_ref_inc_return(struct folio *folio)
static inline int page_ref_dec_and_test(struct page *page)
{
- int old_val = atomic_fetch_dec(&page->_refcount);
- int new_val = old_val - 1;
- int ret = new_val == 0;
-
- VM_BUG_ON_PAGE((unsigned int)new_val > (unsigned int)old_val, page);
- if (page_ref_tracepoint_active(page_ref_mod_and_test))
- __page_ref_mod_and_test(page, -1, ret);
- return ret;
+ return page_ref_dec_return(page) == 0;
}
static inline int folio_ref_dec_and_test(struct folio *folio)
@@ -251,17 +218,6 @@ static inline int folio_ref_dec_and_test(struct folio *folio)
return page_ref_dec_and_test(&folio->page);
}
-static inline int page_ref_dec_return(struct page *page)
-{
- int old_val = atomic_fetch_dec(&page->_refcount);
- int new_val = old_val - 1;
-
- VM_BUG_ON_PAGE((unsigned int)new_val > (unsigned int)old_val, page);
- if (page_ref_tracepoint_active(page_ref_mod_and_return))
- __page_ref_mod_and_return(page, -1, new_val);
- return new_val;
-}
-
static inline int folio_ref_dec_return(struct folio *folio)
{
return page_ref_dec_return(&folio->page);
@@ -49,7 +49,7 @@ DEFINE_EVENT(page_ref_init_template, page_ref_init,
TP_ARGS(page)
);
-DECLARE_EVENT_CLASS(page_ref_mod_template,
+DECLARE_EVENT_CLASS(page_ref_unfreeze_template,
TP_PROTO(struct page *page, int v),
@@ -83,14 +83,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_template,
__entry->val)
);
-DEFINE_EVENT(page_ref_mod_template, page_ref_mod,
-
- TP_PROTO(struct page *page, int v),
-
- TP_ARGS(page, v)
-);
-
-DECLARE_EVENT_CLASS(page_ref_mod_and_test_template,
+DECLARE_EVENT_CLASS(page_ref_mod_template,
TP_PROTO(struct page *page, int v, int ret),
@@ -163,14 +156,7 @@ DECLARE_EVENT_CLASS(page_ref_add_unless_template,
__entry->val, __entry->unless, __entry->ret)
);
-DEFINE_EVENT(page_ref_mod_and_test_template, page_ref_mod_and_test,
-
- TP_PROTO(struct page *page, int v, int ret),
-
- TP_ARGS(page, v, ret)
-);
-
-DEFINE_EVENT(page_ref_mod_and_test_template, page_ref_mod_and_return,
+DEFINE_EVENT(page_ref_mod_template, page_ref_mod_and_return,
TP_PROTO(struct page *page, int v, int ret),
@@ -184,14 +170,14 @@ DEFINE_EVENT(page_ref_add_unless_template, page_ref_add_unless,
TP_ARGS(page, v, u, ret)
);
-DEFINE_EVENT(page_ref_mod_and_test_template, page_ref_freeze,
+DEFINE_EVENT(page_ref_mod_template, page_ref_freeze,
TP_PROTO(struct page *page, int v, int ret),
TP_ARGS(page, v, ret)
);
-DEFINE_EVENT(page_ref_mod_template, page_ref_unfreeze,
+DEFINE_EVENT(page_ref_unfreeze_template, page_ref_unfreeze,
TP_PROTO(struct page *page, int v),
@@ -12,20 +12,6 @@ void __page_ref_init(struct page *page)
EXPORT_SYMBOL(__page_ref_init);
EXPORT_TRACEPOINT_SYMBOL(page_ref_init);
-void __page_ref_mod(struct page *page, int v)
-{
- trace_page_ref_mod(page, v);
-}
-EXPORT_SYMBOL(__page_ref_mod);
-EXPORT_TRACEPOINT_SYMBOL(page_ref_mod);
-
-void __page_ref_mod_and_test(struct page *page, int v, int ret)
-{
- trace_page_ref_mod_and_test(page, v, ret);
-}
-EXPORT_SYMBOL(__page_ref_mod_and_test);
-EXPORT_TRACEPOINT_SYMBOL(page_ref_mod_and_test);
-
void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
trace_page_ref_mod_and_return(page, v, ret);
Now that we are using atomic_fetch* variants to add/sub/inc/dec page _refcount, it makes sense to combine the page_ref_* return and non-return functions. Also remove some extra tracepoints for the non-return variants. This improves traceability by always recording the new _refcount value after the modification has occurred.

Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
---
 include/linux/page_ref.h        | 102 +++++++++-----------------------
 include/trace/events/page_ref.h |  24 ++------
 mm/debug_page_ref.c             |  14 -----
 3 files changed, 34 insertions(+), 106 deletions(-)
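
For reference, a minimal, self-contained sketch of the same pattern outside the kernel is below. It is an illustration, not the patch itself: struct obj and the obj_ref_* helpers are made-up names, C11 atomics stand in for the kernel's atomic_t API, assert() stands in for VM_BUG_ON_PAGE(), and no tracepoint is wired up.

/*
 * Illustrative sketch only -- not kernel code. It mirrors the pattern the
 * patch applies to page_ref_*: a single *_return helper performs the atomic
 * fetch-and-modify, sanity-checks the result against the old value, and
 * reports the new value; the void and *_and_test variants are thin wrappers
 * around it.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

struct obj {
	atomic_int refcount;
};

/* Single source of truth: modify, check for underflow, return new value. */
static inline int obj_ref_sub_return(struct obj *o, int nr)
{
	int old_val = atomic_fetch_sub(&o->refcount, nr);
	int new_val = old_val - nr;

	/* Underflow check, analogous to VM_BUG_ON_PAGE() in the patch. */
	assert((unsigned int)new_val <= (unsigned int)old_val);
	/* A tracepoint would fire here with the new value, as in the patch. */
	return new_val;
}

/* Non-return variant becomes a thin wrapper, as in the patch. */
static inline void obj_ref_sub(struct obj *o, int nr)
{
	obj_ref_sub_return(o, nr);
}

/* The *_and_test variant reuses the same helper. */
static inline int obj_ref_sub_and_test(struct obj *o, int nr)
{
	return obj_ref_sub_return(o, nr) == 0;
}

int main(void)
{
	struct obj o = { .refcount = 3 };

	obj_ref_sub(&o, 1);                                    /* 3 -> 2 */
	printf("now %d\n", obj_ref_sub_return(&o, 1));         /* "now 1" */
	printf("freed? %d\n", obj_ref_sub_and_test(&o, 1));    /* "freed? 1" */
	return 0;
}

The point is the one the commit message makes: with fetch-and-modify atomics the new value is always at hand, so the non-return and *_and_test variants can all be expressed through one *_return helper, and only the richer page_ref_mod_and_return style tracepoint needs to remain.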