@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
+#include <linux/active_vm.h>
struct pglist_data;
struct page_ext_operations {
@@ -34,7 +34,10 @@ static void __init init_active_vm(void)
}
struct active_vm {
-	int *slab_data; /* for slab */
+	union {
+		int *slab_data;		/* for slab */
+		unsigned long page_data;	/* for page: owning active_vm item, 0 if untracked */
+	};	/* anonymous union needs the trailing semicolon to compile */
};
struct page_ext_operations active_vm_ops = {
@@ -165,3 +168,36 @@ void active_vm_slab_sub(struct kmem_cache *s, struct slab *slab, void **p, int c
}
page_ext_put(page_ext);
}
+
+void page_set_active_vm(struct page *page, unsigned int item, unsigned int order)
+{
+	struct page_ext *page_ext = page_ext_get(page);	/* takes a reference; NULL if page_ext disabled */
+	struct active_vm *av;
+
+	if (unlikely(!page_ext))
+		return;
+
+	av = (void *)(page_ext) + active_vm_ops.offset;	/* our per-page slot inside page_ext */
+	WARN_ON_ONCE(av->page_data != 0);	/* page must not already be tagged (would double-account) */
+	av->page_data = item;			/* remember owner item so the free path can unaccount */
+	page_ext_put(page_ext);
+	active_vm_item_add(item, PAGE_SIZE << order);	/* account 2^order pages to @item */
+}
+
+/* Unaccount and clear the active_vm tag set by page_set_active_vm(), if any. */
+void page_test_clear_active_vm(struct page *page, unsigned int order)
+{
+	struct page_ext *page_ext = page_ext_get(page);
+	struct active_vm *av;
+
+	if (unlikely(!page_ext))
+		return;
+
+	av = (void *)(page_ext) + active_vm_ops.offset;
+	if (!av->page_data)	/* unsigned long: "<= 0" could never catch negatives */
+		goto out;
+
+	active_vm_item_sub(av->page_data, PAGE_SIZE << order);
+	av->page_data = 0;	/* clear tag so a re-free cannot double-unaccount */
+out:
+	page_ext_put(page_ext);
+}
@@ -10,6 +10,8 @@ extern struct page_ext_operations active_vm_ops;
void active_vm_slab_add(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
void active_vm_slab_sub(struct kmem_cache *s, struct slab *slab, void **p, int cnt);
void active_vm_slab_free(struct slab *slab);
+void page_set_active_vm(struct page *page, unsigned int item, unsigned int order);
+void page_test_clear_active_vm(struct page *page, unsigned int order);
static inline int active_vm_item(void)
{
@@ -33,6 +35,7 @@ static inline void active_vm_item_sub(int item, long delta)
WARN_ON_ONCE(item <= 0);
this_cpu_sub(active_vm_stats.stat[item - 1], delta);
}
+
#else /* CONFIG_ACTIVE_VM */
static inline int active_vm_item(void)
{
@@ -58,5 +61,14 @@ static inline void active_vm_slab_sub(struct kmem_cache *s, struct slab *slab, v
static inline void active_vm_slab_free(struct slab *slab)
{
}
+
+/* item must be unsigned int to match the CONFIG_ACTIVE_VM declaration */
+static inline void page_set_active_vm(struct page *page, unsigned int item,
+				      unsigned int order)
+{
+}
+
+static inline void page_test_clear_active_vm(struct page *page, unsigned int order)
+{
+}
#endif /* CONFIG_ACTIVE_VM */
#endif /* __MM_ACTIVE_VM_H */
@@ -76,6 +76,8 @@
#include <linux/khugepaged.h>
#include <linux/buffer_head.h>
#include <linux/delayacct.h>
+#include <linux/page_ext.h>
+#include <linux/active_vm.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -83,6 +85,7 @@
#include "shuffle.h"
#include "page_reporting.h"
#include "swap.h"
+#include "active_vm.h"
/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;
@@ -1449,6 +1452,10 @@ static __always_inline bool free_pages_prepare(struct page *page,
page->mapping = NULL;
if (memcg_kmem_enabled() && PageMemcgKmem(page))
__memcg_kmem_uncharge_page(page, order);
+
+ if (active_vm_enabled())
+ page_test_clear_active_vm(page, order);
+
if (check_free && free_page_is_bad(page))
bad++;
if (bad)
@@ -5577,6 +5584,13 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
page = NULL;
}
+ if (active_vm_enabled() && (gfp & __GFP_ACCOUNT) && page) {
+ int active_vm = active_vm_item();
+
+ if (active_vm > 0)
+ page_set_active_vm(page, active_vm, order);
+ }
+
trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
kmsan_alloc_page(page, order, alloc_gfp);
Account active vm for page allocation and unaccount then page is freed. We can reuse the slab_data in struct active_vm to store the information of page allocation. Signed-off-by: Yafang Shao <laoar.shao@gmail.com> --- include/linux/page_ext.h | 1 + mm/active_vm.c | 38 +++++++++++++++++++++++++++++++++++++- mm/active_vm.h | 12 ++++++++++++ mm/page_alloc.c | 14 ++++++++++++++ 4 files changed, 64 insertions(+), 1 deletion(-)