diff --git a/mm/active_vm.c b/mm/active_vm.c
--- a/mm/active_vm.c
+++ b/mm/active_vm.c
@@ -1,6 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/mm.h>
#include <linux/page_ext.h>
#include <linux/active_vm.h>
+#include <linux/slab.h>
+
+#include "active_vm.h"
+#include "slab.h"
static bool __active_vm_enabled __initdata =
IS_ENABLED(CONFIG_ACTIVE_VM);
@@ -28,7 +33,12 @@ static void __init init_active_vm(void)
static_branch_disable(&active_vm_disabled);
}
+struct active_vm {
+	int *slab_data;	/* one active_vm item id per object in the slab */
+};
+
struct page_ext_operations active_vm_ops = {
+ .size = sizeof(struct active_vm),
.need = need_active_vm,
.init = init_active_vm,
};
@@ -54,3 +64,109 @@ long active_vm_item_sum(int item)
return sum;
}
+
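+/* Return the slab's per-object item vector; NULL if disabled or not installed. */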
+static int *active_vm_from_slab(struct page_ext *page_ext)
+{
+ struct active_vm *av;
+
+ if (static_branch_likely(&active_vm_disabled))
+ return NULL;
+
+ av = (void *)(page_ext) + active_vm_ops.offset;
+ return READ_ONCE(av->slab_data);
+}
+
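+/* Release the per-object item vector when the backing slab itself is freed. */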
+void active_vm_slab_free(struct slab *slab)
+{
+ struct page_ext *page_ext;
+ struct active_vm *av;
+ struct page *page;
+
+ page = slab_page(slab);
+ page_ext = page_ext_get(page);
+ if (!page_ext)
+ return;
+
+ av = (void *)(page_ext) + active_vm_ops.offset;
+ kfree(av->slab_data);
+ av->slab_data = NULL;
+ page_ext_put(page_ext);
+}
+
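+/* Atomically install @new as the slab's vector; false if another CPU won the race. */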
+static bool active_vm_slab_cmpxchg(struct page_ext *page_ext, int *new)
+{
+ struct active_vm *av;
+
+ av = (void *)(page_ext) + active_vm_ops.offset;
+ return cmpxchg(&av->slab_data, NULL, new) == NULL;
+}
+
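+/* Charge each newly allocated object to the current active_vm item. */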
+void active_vm_slab_add(struct kmem_cache *s, gfp_t flags, size_t size, void **p)
+{
+ struct page_ext *page_ext;
+ struct slab *slab;
+ struct page *page;
+ int *vec;
+ int item;
+ int off;
+	size_t i;
+
+ item = active_vm_item();
+ for (i = 0; i < size; i++) {
+ slab = virt_to_slab(p[i]);
+ page = slab_page(slab);
+ page_ext = page_ext_get(page);
+
+ if (!page_ext)
+ continue;
+
+ off = obj_to_index(s, slab, p[i]);
+ vec = active_vm_from_slab(page_ext);
+ if (!vec) {
+ vec = kcalloc_node(objs_per_slab(s, slab), sizeof(int),
+ flags & ~__GFP_ACCOUNT, slab_nid(slab));
+ if (!vec) {
+ page_ext_put(page_ext);
+ continue;
+ }
+
+ if (!active_vm_slab_cmpxchg(page_ext, vec)) {
+ kfree(vec);
+ vec = active_vm_from_slab(page_ext);
+ }
+ }
+
+ vec[off] = item;
+ active_vm_item_add(item, obj_full_size(s));
+ page_ext_put(page_ext);
+ }
+}
+
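+/* Uncharge objects being freed, using the item ids recorded at allocation. */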
+void active_vm_slab_sub(struct kmem_cache *s, struct slab *slab, void **p, int cnt)
+{
+ struct page *page = slab_page(slab);
+ struct page_ext *page_ext = page_ext_get(page);
+ int *vec;
+ int off;
+ int i;
+
+ if (!page_ext)
+ return;
+
+	vec = active_vm_from_slab(page_ext);
+	if (vec) {
+		for (i = 0; i < cnt; i++) {
+			off = obj_to_index(s, slab, p[i]);
+			if (vec[off] > 0) {
+				active_vm_item_sub(vec[off], obj_full_size(s));
+				vec[off] = 0;
+			}
+		}
+	}
+ page_ext_put(page_ext);
+}
diff --git a/mm/active_vm.h b/mm/active_vm.h
--- a/mm/active_vm.h
+++ b/mm/active_vm.h
@@ -4,8 +4,12 @@
#ifdef CONFIG_ACTIVE_VM
#include <linux/active_vm.h>
+#include <linux/page_ext.h>
extern struct page_ext_operations active_vm_ops;
+void active_vm_slab_add(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
+void active_vm_slab_sub(struct kmem_cache *s, struct slab *slab, void **p, int cnt);
+void active_vm_slab_free(struct slab *slab);
static inline int active_vm_item(void)
{
@@ -42,5 +46,17 @@ static inline void active_vm_item_add(int item, long delta)
static inline void active_vm_item_sub(int item, long delta)
{
}
+
+static inline void active_vm_slab_add(struct kmem_cache *s, gfp_t flags, size_t size, void **p)
+{
+}
+
+static inline void active_vm_slab_sub(struct kmem_cache *s, struct slab *slab, void **p, int cnt)
+{
+}
+
+static inline void active_vm_slab_free(struct slab *slab)
+{
+}
#endif /* CONFIG_ACTIVE_VM */
#endif /* __MM_ACTIVE_VM_H */
diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -232,6 +232,8 @@ struct kmem_cache {
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/list_lru.h>
+#include <linux/active_vm.h>
+#include "active_vm.h"
/*
* State of the slab allocator.
@@ -644,6 +646,10 @@ static __always_inline void unaccount_slab(struct slab *slab, int order,
if (memcg_kmem_enabled())
memcg_free_slab_cgroups(slab);
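+	/* The slab is going away, so drop its per-object active_vm vector. */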
+ if (active_vm_enabled())
+ active_vm_slab_free(slab);
+
mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
-(PAGE_SIZE << order));
}
@@ -742,6 +748,9 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
kmsan_slab_alloc(s, p[i], flags);
}
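+	/* Only accounted allocations made while an item is active are tracked. */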
+ if (active_vm_enabled() && (flags & __GFP_ACCOUNT) && active_vm_item() > 0)
+ active_vm_slab_add(s, flags, size, p);
memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -45,6 +45,7 @@
#include <trace/events/kmem.h>
#include "internal.h"
+#include "active_vm.h"
/*
* Lock order:
@@ -3654,6 +3655,8 @@ static __always_inline void slab_free(struct kmem_cache *s, struct slab *slab,
unsigned long addr)
{
memcg_slab_free_hook(s, slab, p, cnt);
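+	/* Uncharge the objects from the active_vm items recorded at allocation. */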
+ active_vm_slab_sub(s, slab, p, cnt);
/*
* With KASAN enabled slab_free_freelist_hook modifies the freelist
* to remove objects, whose reuse must be delayed.
When a slab object is allocated, we mark in the slab's page extension
which active_vm item the object was charged to, and check that mark
again when the object is freed. That said, we need extra memory to
store per-object information for each slab. That information is kept
in the newly introduced page extension active_vm, so a new member is
added to struct active_vm: a vector with one item id per object slot.

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
 mm/active_vm.c | 116 ++++++++++++++++++++++++++++++++++++++++++++++++++
 mm/active_vm.h |  16 +++++++
 mm/slab.h      |   9 +++++
 mm/slub.c      |   3 +
 4 files changed, 144 insertions(+)
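For intuition, here is a minimal userspace sketch of the scheme above
(names such as OBJS_PER_SLAB, slab_add() and slab_sub() are made up for
illustration; this is not the kernel code): one int per object slot
records the item each object was charged to, and the free path consults
it to uncharge.

/*
 * Toy model: slab_vec plays the role of active_vm->slab_data,
 * item_sum[] the per-item byte counters.
 */
#include <stdio.h>
#include <stdlib.h>

#define OBJS_PER_SLAB	8	/* stand-in for objs_per_slab() */
#define OBJ_SIZE	64	/* stand-in for obj_full_size() */

static long item_sum[4];	/* per-item byte counters */
static int *slab_vec;		/* one item id per object slot */

static void slab_add(int slot, int item)
{
	if (!slab_vec)		/* lazy allocation, like the kcalloc_node() path */
		slab_vec = calloc(OBJS_PER_SLAB, sizeof(int));
	if (!slab_vec)
		return;
	slab_vec[slot] = item;
	item_sum[item] += OBJ_SIZE;
}

static void slab_sub(int slot)
{
	if (slab_vec && slab_vec[slot] > 0) {
		item_sum[slab_vec[slot]] -= OBJ_SIZE;
		slab_vec[slot] = 0;	/* slot can be reused by a new object */
	}
}

int main(void)
{
	slab_add(0, 1);		/* object 0 charged to item 1 */
	slab_add(3, 2);		/* object 3 charged to item 2 */
	slab_sub(0);		/* freeing object 0 uncharges item 1 */
	printf("item1=%ld item2=%ld\n", item_sum[1], item_sum[2]);
	free(slab_vec);
	return 0;
}

Freeing an object whose slot was never charged is a no-op (the entry
stays 0), which is why the kernel side only records objects allocated
with __GFP_ACCOUNT while an item is active.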