@@ -248,6 +248,7 @@
__field(unsigned long long, delta)
__field(unsigned long, total_scan)
__field(int, priority)
+ __field(unsigned int, cgroup_ino)
),
TP_fast_assign(
@@ -260,9 +261,10 @@
__entry->delta = delta;
__entry->total_scan = total_scan;
__entry->priority = priority;
+ __entry->cgroup_ino = cgroup_ino(sc->memcg->css.cgroup);
),
- TP_printk("%pS %p: nid: %d objects to shrink %ld gfp_flags %s cache items %ld delta %lld total_scan %ld priority %d",
+ TP_printk("%pS %p: nid: %d objects to shrink %ld gfp_flags %s cache items %ld delta %lld total_scan %ld priority %d cgroup_ino %u",
__entry->shrink,
__entry->shr,
__entry->nid,
@@ -271,14 +273,16 @@
__entry->cache_items,
__entry->delta,
__entry->total_scan,
- __entry->priority)
+ __entry->priority,
+ __entry->cgroup_ino)
);
TRACE_EVENT(mm_shrink_slab_end,
- TP_PROTO(struct shrinker *shr, int nid, int shrinker_retval,
- long unused_scan_cnt, long new_scan_cnt, long total_scan),
+ TP_PROTO(struct shrinker *shr, struct shrink_control *sc,
+ int shrinker_retval, long unused_scan_cnt,
+ long new_scan_cnt, long total_scan),
- TP_ARGS(shr, nid, shrinker_retval, unused_scan_cnt, new_scan_cnt,
+ TP_ARGS(shr, sc, shrinker_retval, unused_scan_cnt, new_scan_cnt,
total_scan),
TP_STRUCT__entry(
@@ -289,26 +293,29 @@
__field(long, new_scan)
__field(int, retval)
__field(long, total_scan)
+ __field(unsigned int, cgroup_ino)
),
TP_fast_assign(
__entry->shr = shr;
- __entry->nid = nid;
+ __entry->nid = sc->nid;
__entry->shrink = shr->scan_objects;
__entry->unused_scan = unused_scan_cnt;
__entry->new_scan = new_scan_cnt;
__entry->retval = shrinker_retval;
__entry->total_scan = total_scan;
+ __entry->cgroup_ino = cgroup_ino(sc->memcg->css.cgroup);
),
- TP_printk("%pS %p: nid: %d unused scan count %ld new scan count %ld total_scan %ld last shrinker return val %d",
+ TP_printk("%pS %p: nid: %d unused scan count %ld new scan count %ld total_scan %ld last shrinker return val %d cgroup_ino %u",
__entry->shrink,
__entry->shr,
__entry->nid,
__entry->unused_scan,
__entry->new_scan,
__entry->total_scan,
- __entry->retval)
+ __entry->retval,
+ __entry->cgroup_ino)
);
TRACE_EVENT(mm_vmscan_lru_isolate,
@@ -578,7 +578,8 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
else
new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
- trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
+ trace_mm_shrink_slab_end(shrinker, shrinkctl, freed, nr, new_nr,
+ total_scan);
return freed;
}
There may be many containers deployed on one host. But we only want to trace the slab caches in a specified container sometimes. The exposed cgroup_ino in mm_shrink_slab_{start, end} tracepoints can help us. It can be used as below, step 1, get the inode of the specified cgroup $ ls -di /tmp/cgroupv2/foo step 2, set this inode into tracepoint filter to trace this cgroup only (assume the inode is 11) $ cd /sys/kernel/debug/tracing/events/vmscan/ $ echo 'cgroup_ino == 11' > mm_shrink_slab_start/filter $ echo 'cgroup_ino == 11' > mm_shrink_slab_end/filter Signed-off-by: Yafang Shao <laoar.shao@gmail.com> --- include/trace/events/vmscan.h | 23 +++++++++++++++-------- mm/vmscan.c | 3 ++- 2 files changed, 17 insertions(+), 9 deletions(-)