
[04/11] cgroup: introduce cgroup_rstat_ops

Message ID 20250218031448.46951-5-inwardvessel@gmail.com (mailing list archive)
State New
Series cgroup: separate rstat trees

Commit Message

JP Kobryn Feb. 18, 2025, 3:14 a.m. UTC
The cgroup_rstat_ops interface provides a way for type-specific
operations to be hidden from the common rstat code. Use it to
decouple the cgroup_subsys_state type from the internal rstat
updated/flush routines. The new ops interface also allows for greater
extensibility with respect to future changes: public updated/flush
APIs can be created that accept arbitrary types, as long as the type
has an associated ops interface.

Signed-off-by: JP Kobryn <inwardvessel@gmail.com>
---
 kernel/cgroup/rstat.c | 131 +++++++++++++++++++++++++++---------------
 1 file changed, 85 insertions(+), 46 deletions(-)
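
As a rough illustration of the extensibility mentioned in the commit
message (hypothetical sketch, not part of this patch): a future rstat
user whose type embeds struct cgroup_rstat could supply its own ops
instance plus a typed entry point beside the existing wrappers in
kernel/cgroup/rstat.c. The struct foo, foo_parent() and foo_flush_cpu()
names below are made up for the example.

/* Hypothetical type embedding the shared rstat node. */
struct foo {
	struct cgroup *cgrp;		/* cgroup this object accounts against */
	struct cgroup_rstat rstat;	/* embedded rstat node */
};

static struct cgroup_rstat *rstat_parent_via_foo(struct cgroup_rstat *rstat)
{
	struct foo *f = container_of(rstat, struct foo, rstat);
	struct foo *parent = foo_parent(f);	/* hypothetical helper */

	return parent ? &parent->rstat : NULL;
}

static struct cgroup *rstat_cgroup_via_foo(struct cgroup_rstat *rstat)
{
	return container_of(rstat, struct foo, rstat)->cgrp;
}

static void rstat_flush_via_foo(struct cgroup_rstat *rstat, int cpu)
{
	/* hypothetical per-type flush of this cpu's pending deltas */
	foo_flush_cpu(container_of(rstat, struct foo, rstat), cpu);
}

static struct cgroup_rstat_ops rstat_foo_ops = {
	.parent_fn	= rstat_parent_via_foo,
	.cgroup_fn	= rstat_cgroup_via_foo,
	.flush_fn	= rstat_flush_via_foo,
};

/* A public, typed flush API would then forward to the common code. */
void foo_rstat_flush(struct foo *f)
{
	__cgroup_rstat_flush(&f->rstat, &rstat_foo_ops);
}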

Comments

kernel test robot Feb. 19, 2025, 7:21 a.m. UTC | #1
Hi JP,

kernel test robot noticed the following build warnings:

[auto build test WARNING on tj-cgroup/for-next]
[also build test WARNING on bpf-next/master bpf/master linus/master v6.14-rc3 next-20250219]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/JP-Kobryn/cgroup-move-rstat-pointers-into-struct-of-their-own/20250218-111725
base:   https://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git for-next
patch link:    https://lore.kernel.org/r/20250218031448.46951-5-inwardvessel%40gmail.com
patch subject: [PATCH 04/11] cgroup: introduce cgroup_rstat_ops
config: arc-randconfig-002-20250219 (https://download.01.org/0day-ci/archive/20250219/202502191558.xCTZRkPs-lkp@intel.com/config)
compiler: arc-elf-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250219/202502191558.xCTZRkPs-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202502191558.xCTZRkPs-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> kernel/cgroup/rstat.c:210: warning: Function parameter or struct member 'ops' not described in 'cgroup_rstat_push_children'
>> kernel/cgroup/rstat.c:269: warning: Function parameter or struct member 'ops' not described in 'cgroup_rstat_updated_list'
>> kernel/cgroup/rstat.c:465: warning: Function parameter or struct member 'ops' not described in '__cgroup_rstat_flush_release'
   kernel/cgroup/rstat.c:465: warning: expecting prototype for cgroup_rstat_flush_release(). Prototype was for __cgroup_rstat_flush_release() instead


vim +210 kernel/cgroup/rstat.c

3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  194  
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  195  /**
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  196   * cgroup_rstat_push_children - push children cgroups into the given list
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  197   * @head: current head of the list (= subtree root)
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  198   * @child: first child of the root
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  199   * @cpu: target cpu
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  200   * Return: A new singly linked list of cgroups to be flush
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  201   *
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  202   * Iteratively traverse down the cgroup_rstat_cpu updated tree level by
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  203   * level and push all the parents first before their next level children
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  204   * into a singly linked list built from the tail backward like "pushing"
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  205   * cgroups into a stack. The root is pushed by the caller.
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  206   */
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  207  static struct cgroup_rstat *cgroup_rstat_push_children(
d67ed623c585f2 kernel/cgroup/rstat.c JP Kobryn       2025-02-17  208  	struct cgroup_rstat *head, struct cgroup_rstat *child, int cpu,
d67ed623c585f2 kernel/cgroup/rstat.c JP Kobryn       2025-02-17  209  	struct cgroup_rstat_ops *ops)
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30 @210  {
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  211  	struct cgroup_rstat *chead = child;	/* Head of child cgroup level */
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  212  	struct cgroup_rstat *ghead = NULL;	/* Head of grandchild cgroup level */
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  213  	struct cgroup_rstat *parent, *grandchild;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  214  	struct cgroup_rstat_cpu *crstatc;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  215  
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  216  	child->rstat_flush_next = NULL;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  217  
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  218  next_level:
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  219  	while (chead) {
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  220  		child = chead;
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  221  		chead = child->rstat_flush_next;
d67ed623c585f2 kernel/cgroup/rstat.c JP Kobryn       2025-02-17  222  		parent = ops->parent_fn(child);
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  223  
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  224  		/* updated_next is parent cgroup terminated */
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  225  		while (child != parent) {
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  226  			child->rstat_flush_next = head;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  227  			head = child;
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  228  			crstatc = rstat_cpu(child, cpu);
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  229  			grandchild = crstatc->updated_children;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  230  			if (grandchild != child) {
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  231  				/* Push the grand child to the next level */
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  232  				crstatc->updated_children = child;
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  233  				grandchild->rstat_flush_next = ghead;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  234  				ghead = grandchild;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  235  			}
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  236  			child = crstatc->updated_next;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  237  			crstatc->updated_next = NULL;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  238  		}
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  239  	}
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  240  
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  241  	if (ghead) {
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  242  		chead = ghead;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  243  		ghead = NULL;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  244  		goto next_level;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  245  	}
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  246  	return head;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  247  }
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  248  
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  249  /**
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  250   * cgroup_rstat_updated_list - return a list of updated cgroups to be flushed
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  251   * @root: root of the cgroup subtree to traverse
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  252   * @cpu: target cpu
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  253   * Return: A singly linked list of cgroups to be flushed
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  254   *
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  255   * Walks the updated rstat_cpu tree on @cpu from @root.  During traversal,
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  256   * each returned cgroup is unlinked from the updated tree.
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  257   *
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  258   * The only ordering guarantee is that, for a parent and a child pair
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  259   * covered by a given traversal, the child is before its parent in
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  260   * the list.
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  261   *
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  262   * Note that updated_children is self terminated and points to a list of
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  263   * child cgroups if not empty. Whereas updated_next is like a sibling link
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  264   * within the children list and terminated by the parent cgroup. An exception
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  265   * here is the cgroup root whose updated_next can be self terminated.
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  266   */
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  267  static struct cgroup_rstat *cgroup_rstat_updated_list(
d67ed623c585f2 kernel/cgroup/rstat.c JP Kobryn       2025-02-17  268  		struct cgroup_rstat *root, int cpu, struct cgroup_rstat_ops *ops)
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25 @269  {
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  270  	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  271  	struct cgroup_rstat_cpu *rstatc = rstat_cpu(root, cpu);
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  272  	struct cgroup_rstat *head = NULL, *parent, *child;
d67ed623c585f2 kernel/cgroup/rstat.c JP Kobryn       2025-02-17  273  	struct cgroup *cgrp;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  274  	unsigned long flags;
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  275  
d67ed623c585f2 kernel/cgroup/rstat.c JP Kobryn       2025-02-17  276  	cgrp = ops->cgroup_fn(root);
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  277  	flags = _cgroup_rstat_cpu_lock(cpu_lock, cpu, cgrp, false);
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  278  
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  279  	/* Return NULL if this subtree is not on-list */
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  280  	if (!rstatc->updated_next)
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  281  		goto unlock_ret;
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  282  
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  283  	/*
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  284  	 * Unlink @root from its parent. As the updated_children list is
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  285  	 * singly linked, we have to walk it to find the removal point.
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  286  	 */
d67ed623c585f2 kernel/cgroup/rstat.c JP Kobryn       2025-02-17  287  	parent = ops->parent_fn(root);
dc26532aed0ab2 kernel/cgroup/rstat.c Johannes Weiner 2021-04-29  288  	if (parent) {
dc26532aed0ab2 kernel/cgroup/rstat.c Johannes Weiner 2021-04-29  289  		struct cgroup_rstat_cpu *prstatc;
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  290  		struct cgroup_rstat **nextp;
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  291  
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  292  		prstatc = rstat_cpu(parent, cpu);
c58632b3631cb2 kernel/cgroup/rstat.c Tejun Heo       2018-04-26  293  		nextp = &prstatc->updated_children;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  294  		while (*nextp != root) {
dc26532aed0ab2 kernel/cgroup/rstat.c Johannes Weiner 2021-04-29  295  			struct cgroup_rstat_cpu *nrstatc;
dc26532aed0ab2 kernel/cgroup/rstat.c Johannes Weiner 2021-04-29  296  
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  297  			nrstatc = rstat_cpu(*nextp, cpu);
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  298  			WARN_ON_ONCE(*nextp == parent);
c58632b3631cb2 kernel/cgroup/rstat.c Tejun Heo       2018-04-26  299  			nextp = &nrstatc->updated_next;
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  300  		}
c58632b3631cb2 kernel/cgroup/rstat.c Tejun Heo       2018-04-26  301  		*nextp = rstatc->updated_next;
dc26532aed0ab2 kernel/cgroup/rstat.c Johannes Weiner 2021-04-29  302  	}
9a9e97b2f1f27e kernel/cgroup/rstat.c Tejun Heo       2018-04-26  303  
dc26532aed0ab2 kernel/cgroup/rstat.c Johannes Weiner 2021-04-29  304  	rstatc->updated_next = NULL;
e76d28bdf9ba53 kernel/cgroup/rstat.c Waiman Long     2023-11-03  305  
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  306  	/* Push @root to the list first before pushing the children */
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  307  	head = root;
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  308  	root->rstat_flush_next = NULL;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  309  	child = rstatc->updated_children;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  310  	rstatc->updated_children = root;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  311  	if (child != root)
d67ed623c585f2 kernel/cgroup/rstat.c JP Kobryn       2025-02-17  312  		head = cgroup_rstat_push_children(head, child, cpu, ops);
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  313  unlock_ret:
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  314  	_cgroup_rstat_cpu_unlock(cpu_lock, cpu, cgrp, flags, false);
e76d28bdf9ba53 kernel/cgroup/rstat.c Waiman Long     2023-11-03  315  	return head;
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  316  }
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  317
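
The kernel-doc warnings above come from the new @ops parameter not being
described, plus a comment that still names cgroup_rstat_flush_release()
above __cgroup_rstat_flush_release(). A minimal fixup (sketch only, with
placeholder wording for the @ops descriptions) could look like:

/**
 * cgroup_rstat_push_children - push children cgroups into the given list
 * @head: current head of the list (= subtree root)
 * @child: first child of the root
 * @cpu: target cpu
 * @ops: callbacks used to resolve the parent/cgroup of an rstat node
 * Return: A new singly linked list of cgroups to be flush
 * (remaining lines of the existing comment unchanged)
 */

/**
 * cgroup_rstat_updated_list - return a list of updated cgroups to be flushed
 * @root: root of the cgroup subtree to traverse
 * @cpu: target cpu
 * @ops: callbacks used to resolve the parent/cgroup of an rstat node
 * Return: A singly linked list of cgroups to be flushed
 * (remaining lines of the existing comment unchanged)
 */

/**
 * __cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
 * @rstat: rstat node used to find associated cgroup used by tracepoint
 * @ops: callbacks used to resolve that cgroup
 */
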
Shakeel Butt Feb. 20, 2025, 5:50 p.m. UTC | #2
On Mon, Feb 17, 2025 at 07:14:41PM -0800, JP Kobryn wrote:
> The cgroup_rstat_ops interface provides a way for type-specific
> operations to be hidden from the common rstat code. Use it to
> decouple the cgroup_subsys_state type from the internal rstat
> updated/flush routines. The new ops interface also allows for greater
> extensibility with respect to future changes: public updated/flush

Here you might need to be explicit about what future changes you have in
mind. Will all controllers using rstat require this ops interface, or
only some of them?

Patch

diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index a32bcd7942a5..a8bb304e49c4 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -9,6 +9,12 @@ 
 
 #include <trace/events/cgroup.h>
 
+struct cgroup_rstat_ops {
+	struct cgroup_rstat *(*parent_fn)(struct cgroup_rstat *);
+	struct cgroup *(*cgroup_fn)(struct cgroup_rstat *);
+	void (*flush_fn)(struct cgroup_rstat *, int);
+};
+
 static DEFINE_SPINLOCK(cgroup_rstat_lock);
 static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);
 
@@ -19,7 +25,17 @@  static struct cgroup_rstat_cpu *rstat_cpu(struct cgroup_rstat *rstat, int cpu)
 	return per_cpu_ptr(rstat->rstat_cpu, cpu);
 }
 
-static struct cgroup_rstat *rstat_parent(struct cgroup_rstat *rstat)
+static inline bool is_base_css(struct cgroup_subsys_state *css)
+{
+	/* css for base stats has no subsystem */
+	if (!css->ss)
+		return true;
+
+	return false;
+}
+
+static struct cgroup_rstat *rstat_parent_via_css(
+		struct cgroup_rstat *rstat)
 {
 	struct cgroup_subsys_state *css = container_of(
 			rstat, typeof(*css), rstat);
@@ -30,6 +46,33 @@  static struct cgroup_rstat *rstat_parent(struct cgroup_rstat *rstat)
 	return &(css->parent->rstat);
 }
 
+static struct cgroup *rstat_cgroup_via_css(struct cgroup_rstat *rstat)
+{
+	struct cgroup_subsys_state *css =
+		container_of(rstat, struct cgroup_subsys_state, rstat);
+
+	return css->cgroup;
+}
+
+static void rstat_flush_via_css(struct cgroup_rstat *rstat, int cpu)
+{
+	struct cgroup_subsys_state *css = container_of(
+			rstat, typeof(*css), rstat);
+
+	if (is_base_css(css)) {
+		cgroup_base_stat_flush(css->cgroup, cpu);
+		return;
+	}
+
+	css->ss->css_rstat_flush(css, cpu);
+}
+
+static struct cgroup_rstat_ops rstat_css_ops = {
+	.parent_fn = rstat_parent_via_css,
+	.cgroup_fn = rstat_cgroup_via_css,
+	.flush_fn = rstat_flush_via_css,
+};
+
 /*
  * Helper functions for rstat per CPU lock (cgroup_rstat_cpu_lock).
  *
@@ -84,11 +127,11 @@  void _cgroup_rstat_cpu_unlock(raw_spinlock_t *cpu_lock, int cpu,
 	raw_spin_unlock_irqrestore(cpu_lock, flags);
 }
 
-static void __cgroup_rstat_updated(struct cgroup_rstat *rstat, int cpu)
+static void __cgroup_rstat_updated(struct cgroup_rstat *rstat, int cpu,
+		struct cgroup_rstat_ops *ops)
 {
-	struct cgroup_subsys_state *css = container_of(
-			rstat, typeof(*css), rstat);
-	struct cgroup *cgrp = css->cgroup;
+	struct cgroup *cgrp;
+
 	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
 	unsigned long flags;
 
@@ -103,12 +146,13 @@  static void __cgroup_rstat_updated(struct cgroup_rstat *rstat, int cpu)
 	if (data_race(rstat_cpu(rstat, cpu)->updated_next))
 		return;
 
+	cgrp = ops->cgroup_fn(rstat);
 	flags = _cgroup_rstat_cpu_lock(cpu_lock, cpu, cgrp, true);
 
 	/* put @rstat and all ancestors on the corresponding updated lists */
 	while (true) {
 		struct cgroup_rstat_cpu *rstatc = rstat_cpu(rstat, cpu);
-		struct cgroup_rstat *parent = rstat_parent(rstat);
+		struct cgroup_rstat *parent = ops->parent_fn(rstat);
 		struct cgroup_rstat_cpu *prstatc;
 
 		/*
@@ -145,7 +189,7 @@  static void __cgroup_rstat_updated(struct cgroup_rstat *rstat, int cpu)
  */
 __bpf_kfunc void cgroup_rstat_updated(struct cgroup_subsys_state *css, int cpu)
 {
-	__cgroup_rstat_updated(&css->rstat, cpu);
+	__cgroup_rstat_updated(&css->rstat, cpu, &rstat_css_ops);
 }
 
 /**
@@ -161,7 +205,8 @@  __bpf_kfunc void cgroup_rstat_updated(struct cgroup_subsys_state *css, int cpu)
  * cgroups into a stack. The root is pushed by the caller.
  */
 static struct cgroup_rstat *cgroup_rstat_push_children(
-	struct cgroup_rstat *head, struct cgroup_rstat *child, int cpu)
+	struct cgroup_rstat *head, struct cgroup_rstat *child, int cpu,
+	struct cgroup_rstat_ops *ops)
 {
 	struct cgroup_rstat *chead = child;	/* Head of child cgroup level */
 	struct cgroup_rstat *ghead = NULL;	/* Head of grandchild cgroup level */
@@ -174,7 +219,7 @@  static struct cgroup_rstat *cgroup_rstat_push_children(
 	while (chead) {
 		child = chead;
 		chead = child->rstat_flush_next;
-		parent = rstat_parent(child);
+		parent = ops->parent_fn(child);
 
 		/* updated_next is parent cgroup terminated */
 		while (child != parent) {
@@ -220,16 +265,15 @@  static struct cgroup_rstat *cgroup_rstat_push_children(
  * here is the cgroup root whose updated_next can be self terminated.
  */
 static struct cgroup_rstat *cgroup_rstat_updated_list(
-		struct cgroup_rstat *root, int cpu)
+		struct cgroup_rstat *root, int cpu, struct cgroup_rstat_ops *ops)
 {
-	struct cgroup_subsys_state *css = container_of(
-			root, typeof(*css), rstat);
-	struct cgroup *cgrp = css->cgroup;
 	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
 	struct cgroup_rstat_cpu *rstatc = rstat_cpu(root, cpu);
 	struct cgroup_rstat *head = NULL, *parent, *child;
+	struct cgroup *cgrp;
 	unsigned long flags;
 
+	cgrp = ops->cgroup_fn(root);
 	flags = _cgroup_rstat_cpu_lock(cpu_lock, cpu, cgrp, false);
 
 	/* Return NULL if this subtree is not on-list */
@@ -240,7 +284,7 @@  static struct cgroup_rstat *cgroup_rstat_updated_list(
 	 * Unlink @root from its parent. As the updated_children list is
 	 * singly linked, we have to walk it to find the removal point.
 	 */
-	parent = rstat_parent(root);
+	parent = ops->parent_fn(root);
 	if (parent) {
 		struct cgroup_rstat_cpu *prstatc;
 		struct cgroup_rstat **nextp;
@@ -265,7 +309,7 @@  static struct cgroup_rstat *cgroup_rstat_updated_list(
 	child = rstatc->updated_children;
 	rstatc->updated_children = root;
 	if (child != root)
-		head = cgroup_rstat_push_children(head, child, cpu);
+		head = cgroup_rstat_push_children(head, child, cpu, ops);
 unlock_ret:
 	_cgroup_rstat_cpu_unlock(cpu_lock, cpu, cgrp, flags, false);
 	return head;
@@ -323,34 +367,30 @@  static inline void __cgroup_rstat_unlock(struct cgroup *cgrp, int cpu_in_loop)
 }
 
 /* see cgroup_rstat_flush() */
-static void cgroup_rstat_flush_locked(struct cgroup_rstat *rstat)
+static void cgroup_rstat_flush_locked(struct cgroup_rstat *rstat,
+		struct cgroup_rstat_ops *ops)
 	__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
 {
-	struct cgroup_subsys_state *css = container_of(
-			rstat, typeof(*css), rstat);
-	struct cgroup *cgrp = css->cgroup;
 	int cpu;
 
 	lockdep_assert_held(&cgroup_rstat_lock);
 
 	for_each_possible_cpu(cpu) {
-		struct cgroup_rstat *pos = cgroup_rstat_updated_list(rstat, cpu);
+		struct cgroup_rstat *pos = cgroup_rstat_updated_list(
+				rstat, cpu, ops);
 
 		for (; pos; pos = pos->rstat_flush_next) {
-			struct cgroup_subsys_state *pos_css = container_of(
-					pos, typeof(*pos_css), rstat);
-			struct cgroup *pos_cgroup = pos_css->cgroup;
-
-			if (!pos_css->ss)
-				cgroup_base_stat_flush(pos_cgroup, cpu);
-			else
-				pos_css->ss->css_rstat_flush(pos_css, cpu);
+			struct cgroup *pos_cgroup = ops->cgroup_fn(pos);
 
+			ops->flush_fn(pos, cpu);
 			bpf_rstat_flush(pos_cgroup, cgroup_parent(pos_cgroup), cpu);
 		}
 
 		/* play nice and yield if necessary */
 		if (need_resched() || spin_needbreak(&cgroup_rstat_lock)) {
+			struct cgroup *cgrp;
+
+			cgrp = ops->cgroup_fn(rstat);
 			__cgroup_rstat_unlock(cgrp, cpu);
 			if (!cond_resched())
 				cpu_relax();
@@ -359,16 +399,15 @@  static void cgroup_rstat_flush_locked(struct cgroup_rstat *rstat)
 	}
 }
 
-static void __cgroup_rstat_flush(struct cgroup_rstat *rstat)
+static void __cgroup_rstat_flush(struct cgroup_rstat *rstat,
+		struct cgroup_rstat_ops *ops)
 {
-	struct cgroup_subsys_state *css = container_of(
-			rstat, typeof(*css), rstat);
-	struct cgroup *cgrp = css->cgroup;
+	struct cgroup *cgrp;
 
 	might_sleep();
-
+	cgrp = ops->cgroup_fn(rstat);
 	__cgroup_rstat_lock(cgrp, -1);
-	cgroup_rstat_flush_locked(rstat);
+	cgroup_rstat_flush_locked(rstat, ops);
 	__cgroup_rstat_unlock(cgrp, -1);
 }
 
@@ -387,19 +426,19 @@  static void __cgroup_rstat_flush(struct cgroup_rstat *rstat)
  */
 __bpf_kfunc void cgroup_rstat_flush(struct cgroup_subsys_state *css)
 {
-	__cgroup_rstat_flush(&css->rstat);
+	__cgroup_rstat_flush(&css->rstat, &rstat_css_ops);
 }
 
-static void __cgroup_rstat_flush_hold(struct cgroup_rstat *rstat)
+static void __cgroup_rstat_flush_hold(struct cgroup_rstat *rstat,
+		struct cgroup_rstat_ops *ops)
 	__acquires(&cgroup_rstat_lock)
 {
-	struct cgroup_subsys_state *css = container_of(
-			rstat, typeof(*css), rstat);
-	struct cgroup *cgrp = css->cgroup;
+	struct cgroup *cgrp;
 
 	might_sleep();
+	cgrp = ops->cgroup_fn(rstat);
 	__cgroup_rstat_lock(cgrp, -1);
-	cgroup_rstat_flush_locked(rstat);
+	cgroup_rstat_flush_locked(rstat, ops);
 }
 
 /**
@@ -413,20 +452,20 @@  static void __cgroup_rstat_flush_hold(struct cgroup_rstat *rstat)
  */
 void cgroup_rstat_flush_hold(struct cgroup_subsys_state *css)
 {
-	__cgroup_rstat_flush_hold(&css->rstat);
+	__cgroup_rstat_flush_hold(&css->rstat, &rstat_css_ops);
 }
 
 /**
  * cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
  * @rstat: rstat node used to find associated cgroup used by tracepoint
  */
-static void __cgroup_rstat_flush_release(struct cgroup_rstat *rstat)
+static void __cgroup_rstat_flush_release(struct cgroup_rstat *rstat,
+		struct cgroup_rstat_ops *ops)
 	__releases(&cgroup_rstat_lock)
 {
-	struct cgroup_subsys_state *css = container_of(
-			rstat, typeof(*css), rstat);
-	struct cgroup *cgrp = css->cgroup;
+	struct cgroup *cgrp;
 
+	cgrp = ops->cgroup_fn(rstat);
 	__cgroup_rstat_unlock(cgrp, -1);
 }
 
@@ -436,7 +475,7 @@  static void __cgroup_rstat_flush_release(struct cgroup_rstat *rstat)
  */
 void cgroup_rstat_flush_release(struct cgroup_subsys_state *css)
 {
-	__cgroup_rstat_flush_release(&css->rstat);
+	__cgroup_rstat_flush_release(&css->rstat, &rstat_css_ops);
 }
 
 static void __cgroup_rstat_init(struct cgroup_rstat *rstat)
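
For existing controllers nothing changes at the call sites: they keep
passing a css, and the wrappers above supply &rstat_css_ops internally.
A rough caller-side sketch, assuming the css-based signatures introduced
earlier in this series (mem_cgroup is only used as a familiar example):

/* Mark this css as having pending per-cpu stats on @cpu. */
static void memcg_example_update(struct mem_cgroup *memcg, int cpu)
{
	cgroup_rstat_updated(&memcg->css, cpu);
}

/*
 * Flush the subtree; the cgroup and the per-type flush callback are
 * resolved internally through rstat_css_ops.
 */
static void memcg_example_read(struct mem_cgroup *memcg)
{
	cgroup_rstat_flush(&memcg->css);
}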