diff mbox series

[08/11] cgroup: rstat cpu lock indirection

Message ID 20250218031448.46951-9-inwardvessel@gmail.com (mailing list archive)
State New
Headers show
Series cgroup: separate rstat trees | expand

Commit Message

JP Kobryn Feb. 18, 2025, 3:14 a.m. UTC
Where functions access the global per-cpu lock, change their signature
to accept the lock instead as a parameter. Change the code within these
functions to only access the parameter. This indirection allows for
future code to accept different locks, increasing extensibility. For
example, a new lock could be added specifically for the bpf cgroups and
it would not contend with the existing lock.

Signed-off-by: JP Kobryn <inwardvessel@gmail.com>
---
 kernel/cgroup/rstat.c | 74 +++++++++++++++++++++++++------------------
 1 file changed, 43 insertions(+), 31 deletions(-)

Comments

kernel test robot Feb. 19, 2025, 8:48 a.m. UTC | #1
Hi JP,

kernel test robot noticed the following build warnings:

[auto build test WARNING on tj-cgroup/for-next]
[also build test WARNING on bpf-next/master bpf/master linus/master v6.14-rc3 next-20250219]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/JP-Kobryn/cgroup-move-rstat-pointers-into-struct-of-their-own/20250218-111725
base:   https://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git for-next
patch link:    https://lore.kernel.org/r/20250218031448.46951-9-inwardvessel%40gmail.com
patch subject: [PATCH 08/11] cgroup: rstat cpu lock indirection
config: arc-randconfig-002-20250219 (https://download.01.org/0day-ci/archive/20250219/202502191619.0t8nOsuQ-lkp@intel.com/config)
compiler: arc-elf-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250219/202502191619.0t8nOsuQ-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202502191619.0t8nOsuQ-lkp@intel.com/

All warnings (new ones prefixed by >>):

   kernel/cgroup/rstat.c:266: warning: Function parameter or struct member 'ops' not described in 'cgroup_rstat_push_children'
   kernel/cgroup/rstat.c:326: warning: Function parameter or struct member 'ops' not described in 'cgroup_rstat_updated_list'
>> kernel/cgroup/rstat.c:326: warning: Function parameter or struct member 'cpu_lock' not described in 'cgroup_rstat_updated_list'
   kernel/cgroup/rstat.c:532: warning: Function parameter or struct member 'ops' not described in '__cgroup_rstat_flush_release'
>> kernel/cgroup/rstat.c:532: warning: Function parameter or struct member 'lock' not described in '__cgroup_rstat_flush_release'
   kernel/cgroup/rstat.c:532: warning: expecting prototype for cgroup_rstat_flush_release(). Prototype was for __cgroup_rstat_flush_release() instead


vim +326 kernel/cgroup/rstat.c

d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  304  
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  305  /**
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  306   * cgroup_rstat_updated_list - return a list of updated cgroups to be flushed
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  307   * @root: root of the cgroup subtree to traverse
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  308   * @cpu: target cpu
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  309   * Return: A singly linked list of cgroups to be flushed
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  310   *
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  311   * Walks the updated rstat_cpu tree on @cpu from @root.  During traversal,
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  312   * each returned cgroup is unlinked from the updated tree.
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  313   *
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  314   * The only ordering guarantee is that, for a parent and a child pair
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  315   * covered by a given traversal, the child is before its parent in
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  316   * the list.
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  317   *
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  318   * Note that updated_children is self terminated and points to a list of
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  319   * child cgroups if not empty. Whereas updated_next is like a sibling link
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  320   * within the children list and terminated by the parent cgroup. An exception
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  321   * here is the cgroup root whose updated_next can be self terminated.
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  322   */
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  323  static struct cgroup_rstat *cgroup_rstat_updated_list(
85c7ff288b9391 kernel/cgroup/rstat.c JP Kobryn       2025-02-17  324  		struct cgroup_rstat *root, int cpu, struct cgroup_rstat_ops *ops,
85c7ff288b9391 kernel/cgroup/rstat.c JP Kobryn       2025-02-17  325  		raw_spinlock_t *cpu_lock)
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25 @326  {
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  327  	struct cgroup_rstat_cpu *rstatc = rstat_cpu(root, cpu);
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  328  	struct cgroup_rstat *head = NULL, *parent, *child;
d67ed623c585f2 kernel/cgroup/rstat.c JP Kobryn       2025-02-17  329  	struct cgroup *cgrp;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  330  	unsigned long flags;
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  331  
d67ed623c585f2 kernel/cgroup/rstat.c JP Kobryn       2025-02-17  332  	cgrp = ops->cgroup_fn(root);
85c7ff288b9391 kernel/cgroup/rstat.c JP Kobryn       2025-02-17  333  	flags = _cgroup_rstat_cpu_lock(cpu_lock, cpu, cgrp, false);
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  334  
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  335  	/* Return NULL if this subtree is not on-list */
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  336  	if (!rstatc->updated_next)
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  337  		goto unlock_ret;
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  338  
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  339  	/*
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  340  	 * Unlink @root from its parent. As the updated_children list is
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  341  	 * singly linked, we have to walk it to find the removal point.
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  342  	 */
d67ed623c585f2 kernel/cgroup/rstat.c JP Kobryn       2025-02-17  343  	parent = ops->parent_fn(root);
dc26532aed0ab2 kernel/cgroup/rstat.c Johannes Weiner 2021-04-29  344  	if (parent) {
dc26532aed0ab2 kernel/cgroup/rstat.c Johannes Weiner 2021-04-29  345  		struct cgroup_rstat_cpu *prstatc;
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  346  		struct cgroup_rstat **nextp;
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  347  
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  348  		prstatc = rstat_cpu(parent, cpu);
c58632b3631cb2 kernel/cgroup/rstat.c Tejun Heo       2018-04-26  349  		nextp = &prstatc->updated_children;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  350  		while (*nextp != root) {
dc26532aed0ab2 kernel/cgroup/rstat.c Johannes Weiner 2021-04-29  351  			struct cgroup_rstat_cpu *nrstatc;
dc26532aed0ab2 kernel/cgroup/rstat.c Johannes Weiner 2021-04-29  352  
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  353  			nrstatc = rstat_cpu(*nextp, cpu);
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  354  			WARN_ON_ONCE(*nextp == parent);
c58632b3631cb2 kernel/cgroup/rstat.c Tejun Heo       2018-04-26  355  			nextp = &nrstatc->updated_next;
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  356  		}
c58632b3631cb2 kernel/cgroup/rstat.c Tejun Heo       2018-04-26  357  		*nextp = rstatc->updated_next;
dc26532aed0ab2 kernel/cgroup/rstat.c Johannes Weiner 2021-04-29  358  	}
9a9e97b2f1f27e kernel/cgroup/rstat.c Tejun Heo       2018-04-26  359  
dc26532aed0ab2 kernel/cgroup/rstat.c Johannes Weiner 2021-04-29  360  	rstatc->updated_next = NULL;
e76d28bdf9ba53 kernel/cgroup/rstat.c Waiman Long     2023-11-03  361  
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  362  	/* Push @root to the list first before pushing the children */
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  363  	head = root;
3d844899ba042a kernel/cgroup/rstat.c JP Kobryn       2025-02-17  364  	root->rstat_flush_next = NULL;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  365  	child = rstatc->updated_children;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  366  	rstatc->updated_children = root;
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  367  	if (child != root)
d67ed623c585f2 kernel/cgroup/rstat.c JP Kobryn       2025-02-17  368  		head = cgroup_rstat_push_children(head, child, cpu, ops);
d499fd418fa159 kernel/cgroup/rstat.c Waiman Long     2023-11-30  369  unlock_ret:
85c7ff288b9391 kernel/cgroup/rstat.c JP Kobryn       2025-02-17  370  	_cgroup_rstat_cpu_unlock(cpu_lock, cpu, cgrp, flags, false);
e76d28bdf9ba53 kernel/cgroup/rstat.c Waiman Long     2023-11-03  371  	return head;
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  372  }
041cd640b2f3c5 kernel/cgroup/stat.c  Tejun Heo       2017-09-25  373
diff mbox series

Patch

diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 4cb0f3ffc1db..9f6da3ea3c8c 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -177,7 +177,7 @@  void _cgroup_rstat_cpu_unlock(raw_spinlock_t *lock, int cpu,
 }
 
 static void __cgroup_rstat_updated(struct cgroup_rstat *rstat, int cpu,
-		struct cgroup_rstat_ops *ops)
+		struct cgroup_rstat_ops *ops, raw_spinlock_t *cpu_lock)
 {
 	struct cgroup *cgrp;
 	unsigned long flags;
@@ -194,7 +194,7 @@  static void __cgroup_rstat_updated(struct cgroup_rstat *rstat, int cpu,
 		return;
 
 	cgrp = ops->cgroup_fn(rstat);
-	flags = _cgroup_rstat_cpu_lock(&cgroup_rstat_cpu_lock, cpu, cgrp, true);
+	flags = _cgroup_rstat_cpu_lock(cpu_lock, cpu, cgrp, true);
 
 	/* put @rstat and all ancestors on the corresponding updated lists */
 	while (true) {
@@ -222,7 +222,7 @@  static void __cgroup_rstat_updated(struct cgroup_rstat *rstat, int cpu,
 		rstat = parent;
 	}
 
-	_cgroup_rstat_cpu_unlock(&cgroup_rstat_cpu_lock, cpu, cgrp, flags, true);
+	_cgroup_rstat_cpu_unlock(cpu_lock, cpu, cgrp, flags, true);
 }
 
 /**
@@ -236,13 +236,15 @@  static void __cgroup_rstat_updated(struct cgroup_rstat *rstat, int cpu,
  */
 void cgroup_rstat_updated(struct cgroup_subsys_state *css, int cpu)
 {
-	__cgroup_rstat_updated(&css->rstat, cpu, &rstat_css_ops);
+	__cgroup_rstat_updated(&css->rstat, cpu, &rstat_css_ops,
+			&cgroup_rstat_cpu_lock);
 }
 
 #ifdef CONFIG_CGROUP_BPF
 __bpf_kfunc void bpf_cgroup_rstat_updated(struct cgroup *cgroup, int cpu)
 {
-	__cgroup_rstat_updated(&(cgroup->bpf.rstat), cpu, &rstat_bpf_ops);
+	__cgroup_rstat_updated(&(cgroup->bpf.rstat), cpu, &rstat_bpf_ops,
+			&cgroup_rstat_cpu_lock);
 }
 #endif /* CONFIG_CGROUP_BPF */
 
@@ -319,7 +321,8 @@  static struct cgroup_rstat *cgroup_rstat_push_children(
  * here is the cgroup root whose updated_next can be self terminated.
  */
 static struct cgroup_rstat *cgroup_rstat_updated_list(
-		struct cgroup_rstat *root, int cpu, struct cgroup_rstat_ops *ops)
+		struct cgroup_rstat *root, int cpu, struct cgroup_rstat_ops *ops,
+		raw_spinlock_t *cpu_lock)
 {
 	struct cgroup_rstat_cpu *rstatc = rstat_cpu(root, cpu);
 	struct cgroup_rstat *head = NULL, *parent, *child;
@@ -327,7 +330,7 @@  static struct cgroup_rstat *cgroup_rstat_updated_list(
 	unsigned long flags;
 
 	cgrp = ops->cgroup_fn(root);
-	flags = _cgroup_rstat_cpu_lock(&cgroup_rstat_cpu_lock, cpu, cgrp, false);
+	flags = _cgroup_rstat_cpu_lock(cpu_lock, cpu, cgrp, false);
 
 	/* Return NULL if this subtree is not on-list */
 	if (!rstatc->updated_next)
@@ -364,7 +367,7 @@  static struct cgroup_rstat *cgroup_rstat_updated_list(
 	if (child != root)
 		head = cgroup_rstat_push_children(head, child, cpu, ops);
 unlock_ret:
-	_cgroup_rstat_cpu_unlock(&cgroup_rstat_cpu_lock, cpu, cgrp, flags, false);
+	_cgroup_rstat_cpu_unlock(cpu_lock, cpu, cgrp, flags, false);
 	return head;
 }
 
@@ -422,43 +425,46 @@  static inline void __cgroup_rstat_unlock(spinlock_t *lock,
 
 /* see cgroup_rstat_flush() */
 static void cgroup_rstat_flush_locked(struct cgroup_rstat *rstat,
-		struct cgroup_rstat_ops *ops)
-	__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
+		struct cgroup_rstat_ops *ops, spinlock_t *lock,
+		raw_spinlock_t *cpu_lock)
+	__releases(lock) __acquires(lock)
 {
 	int cpu;
 
-	lockdep_assert_held(&cgroup_rstat_lock);
+	lockdep_assert_held(lock);
 
 	for_each_possible_cpu(cpu) {
 		struct cgroup_rstat *pos = cgroup_rstat_updated_list(
-				rstat, cpu, ops);
+				rstat, cpu, ops, cpu_lock);
 
 		for (; pos; pos = pos->rstat_flush_next)
 			ops->flush_fn(pos, cpu);
 
 		/* play nice and yield if necessary */
-		if (need_resched() || spin_needbreak(&cgroup_rstat_lock)) {
+		if (need_resched() || spin_needbreak(lock)) {
 			struct cgroup *cgrp;
 
 			cgrp = ops->cgroup_fn(rstat);
-			__cgroup_rstat_unlock(&cgroup_rstat_lock, cgrp, cpu);
+			__cgroup_rstat_unlock(lock, cgrp, cpu);
 			if (!cond_resched())
 				cpu_relax();
-			__cgroup_rstat_lock(&cgroup_rstat_lock, cgrp, cpu);
+			__cgroup_rstat_lock(lock, cgrp, cpu);
 		}
 	}
 }
 
 static void __cgroup_rstat_flush(struct cgroup_rstat *rstat,
-		struct cgroup_rstat_ops *ops)
+		struct cgroup_rstat_ops *ops, spinlock_t *lock,
+		raw_spinlock_t *cpu_lock)
+	__acquires(lock) __releases(lock)
 {
 	struct cgroup *cgrp;
 
 	might_sleep();
 	cgrp = ops->cgroup_fn(rstat);
-	__cgroup_rstat_lock(&cgroup_rstat_lock, cgrp, -1);
-	cgroup_rstat_flush_locked(rstat, ops);
-	__cgroup_rstat_unlock(&cgroup_rstat_lock, cgrp, -1);
+	__cgroup_rstat_lock(lock, cgrp, -1);
+	cgroup_rstat_flush_locked(rstat, ops, lock, cpu_lock);
+	__cgroup_rstat_unlock(lock, cgrp, -1);
 }
 
 /**
@@ -476,26 +482,29 @@  static void __cgroup_rstat_flush(struct cgroup_rstat *rstat,
  */
 void cgroup_rstat_flush(struct cgroup_subsys_state *css)
 {
-	__cgroup_rstat_flush(&css->rstat, &rstat_css_ops);
+	__cgroup_rstat_flush(&css->rstat, &rstat_css_ops,
+			&cgroup_rstat_lock, &cgroup_rstat_cpu_lock);
 }
 
 #ifdef CONFIG_CGROUP_BPF
 __bpf_kfunc void bpf_cgroup_rstat_flush(struct cgroup *cgroup)
 {
-	__cgroup_rstat_flush(&(cgroup->bpf.rstat), &rstat_bpf_ops);
+	__cgroup_rstat_flush(&(cgroup->bpf.rstat), &rstat_bpf_ops,
+			&cgroup_rstat_lock, &cgroup_rstat_cpu_lock);
 }
 #endif /* CONFIG_CGROUP_BPF */
 
 static void __cgroup_rstat_flush_hold(struct cgroup_rstat *rstat,
-		struct cgroup_rstat_ops *ops)
-	__acquires(&cgroup_rstat_lock)
+		struct cgroup_rstat_ops *ops, spinlock_t *lock,
+		raw_spinlock_t *cpu_lock)
+	__acquires(lock)
 {
 	struct cgroup *cgrp;
 
 	might_sleep();
 	cgrp = ops->cgroup_fn(rstat);
-	__cgroup_rstat_lock(&cgroup_rstat_lock, cgrp, -1);
-	cgroup_rstat_flush_locked(rstat, ops);
+	__cgroup_rstat_lock(lock, cgrp, -1);
+	cgroup_rstat_flush_locked(rstat, ops, lock, cpu_lock);
 }
 
 /**
@@ -509,7 +518,8 @@  static void __cgroup_rstat_flush_hold(struct cgroup_rstat *rstat,
  */
 void cgroup_rstat_flush_hold(struct cgroup_subsys_state *css)
 {
-	__cgroup_rstat_flush_hold(&css->rstat, &rstat_css_ops);
+	__cgroup_rstat_flush_hold(&css->rstat, &rstat_css_ops,
+			&cgroup_rstat_lock, &cgroup_rstat_cpu_lock);
 }
 
 /**
@@ -517,13 +527,13 @@  void cgroup_rstat_flush_hold(struct cgroup_subsys_state *css)
  * @rstat: rstat node used to find associated cgroup used by tracepoint
  */
 static void __cgroup_rstat_flush_release(struct cgroup_rstat *rstat,
-		struct cgroup_rstat_ops *ops)
-	__releases(&cgroup_rstat_lock)
+		struct cgroup_rstat_ops *ops, spinlock_t *lock)
+	__releases(lock)
 {
 	struct cgroup *cgrp;
 
 	cgrp = ops->cgroup_fn(rstat);
-	__cgroup_rstat_unlock(&cgroup_rstat_lock, cgrp, -1);
+	__cgroup_rstat_unlock(lock, cgrp, -1);
 }
 
 /**
@@ -532,7 +542,8 @@  static void __cgroup_rstat_flush_release(struct cgroup_rstat *rstat,
  */
 void cgroup_rstat_flush_release(struct cgroup_subsys_state *css)
 {
-	__cgroup_rstat_flush_release(&css->rstat, &rstat_css_ops);
+	__cgroup_rstat_flush_release(&css->rstat, &rstat_css_ops,
+			&cgroup_rstat_lock);
 }
 
 static void __cgroup_rstat_init(struct cgroup_rstat *rstat)
@@ -605,7 +616,8 @@  int bpf_cgroup_rstat_init(struct cgroup_bpf *bpf)
 
 void bpf_cgroup_rstat_exit(struct cgroup_bpf *bpf)
 {
-	__cgroup_rstat_flush(&bpf->rstat, &rstat_bpf_ops);
+	__cgroup_rstat_flush(&bpf->rstat, &rstat_bpf_ops,
+			&cgroup_rstat_lock, &cgroup_rstat_cpu_lock);
 	__cgroup_rstat_exit(&bpf->rstat);
 }
 #endif /* CONFIG_CGROUP_BPF */