
[v2,4/4] mm/mempolicy: Fix duplicate node addition in sysfs for weighted interleave

Message ID 20250312075628.648-4-rakie.kim@sk.com (mailing list archive)
State New
Series [v2,1/4] mm/mempolicy: Fix memory leaks in mempolicy_sysfs_init()

Commit Message

Rakie Kim March 12, 2025, 7:56 a.m. UTC
Sysfs attributes for interleave control were registered both at initialization
and when new nodes were detected via hotplug, leading to potential duplicates.

This patch ensures that each node is registered only once, preventing conflicts
and redundant sysfs entries.
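For illustration, the duplicate registration can happen roughly like this
(simplified sketch of the init and hotplug paths, not the exact code):

	/* init path: create an attribute for each node */
	for (nid = 0; nid < nr_node_ids; nid++)
		sysfs_wi_node_add(nid);

	/* hotplug path: the notifier can add the same node a second time */
	if (action == MEM_ONLINE)
		sysfs_wi_node_add(nid);

Without a check for an existing entry, the second call creates a duplicate
sysfs attribute for the node.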

Signed-off-by: Rakie Kim <rakie.kim@sk.com>
---
 mm/mempolicy.c | 66 +++++++++++++++++++++++++++++++++++---------------
 1 file changed, 46 insertions(+), 20 deletions(-)

Comments

Joshua Hahn March 12, 2025, 3:04 p.m. UTC | #1
Hi Rakie, thank you for your new revision!

I think this new ordering of the series makes more sense, since the bug exists
even before your patch is applied! IMHO, it might also make sense to take
patch 1 out of this series, and send it separately (and make patches 2-4
their own series). 

I have a nit and a few thoughts about this patch and the way we order locking
and allocating:

>  static void sysfs_wi_release(struct kobject *wi_kobj)
> @@ -3464,35 +3470,54 @@ static const struct kobj_type wi_ktype = {
>  
>  static int sysfs_wi_node_add(int nid)
>  {
> -	struct iw_node_attr *node_attr;
> +	int ret = 0;
>  	char *name;
>  
> -	node_attr = kzalloc(sizeof(*node_attr), GFP_KERNEL);
> -	if (!node_attr)
> -		return -ENOMEM;
> +	if (nid < 0 || nid >= nr_node_ids) {
> +		pr_err("Invalid node id: %d\n", nid);
> +		ret = -EINVAL;
> +		goto out;
> +	}
> +
> +	mutex_lock(&ngrp->kobj_lock);
> +	if (!ngrp->nattrs[nid]) {
> +		ngrp->nattrs[nid] = kzalloc(sizeof(struct iw_node_attr), GFP_KERNEL);

I am unsure if kzallocing with the mutex_lock held is best practice. Even though
two threads won't reach this place simultaneously since *most* calls to this
function are sequential, we should try to keep the code safe since future
patches might overlook this and later make non-sequential calls :-)

It also doesn't seem wise to directly assign the result of an allocation
without checking for its success (as I explain below).

IMHO it is best to allocate first, then acquire the lock and check for
existence, then assign with the lock still held. We can also reduce this code
section into a single if statement for clarity (but if you think it looks
cleaner with the if-else, please keep it!)

struct iw_node_attr *node_attr;

...

node_attr = kzalloc(sizeof(*node_attr), GFP_KERNEL);
if (!node_attr) {
	ret = -ENOMEM;
	goto out;
}

mutex_lock(&ngrp->kobj_lock);
if (ngrp->nattrs[nid]) {
	mutex_unlock(&ngrp->kobj_lock);
	kfree(node_attr);
	pr_info("Node [%d] already exists\n", nid);
	goto out;
}
ngrp->nattrs[nid] = node_attr;
mutex_unlock(&ngrp->kobj_lock);


> +	} else {
> +		mutex_unlock(&ngrp->kobj_lock);
> +		pr_info("Node [%d] is already existed\n", nid);

NIT: To keep consistency with other parts of the kernel, maybe this can be
rephrased to "Node [%d] already exists\n"

> +		goto out;
> +	}
> +	mutex_unlock(&ngrp->kobj_lock);
> +
> +	if (!ngrp->nattrs[nid]) {
> +		ret = -ENOMEM;
> +		goto out;
> +	}

If we make the changes above, we don't have to check for allocation success
*after* already having locked & unlocked and making the unnecessary assignment.

>  
>  	name = kasprintf(GFP_KERNEL, "node%d", nid);
>  	if (!name) {
> -		kfree(node_attr);
> -		return -ENOMEM;
> +		kfree(ngrp->nattrs[nid]);
> +		ret = -ENOMEM;
> +		goto out;
>  	}

For the same reasons above, I think it makes sense to make this allocation
together with the allocation of node_attr above, and free / return -ENOMEM
as early as possible if we can.
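
Roughly, combining the two allocations up front could look like this (just a
sketch, reusing the names from the snippet above):

node_attr = kzalloc(sizeof(*node_attr), GFP_KERNEL);
name = kasprintf(GFP_KERNEL, "node%d", nid);
if (!node_attr || !name) {
	kfree(name);
	kfree(node_attr);
	ret = -ENOMEM;
	goto out;
}

mutex_lock(&ngrp->kobj_lock);
if (ngrp->nattrs[nid]) {
	mutex_unlock(&ngrp->kobj_lock);
	kfree(name);
	kfree(node_attr);
	pr_info("Node [%d] already exists\n", nid);
	goto out;
}
ngrp->nattrs[nid] = node_attr;
mutex_unlock(&ngrp->kobj_lock);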

[...snip...]

Thank you again for this patch! Please let me know what you think :-)
Have a great day!
Joshua

Sent using hkml (https://github.com/sjp38/hackermail)

Patch

diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 71aff1276d4d..5f20521036ec 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -3391,6 +3391,7 @@  struct iw_node_attr {
 
 struct iw_node_group {
 	struct kobject *wi_kobj;
+	struct mutex kobj_lock;
 	struct iw_node_attr **nattrs;
 };
 
@@ -3440,12 +3441,17 @@  static ssize_t node_store(struct kobject *kobj, struct kobj_attribute *attr,
 
 static void sysfs_wi_node_release(int nid)
 {
-	if (!ngrp->nattrs[nid])
+	mutex_lock(&ngrp->kobj_lock);
+	if (!ngrp->nattrs[nid]) {
+		mutex_unlock(&ngrp->kobj_lock);
 		return;
+	}
 
 	sysfs_remove_file(ngrp->wi_kobj, &ngrp->nattrs[nid]->kobj_attr.attr);
 	kfree(ngrp->nattrs[nid]->kobj_attr.attr.name);
 	kfree(ngrp->nattrs[nid]);
+	ngrp->nattrs[nid] = NULL;
+	mutex_unlock(&ngrp->kobj_lock);
 }
 
 static void sysfs_wi_release(struct kobject *wi_kobj)
@@ -3464,35 +3470,54 @@  static const struct kobj_type wi_ktype = {
 
 static int sysfs_wi_node_add(int nid)
 {
-	struct iw_node_attr *node_attr;
+	int ret = 0;
 	char *name;
 
-	node_attr = kzalloc(sizeof(*node_attr), GFP_KERNEL);
-	if (!node_attr)
-		return -ENOMEM;
+	if (nid < 0 || nid >= nr_node_ids) {
+		pr_err("Invalid node id: %d\n", nid);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	mutex_lock(&ngrp->kobj_lock);
+	if (!ngrp->nattrs[nid]) {
+		ngrp->nattrs[nid] = kzalloc(sizeof(struct iw_node_attr), GFP_KERNEL);
+	} else {
+		mutex_unlock(&ngrp->kobj_lock);
+		pr_info("Node [%d] is already existed\n", nid);
+		goto out;
+	}
+	mutex_unlock(&ngrp->kobj_lock);
+
+	if (!ngrp->nattrs[nid]) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
 	name = kasprintf(GFP_KERNEL, "node%d", nid);
 	if (!name) {
-		kfree(node_attr);
-		return -ENOMEM;
+		kfree(ngrp->nattrs[nid]);
+		ret = -ENOMEM;
+		goto out;
 	}
 
-	sysfs_attr_init(&node_attr->kobj_attr.attr);
-	node_attr->kobj_attr.attr.name = name;
-	node_attr->kobj_attr.attr.mode = 0644;
-	node_attr->kobj_attr.show = node_show;
-	node_attr->kobj_attr.store = node_store;
-	node_attr->nid = nid;
+	sysfs_attr_init(&ngrp->nattrs[nid]->kobj_attr.attr);
+	ngrp->nattrs[nid]->kobj_attr.attr.name = name;
+	ngrp->nattrs[nid]->kobj_attr.attr.mode = 0644;
+	ngrp->nattrs[nid]->kobj_attr.show = node_show;
+	ngrp->nattrs[nid]->kobj_attr.store = node_store;
+	ngrp->nattrs[nid]->nid = nid;
 
-	if (sysfs_create_file(ngrp->wi_kobj, &node_attr->kobj_attr.attr)) {
-		kfree(node_attr->kobj_attr.attr.name);
-		kfree(node_attr);
-		pr_err("failed to add attribute to weighted_interleave\n");
-		return -ENOMEM;
+	ret = sysfs_create_file(ngrp->wi_kobj, &ngrp->nattrs[nid]->kobj_attr.attr);
+	if (ret) {
+		kfree(ngrp->nattrs[nid]->kobj_attr.attr.name);
+		kfree(ngrp->nattrs[nid]);
+		pr_err("failed to add attribute to weighted_interleave: %d\n", ret);
+		goto out;
 	}
 
-	ngrp->nattrs[nid] = node_attr;
-	return 0;
+out:
+	return ret;
 }
 
 static int wi_node_notifier(struct notifier_block *nb,
@@ -3588,6 +3613,7 @@  static int __init mempolicy_sysfs_init(void)
 		err = -ENOMEM;
 		goto err_out;
 	}
+	mutex_init(&ngrp->kobj_lock);
 
 	ngrp->nattrs = kcalloc(nr_node_ids, sizeof(struct iw_node_attr *),
 			       GFP_KERNEL);