[v1,02/11] fs/proc/vmcore: replace vmcoredd_mutex by vmcore_mutex

Message ID 20241025151134.1275575-3-david@redhat.com (mailing list archive)
State New
Series fs/proc/vmcore: kdump support for virtio-mem on s390

Commit Message

David Hildenbrand Oct. 25, 2024, 3:11 p.m. UTC
Let's use our new mutex instead.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 fs/proc/vmcore.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)
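
For context, this patch relies on vmcore_mutex already existing. Below is a
minimal sketch of the assumed starting point; the definition is taken to come
from an earlier patch in this series, and its placement and comments are
illustrative, not the actual code:

#include <linux/list.h>
#include <linux/mutex.h>

/*
 * Assumed to be introduced by an earlier patch in this series: a single
 * mutex guarding vmcore state in fs/proc/vmcore.c. The patch below extends
 * its coverage to the device-dump list, so the separate vmcoredd_mutex can
 * be dropped.
 */
static DEFINE_MUTEX(vmcore_mutex);

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device dump list, after this patch also synchronized via vmcore_mutex. */
static LIST_HEAD(vmcoredd_list);
#endif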

Comments

Baoquan He Nov. 15, 2024, 9:32 a.m. UTC | #1
On 10/25/24 at 05:11pm, David Hildenbrand wrote:
> Let's use our new mutex instead.

Is there a reason vmcoredd_mutex needs to be replaced and merged into
vmcore_mutex? Is it because concurrent opening of vmcore could happen
with the old vmcoredd_mutex?

> 
> Signed-off-by: David Hildenbrand <david@redhat.com>
> ---
>  fs/proc/vmcore.c | 17 ++++++++---------
>  1 file changed, 8 insertions(+), 9 deletions(-)
> 
> diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
> index 110ce193d20f..b91c304463c9 100644
> --- a/fs/proc/vmcore.c
> +++ b/fs/proc/vmcore.c
> @@ -53,7 +53,6 @@ static struct proc_dir_entry *proc_vmcore;
>  #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
>  /* Device Dump list and mutex to synchronize access to list */
>  static LIST_HEAD(vmcoredd_list);
> -static DEFINE_MUTEX(vmcoredd_mutex);
>  
>  static bool vmcoredd_disabled;
>  core_param(novmcoredd, vmcoredd_disabled, bool, 0);
> @@ -248,7 +247,7 @@ static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size)
>  	size_t tsz;
>  	char *buf;
>  
> -	mutex_lock(&vmcoredd_mutex);
> +	mutex_lock(&vmcore_mutex);
>  	list_for_each_entry(dump, &vmcoredd_list, list) {
>  		if (start < offset + dump->size) {
>  			tsz = min(offset + (u64)dump->size - start, (u64)size);
> @@ -269,7 +268,7 @@ static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size)
>  	}
>  
>  out_unlock:
> -	mutex_unlock(&vmcoredd_mutex);
> +	mutex_unlock(&vmcore_mutex);
>  	return ret;
>  }
>  
> @@ -283,7 +282,7 @@ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
>  	size_t tsz;
>  	char *buf;
>  
> -	mutex_lock(&vmcoredd_mutex);
> +	mutex_lock(&vmcore_mutex);
>  	list_for_each_entry(dump, &vmcoredd_list, list) {
>  		if (start < offset + dump->size) {
>  			tsz = min(offset + (u64)dump->size - start, (u64)size);
> @@ -306,7 +305,7 @@ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
>  	}
>  
>  out_unlock:
> -	mutex_unlock(&vmcoredd_mutex);
> +	mutex_unlock(&vmcore_mutex);
>  	return ret;
>  }
>  #endif /* CONFIG_MMU */
> @@ -1517,9 +1516,9 @@ int vmcore_add_device_dump(struct vmcoredd_data *data)
>  	dump->size = data_size;
>  
>  	/* Add the dump to driver sysfs list */
> -	mutex_lock(&vmcoredd_mutex);
> +	mutex_lock(&vmcore_mutex);
>  	list_add_tail(&dump->list, &vmcoredd_list);
> -	mutex_unlock(&vmcoredd_mutex);
> +	mutex_unlock(&vmcore_mutex);
>  
>  	vmcoredd_update_size(data_size);
>  	return 0;
> @@ -1537,7 +1536,7 @@ EXPORT_SYMBOL(vmcore_add_device_dump);
>  static void vmcore_free_device_dumps(void)
>  {
>  #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
> -	mutex_lock(&vmcoredd_mutex);
> +	mutex_lock(&vmcore_mutex);
>  	while (!list_empty(&vmcoredd_list)) {
>  		struct vmcoredd_node *dump;
>  
> @@ -1547,7 +1546,7 @@ static void vmcore_free_device_dumps(void)
>  		vfree(dump->buf);
>  		vfree(dump);
>  	}
> -	mutex_unlock(&vmcoredd_mutex);
> +	mutex_unlock(&vmcore_mutex);
>  #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
>  }
>  
> -- 
> 2.46.1
>
David Hildenbrand Nov. 15, 2024, 10:04 a.m. UTC | #2
On 15.11.24 10:32, Baoquan He wrote:
> On 10/25/24 at 05:11pm, David Hildenbrand wrote:
>> Let's use our new mutex instead.
> 
> Is there a reason vmcoredd_mutex needs to be replaced and merged into
> vmcore_mutex? Is it because concurrent opening of vmcore could happen
> with the old vmcoredd_mutex?

Yes, see the next patch in this series. But I consider this valuable on 
its own: there is no need to have two mutexes.

I can make that clearer in the patch description.
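
For readers following the series, here is a rough sketch of the kind of
serialization a single shared mutex enables. The vmcore_opened flag and the
function names below are hypothetical illustrations of what a follow-up patch
could do, not code from this series:

#include <linux/errno.h>
#include <linux/mutex.h>

/* Illustrative only; names are hypothetical, not taken from the series. */
static DEFINE_MUTEX(vmcore_mutex);
static bool vmcore_opened;

/* Imagined hook in the /proc/vmcore open path. */
static void example_vmcore_open(void)
{
        mutex_lock(&vmcore_mutex);
        vmcore_opened = true;
        mutex_unlock(&vmcore_mutex);
}

/*
 * With one mutex, adding a device dump can check whether the vmcore is
 * already open under the same lock the open path takes, which is simpler
 * than coordinating two separate mutexes.
 */
static int example_add_device_dump(void)
{
        int ret = 0;

        mutex_lock(&vmcore_mutex);
        if (vmcore_opened)
                ret = -EBUSY;   /* refuse to modify an already-open vmcore */
        /* else: add the dump to vmcoredd_list, update sizes, ... */
        mutex_unlock(&vmcore_mutex);
        return ret;
}

This is only meant to show why consolidating on vmcore_mutex pays off for the
next patch; the actual follow-up may differ in detail.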
Baoquan He Nov. 20, 2024, 8:14 a.m. UTC | #3
On 11/15/24 at 11:04am, David Hildenbrand wrote:
> On 15.11.24 10:32, Baoquan He wrote:
> > On 10/25/24 at 05:11pm, David Hildenbrand wrote:
> > > Let's use our new mutex instead.
> > 
> > Is there a reason vmcoredd_mutex needs to be replaced and merged into
> > vmcore_mutex? Is it because concurrent opening of vmcore could happen
> > with the old vmcoredd_mutex?
> 
> Yes, see the next patch in this series. But I consider this valuable on its
> own: there is no need to have two mutexes.
> 
> I can make that clearer in the patch description.

That would be great and more helpful. Because I didn't find the rationale
for the lock consolidation and for avoiding concurrent opening of vmcore
in the cover letter or in the logs of the first few patches, I thought
there were existing problems that the first few patches are meant to fix.

Patch

diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 110ce193d20f..b91c304463c9 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -53,7 +53,6 @@ static struct proc_dir_entry *proc_vmcore;
 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
 /* Device Dump list and mutex to synchronize access to list */
 static LIST_HEAD(vmcoredd_list);
-static DEFINE_MUTEX(vmcoredd_mutex);
 
 static bool vmcoredd_disabled;
 core_param(novmcoredd, vmcoredd_disabled, bool, 0);
@@ -248,7 +247,7 @@ static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size)
 	size_t tsz;
 	char *buf;
 
-	mutex_lock(&vmcoredd_mutex);
+	mutex_lock(&vmcore_mutex);
 	list_for_each_entry(dump, &vmcoredd_list, list) {
 		if (start < offset + dump->size) {
 			tsz = min(offset + (u64)dump->size - start, (u64)size);
@@ -269,7 +268,7 @@ static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size)
 	}
 
 out_unlock:
-	mutex_unlock(&vmcoredd_mutex);
+	mutex_unlock(&vmcore_mutex);
 	return ret;
 }
 
@@ -283,7 +282,7 @@ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
 	size_t tsz;
 	char *buf;
 
-	mutex_lock(&vmcoredd_mutex);
+	mutex_lock(&vmcore_mutex);
 	list_for_each_entry(dump, &vmcoredd_list, list) {
 		if (start < offset + dump->size) {
 			tsz = min(offset + (u64)dump->size - start, (u64)size);
@@ -306,7 +305,7 @@ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
 	}
 
 out_unlock:
-	mutex_unlock(&vmcoredd_mutex);
+	mutex_unlock(&vmcore_mutex);
 	return ret;
 }
 #endif /* CONFIG_MMU */
@@ -1517,9 +1516,9 @@ int vmcore_add_device_dump(struct vmcoredd_data *data)
 	dump->size = data_size;
 
 	/* Add the dump to driver sysfs list */
-	mutex_lock(&vmcoredd_mutex);
+	mutex_lock(&vmcore_mutex);
 	list_add_tail(&dump->list, &vmcoredd_list);
-	mutex_unlock(&vmcoredd_mutex);
+	mutex_unlock(&vmcore_mutex);
 
 	vmcoredd_update_size(data_size);
 	return 0;
@@ -1537,7 +1536,7 @@ EXPORT_SYMBOL(vmcore_add_device_dump);
 static void vmcore_free_device_dumps(void)
 {
 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
-	mutex_lock(&vmcoredd_mutex);
+	mutex_lock(&vmcore_mutex);
 	while (!list_empty(&vmcoredd_list)) {
 		struct vmcoredd_node *dump;
 
@@ -1547,7 +1546,7 @@ static void vmcore_free_device_dumps(void)
 		vfree(dump->buf);
 		vfree(dump);
 	}
-	mutex_unlock(&vmcoredd_mutex);
+	mutex_unlock(&vmcore_mutex);
 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
 }