Message ID: 20241025151134.1275575-5-david@redhat.com (mailing list archive)
State:      New
Series:     fs/proc/vmcore: kdump support for virtio-mem on s390
On 10/25/24 at 05:11pm, David Hildenbrand wrote:
> These defines are not related to /proc/kcore, move them to crash_dump.h
> instead. While at it, rename "struct vmcore" to "struct
> vmcore_mem_node", which is a more fitting name.

Agree it's inappropriate to put the definitions in kcore.h. However, for
'struct vmcore', it's only used in fs/proc/vmcore.c from my code
searching; do you think we can put it in fs/proc/vmcore.c directly? And
'struct vmcoredd_node' too.

And about the renaming: with my understanding, each instance of struct
vmcore represents one memory region, so isn't it a little confusing to
call it vmcore_mem_node? I understand you probably want to unify the
vmcore and vmcoredd naming. I have to admit I don't know vmcoredd and its
naming well, while most people are already very familiar with vmcore
representing a memory region.

> Signed-off-by: David Hildenbrand <david@redhat.com>
> ---
>  fs/proc/vmcore.c           | 20 ++++++++++----------
>  include/linux/crash_dump.h | 13 +++++++++++++
>  include/linux/kcore.h      | 13 -------------
>  3 files changed, 23 insertions(+), 23 deletions(-)
On 15.11.24 10:44, Baoquan He wrote:
> On 10/25/24 at 05:11pm, David Hildenbrand wrote:
>> These defines are not related to /proc/kcore, move them to crash_dump.h
>> instead. While at it, rename "struct vmcore" to "struct
>> vmcore_mem_node", which is a more fitting name.
>
> Agree it's inappropriate to put the definitions in kcore.h. However, for
> 'struct vmcore', it's only used in fs/proc/vmcore.c from my code
> searching; do you think we can put it in fs/proc/vmcore.c directly? And
> 'struct vmcoredd_node' too.

See the next patches and how virtio-mem will make use of the factored-out
functions. Not putting them as inline functions into a header would require
exporting symbols just to add a vmcore memory node to the list, which I
want to avoid -- overkill for these simple helpers.

>
> And about the renaming: with my understanding, each instance of struct
> vmcore represents one memory region, so isn't it a little confusing to
> call it vmcore_mem_node? I understand you probably want to unify the
> vmcore and vmcoredd naming. I have to admit I don't know vmcoredd and its
> naming well, while most people are already very familiar with vmcore
> representing a memory region.

I chose "vmcore_mem_node" because it is a memory range stored in a list.
Note the symmetry with "vmcoredd_node".

If there are strong feelings I can use a different name, but
"vmcore_mem_node" really describes what it actually is, especially now
that we have different vmcore nodes.
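To illustrate the point about header inlines: a helper of the kind described
above would look roughly like the sketch below. The actual helpers are
introduced in patches #5 and #6 of the series, which are not part of this
page, so the function name follows the names mentioned later in this thread
and the body is only an assumption (it presumes <linux/slab.h>,
<linux/list.h> and the struct added to crash_dump.h by this patch).

/*
 * Sketch only: the real helpers are added in patches #5/#6 of this series
 * (not shown here). Name and body are assumptions based on the thread.
 */
static inline int vmcore_alloc_add_mem_node(struct list_head *vc_list,
                                            unsigned long long paddr,
                                            unsigned long long size)
{
        struct vmcore_mem_node *m = kzalloc(sizeof(*m), GFP_KERNEL);

        if (!m)
                return -ENOMEM;
        m->paddr = paddr;
        m->size = size;
        list_add_tail(&m->list, vc_list);
        return 0;
}

Because such a helper is a static inline in crash_dump.h, a caller like the
virtio-mem kdump code can build the list without fs/proc/vmcore.c having to
export any new symbols.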
On 11/15/24 at 10:59am, David Hildenbrand wrote:
> On 15.11.24 10:44, Baoquan He wrote:
> > On 10/25/24 at 05:11pm, David Hildenbrand wrote:
> > > These defines are not related to /proc/kcore, move them to crash_dump.h
> > > instead. While at it, rename "struct vmcore" to "struct
> > > vmcore_mem_node", which is a more fitting name.
> >
> > Agree it's inappropriate to put the definitions in kcore.h. However, for
> > 'struct vmcore', it's only used in fs/proc/vmcore.c from my code
> > searching; do you think we can put it in fs/proc/vmcore.c directly? And
> > 'struct vmcoredd_node' too.
>
> See the next patches and how virtio-mem will make use of the factored-out
> functions. Not putting them as inline functions into a header would require
> exporting symbols just to add a vmcore memory node to the list, which I
> want to avoid -- overkill for these simple helpers.

I see. It makes sense to put them in crash_dump.h. Thanks for the
explanation.

>
> > And about the renaming: with my understanding, each instance of struct
> > vmcore represents one memory region, so isn't it a little confusing to
> > call it vmcore_mem_node? I understand you probably want to unify the
> > vmcore and vmcoredd naming. I have to admit I don't know vmcoredd and its
> > naming well, while most people are already very familiar with vmcore
> > representing a memory region.
>
> I chose "vmcore_mem_node" because it is a memory range stored in a list.
> Note the symmetry with "vmcoredd_node".

I would say the justification for the name "vmcore_mem_node" is to keep
symmetry with "vmcoredd_node". If it is named for being a memory range, it
really should not be called vmcore_mem_node. As we know, "memory node" has
a specific meaning in the kernel: it's the memory range existing on a NUMA
node.

And vmcoredd is not a widely used feature. At least in Fedora/RHEL, we
leave it to customers themselves to use and handle, we don't support it,
and we add 'novmcoredd' to the kdump kernel cmdline by default to disable
it. So a rarely used feature should not be taken to decide the naming of a
mature and widely used feature. My personal opinion.

>
> If there are strong feelings I can use a different name, but

Yes, I would suggest we either keep the old name or take a more
appropriate one if we have to change it.

> "vmcore_mem_node" really describes what it actually is, especially now
> that we have different vmcore nodes.
>
> --
> Cheers,
>
> David / dhildenb
>
On 20.11.24 10:42, Baoquan He wrote:
> On 11/15/24 at 10:59am, David Hildenbrand wrote:
>> On 15.11.24 10:44, Baoquan He wrote:
>>> On 10/25/24 at 05:11pm, David Hildenbrand wrote:
>>>> These defines are not related to /proc/kcore, move them to crash_dump.h
>>>> instead. While at it, rename "struct vmcore" to "struct
>>>> vmcore_mem_node", which is a more fitting name.
>>>
>>> Agree it's inappropriate to put the definitions in kcore.h. However, for
>>> 'struct vmcore', it's only used in fs/proc/vmcore.c from my code
>>> searching; do you think we can put it in fs/proc/vmcore.c directly? And
>>> 'struct vmcoredd_node' too.
>>
>> See the next patches and how virtio-mem will make use of the factored-out
>> functions. Not putting them as inline functions into a header would require
>> exporting symbols just to add a vmcore memory node to the list, which I
>> want to avoid -- overkill for these simple helpers.
>
> I see. It makes sense to put them in crash_dump.h. Thanks for the
> explanation.
>

I'll add these details to the description.

>>
>>> And about the renaming: with my understanding, each instance of struct
>>> vmcore represents one memory region, so isn't it a little confusing to
>>> call it vmcore_mem_node? I understand you probably want to unify the
>>> vmcore and vmcoredd naming. I have to admit I don't know vmcoredd and its
>>> naming well, while most people are already very familiar with vmcore
>>> representing a memory region.
>>
>> I chose "vmcore_mem_node" because it is a memory range stored in a list.
>> Note the symmetry with "vmcoredd_node".
>
> I would say the justification for the name "vmcore_mem_node" is to keep
> symmetry with "vmcoredd_node". If it is named for being a memory range, it
> really should not be called vmcore_mem_node. As we know, "memory node" has
> a specific meaning in the kernel: it's the memory range existing on a NUMA
> node.
>
> And vmcoredd is not a widely used feature. At least in Fedora/RHEL, we
> leave it to customers themselves to use and handle, we don't support it,
> and we add 'novmcoredd' to the kdump kernel cmdline by default to disable
> it. So a rarely used feature should not be taken to decide the naming of a
> mature and widely used feature. My personal opinion.

It's a memory range that gets added to a list. So it's a node in a list ...
representing a memory range. :) I don't particularly care about the "node"
part here.

The old "struct vmcore" name is misleading: it makes one believe it somehow
represents "/proc/vmcore", but it really doesn't. (see below on function
naming)

>
>>
>> If there are strong feelings I can use a different name, but
>
> Yes, I would suggest we either keep the old name or take a more
> appropriate one if we have to change it.

In light of patches #5 and #6, really only something like "vmcore_mem_node"
makes sense. Alternatively "vmcore_range" or "vmcore_mem_range".

Leaving it as "struct vmcore" would mean that we would have to do the
following in #5 and #6:

* vmcore_alloc_add_mem_node() -> vmcore_alloc_add()
* vmcore_free_mem_nodes() -> vmcore_free()

Which would *really* be misleading, because we are not "freeing" the vmcore.

Would "vmcore_range" work for you? Then we could do:

* vmcore_alloc_add_mem_node() -> vmcore_alloc_add_range()
* vmcore_free_mem_nodes() -> vmcore_free_ranges()
On 11/20/24 at 11:28am, David Hildenbrand wrote:
> On 20.11.24 10:42, Baoquan He wrote:
> > On 11/15/24 at 10:59am, David Hildenbrand wrote:
> > > On 15.11.24 10:44, Baoquan He wrote:
> > > > On 10/25/24 at 05:11pm, David Hildenbrand wrote:
> > > > > These defines are not related to /proc/kcore, move them to crash_dump.h
> > > > > instead. While at it, rename "struct vmcore" to "struct
> > > > > vmcore_mem_node", which is a more fitting name.
> > > >
> > > > Agree it's inappropriate to put the definitions in kcore.h. However, for
> > > > 'struct vmcore', it's only used in fs/proc/vmcore.c from my code
> > > > searching; do you think we can put it in fs/proc/vmcore.c directly? And
> > > > 'struct vmcoredd_node' too.
> > >
> > > See the next patches and how virtio-mem will make use of the factored-out
> > > functions. Not putting them as inline functions into a header would require
> > > exporting symbols just to add a vmcore memory node to the list, which I
> > > want to avoid -- overkill for these simple helpers.
> >
> > I see. It makes sense to put them in crash_dump.h. Thanks for the
> > explanation.
> >
>
> I'll add these details to the description.

Thanks.

>
> > >
> > > > And about the renaming: with my understanding, each instance of struct
> > > > vmcore represents one memory region, so isn't it a little confusing to
> > > > call it vmcore_mem_node? I understand you probably want to unify the
> > > > vmcore and vmcoredd naming. I have to admit I don't know vmcoredd and its
> > > > naming well, while most people are already very familiar with vmcore
> > > > representing a memory region.
> > >
> > > I chose "vmcore_mem_node" because it is a memory range stored in a list.
> > > Note the symmetry with "vmcoredd_node".
> >
> > I would say the justification for the name "vmcore_mem_node" is to keep
> > symmetry with "vmcoredd_node". If it is named for being a memory range, it
> > really should not be called vmcore_mem_node. As we know, "memory node" has
> > a specific meaning in the kernel: it's the memory range existing on a NUMA
> > node.
> >
> > And vmcoredd is not a widely used feature. At least in Fedora/RHEL, we
> > leave it to customers themselves to use and handle, we don't support it,
> > and we add 'novmcoredd' to the kdump kernel cmdline by default to disable
> > it. So a rarely used feature should not be taken to decide the naming of a
> > mature and widely used feature. My personal opinion.
>
> It's a memory range that gets added to a list. So it's a node in a list ...
> representing a memory range. :) I don't particularly care about the "node"
> part here.

Ah, I missed that this is about a list node. There are list items, list
entries and list nodes; I didn't think of a list node at that time.

>
> The old "struct vmcore" name is misleading: it makes one believe it somehow
> represents "/proc/vmcore", but it really doesn't. (see below on function
> naming)

Yeah, agree. struct vmcore is a concept of the whole logical file.

>
> > >
> > > If there are strong feelings I can use a different name, but
> >
> > Yes, I would suggest we either keep the old name or take a more
> > appropriate one if we have to change it.
>
> In light of patches #5 and #6, really only something like "vmcore_mem_node"
> makes sense. Alternatively "vmcore_range" or "vmcore_mem_range".
>
> Leaving it as "struct vmcore" would mean that we would have to do the
> following in #5 and #6:
>
> * vmcore_alloc_add_mem_node() -> vmcore_alloc_add()
> * vmcore_free_mem_nodes() -> vmcore_free()
>
> Which would *really* be misleading, because we are not "freeing" the vmcore.
>
> Would "vmcore_range" work for you? Then we could do:
>
> * vmcore_alloc_add_mem_node() -> vmcore_alloc_add_range()
> * vmcore_free_mem_nodes() -> vmcore_free_ranges()

Yeah, vmcore_range is better, which won't cause misunderstanding. Thanks.
>>>> If there are strong feelings I can use a different name, but
>>>
>>> Yes, I would suggest we either keep the old name or take a more
>>> appropriate one if we have to change it.
>>
>> In light of patches #5 and #6, really only something like "vmcore_mem_node"
>> makes sense. Alternatively "vmcore_range" or "vmcore_mem_range".
>>
>> Leaving it as "struct vmcore" would mean that we would have to do the
>> following in #5 and #6:
>>
>> * vmcore_alloc_add_mem_node() -> vmcore_alloc_add()
>> * vmcore_free_mem_nodes() -> vmcore_free()
>>
>> Which would *really* be misleading, because we are not "freeing" the vmcore.
>>
>> Would "vmcore_range" work for you? Then we could do:
>>
>> * vmcore_alloc_add_mem_node() -> vmcore_alloc_add_range()
>> * vmcore_free_mem_nodes() -> vmcore_free_ranges()
>
> Yeah, vmcore_range is better, which won't cause misunderstanding. Thanks.
>

Thanks, I'll use that and adjust patches #5 and #6, keeping your ACKs.
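For reference, with the rename agreed on above, the definition moved to
crash_dump.h and the helpers discussed for patches #5/#6 would look roughly
like this. This is only an illustration of the naming outcome: the struct
fields come from the patch below, while the prototypes are assumptions based
on the function names mentioned in the thread; the actual change lands in the
respun series.

/* Illustrative only: fields as in the patch below, name per the agreed rename. */
struct vmcore_range {
        struct list_head list;
        unsigned long long paddr;
        unsigned long long size;
        loff_t offset;
};

/* Prototypes assumed from the helper names discussed for patches #5/#6. */
int vmcore_alloc_add_range(struct list_head *list, unsigned long long paddr,
                           unsigned long long size);
void vmcore_free_ranges(struct list_head *list);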
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 6371dbaa21be..47652df95202 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -304,10 +304,10 @@ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
  */
 static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
 {
+        struct vmcore_mem_node *m = NULL;
         ssize_t acc = 0, tmp;
         size_t tsz;
         u64 start;
-        struct vmcore *m = NULL;
 
         if (!iov_iter_count(iter) || *fpos >= vmcore_size)
                 return 0;
@@ -560,8 +560,8 @@ static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
 static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 {
         size_t size = vma->vm_end - vma->vm_start;
+        struct vmcore_mem_node *m;
         u64 start, end, len, tsz;
-        struct vmcore *m;
 
         start = (u64)vma->vm_pgoff << PAGE_SHIFT;
         end = start + size;
@@ -683,16 +683,16 @@ static const struct proc_ops vmcore_proc_ops = {
         .proc_mmap = mmap_vmcore,
 };
 
-static struct vmcore* __init get_new_element(void)
+static struct vmcore_mem_node * __init get_new_element(void)
 {
-        return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
+        return kzalloc(sizeof(struct vmcore_mem_node), GFP_KERNEL);
 }
 
 static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
                            struct list_head *vc_list)
 {
+        struct vmcore_mem_node *m;
         u64 size;
-        struct vmcore *m;
 
         size = elfsz + elfnotesegsz;
         list_for_each_entry(m, vc_list, list) {
@@ -1090,11 +1090,11 @@ static int __init process_ptload_program_headers_elf64(char *elfptr,
                                                 size_t elfnotes_sz,
                                                 struct list_head *vc_list)
 {
+        struct vmcore_mem_node *new;
         int i;
         Elf64_Ehdr *ehdr_ptr;
         Elf64_Phdr *phdr_ptr;
         loff_t vmcore_off;
-        struct vmcore *new;
 
         ehdr_ptr = (Elf64_Ehdr *)elfptr;
         phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */
@@ -1133,11 +1133,11 @@ static int __init process_ptload_program_headers_elf32(char *elfptr,
                                                 size_t elfnotes_sz,
                                                 struct list_head *vc_list)
 {
+        struct vmcore_mem_node *new;
         int i;
         Elf32_Ehdr *ehdr_ptr;
         Elf32_Phdr *phdr_ptr;
         loff_t vmcore_off;
-        struct vmcore *new;
 
         ehdr_ptr = (Elf32_Ehdr *)elfptr;
         phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */
@@ -1175,8 +1175,8 @@ static int __init process_ptload_program_headers_elf32(char *elfptr,
 static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
                                     struct list_head *vc_list)
 {
+        struct vmcore_mem_node *m;
         loff_t vmcore_off;
-        struct vmcore *m;
 
         /* Skip ELF header, program headers and ELF note segment. */
         vmcore_off = elfsz + elfnotes_sz;
@@ -1587,9 +1587,9 @@ void vmcore_cleanup(void)
 
         /* clear the vmcore list. */
         while (!list_empty(&vmcore_list)) {
-                struct vmcore *m;
+                struct vmcore_mem_node *m;
 
-                m = list_first_entry(&vmcore_list, struct vmcore, list);
+                m = list_first_entry(&vmcore_list, struct vmcore_mem_node, list);
                 list_del(&m->list);
                 kfree(m);
         }
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index acc55626afdc..5e48ab12c12b 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -114,10 +114,23 @@ struct vmcore_cb {
 extern void register_vmcore_cb(struct vmcore_cb *cb);
 extern void unregister_vmcore_cb(struct vmcore_cb *cb);
 
+struct vmcore_mem_node {
+        struct list_head list;
+        unsigned long long paddr;
+        unsigned long long size;
+        loff_t offset;
+};
+
 #else /* !CONFIG_CRASH_DUMP */
 static inline bool is_kdump_kernel(void) { return false; }
 #endif /* CONFIG_CRASH_DUMP */
 
+struct vmcoredd_node {
+        struct list_head list;  /* List of dumps */
+        void *buf;              /* Buffer containing device's dump */
+        unsigned int size;      /* Size of the buffer */
+};
+
 /* Device Dump information to be filled by drivers */
 struct vmcoredd_data {
         char dump_name[VMCOREDD_MAX_NAME_BYTES]; /* Unique name of the dump */
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index 86c0f1d18998..9a2fa013c91d 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -20,19 +20,6 @@ struct kcore_list {
         int type;
 };
 
-struct vmcore {
-        struct list_head list;
-        unsigned long long paddr;
-        unsigned long long size;
-        loff_t offset;
-};
-
-struct vmcoredd_node {
-        struct list_head list;  /* List of dumps */
-        void *buf;              /* Buffer containing device's dump */
-        unsigned int size;      /* Size of the buffer */
-};
-
 #ifdef CONFIG_PROC_KCORE
 void __init kclist_add(struct kcore_list *, void *, size_t, int type);
These defines are not related to /proc/kcore, move them to crash_dump.h
instead. While at it, rename "struct vmcore" to "struct
vmcore_mem_node", which is a more fitting name.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 fs/proc/vmcore.c           | 20 ++++++++++----------
 include/linux/crash_dump.h | 13 +++++++++++++
 include/linux/kcore.h      | 13 -------------
 3 files changed, 23 insertions(+), 23 deletions(-)