Message ID | 20230125233554.153109-6-surenb@google.com (mailing list archive)
---|---
State | New
Series | introduce vm_flags modifier functions
On Wed, Jan 25, 2023 at 03:35:52PM -0800, Suren Baghdasaryan wrote:
> Replace indirect modifications to vma->vm_flags with calls to modifier
> functions to be able to track flag changes and to keep vma locking
> correctness.
>
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> Acked-by: Michal Hocko <mhocko@suse.com>
> ---
>  arch/powerpc/kvm/book3s_hv_uvmem.c | 5 ++++-
>  arch/s390/mm/gmap.c                | 5 ++++-
>  2 files changed, 8 insertions(+), 2 deletions(-)
>
> diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
> index 1d67baa5557a..325a7a47d348 100644
> --- a/arch/powerpc/kvm/book3s_hv_uvmem.c
> +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
> @@ -393,6 +393,7 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm,
>  {
>  	unsigned long gfn = memslot->base_gfn;
>  	unsigned long end, start = gfn_to_hva(kvm, gfn);
> +	unsigned long vm_flags;
>  	int ret = 0;
>  	struct vm_area_struct *vma;
>  	int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;
> @@ -409,12 +410,14 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm,
>  			ret = H_STATE;
>  			break;
>  		}
> +		vm_flags = vma->vm_flags;
>  		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
> -				  merge_flag, &vma->vm_flags);
> +				  merge_flag, &vm_flags);
>  		if (ret) {
>  			ret = H_STATE;
>  			break;
>  		}
> +		reset_vm_flags(vma, vm_flags);
>  		start = vma->vm_end;
>  	} while (end > vma->vm_end);

Add a comment on why the vm_flags are copied in case someone "optimises"
this in the future? Something like

/* Copy vm_flags to avoid any partial modifications in ksm_madvise. */

>
> diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
> index 3a695b8a1e3c..d5eb47dcdacb 100644
> --- a/arch/s390/mm/gmap.c
> +++ b/arch/s390/mm/gmap.c
> @@ -2587,14 +2587,17 @@ int gmap_mark_unmergeable(void)
>  {
>  	struct mm_struct *mm = current->mm;
>  	struct vm_area_struct *vma;
> +	unsigned long vm_flags;
>  	int ret;
>  	VMA_ITERATOR(vmi, mm, 0);
>
>  	for_each_vma(vmi, vma) {
> +		vm_flags = vma->vm_flags;
>  		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
> -				  MADV_UNMERGEABLE, &vma->vm_flags);
> +				  MADV_UNMERGEABLE, &vm_flags);
>  		if (ret)
>  			return ret;
> +		reset_vm_flags(vma, vm_flags);

Same.

Not necessary as such as there are few users of ksm_madvise and I doubt
it'll introduce new surprises.

With or without the comment;

Acked-by: Mel Gorman <mgorman@techsingularity.net>
On Thu, Jan 26, 2023 at 7:19 AM Mel Gorman <mgorman@techsingularity.net> wrote:
>
> On Wed, Jan 25, 2023 at 03:35:52PM -0800, Suren Baghdasaryan wrote:
> > Replace indirect modifications to vma->vm_flags with calls to modifier
> > functions to be able to track flag changes and to keep vma locking
> > correctness.
> >
> > Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> > Acked-by: Michal Hocko <mhocko@suse.com>
> > ---
> >  arch/powerpc/kvm/book3s_hv_uvmem.c | 5 ++++-
> >  arch/s390/mm/gmap.c                | 5 ++++-
> >  2 files changed, 8 insertions(+), 2 deletions(-)
> >
> > diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
> > index 1d67baa5557a..325a7a47d348 100644
> > --- a/arch/powerpc/kvm/book3s_hv_uvmem.c
> > +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
> > @@ -393,6 +393,7 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm,
> >  {
> >  	unsigned long gfn = memslot->base_gfn;
> >  	unsigned long end, start = gfn_to_hva(kvm, gfn);
> > +	unsigned long vm_flags;
> >  	int ret = 0;
> >  	struct vm_area_struct *vma;
> >  	int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;
> > @@ -409,12 +410,14 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm,
> >  			ret = H_STATE;
> >  			break;
> >  		}
> > +		vm_flags = vma->vm_flags;
> >  		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
> > -				  merge_flag, &vma->vm_flags);
> > +				  merge_flag, &vm_flags);
> >  		if (ret) {
> >  			ret = H_STATE;
> >  			break;
> >  		}
> > +		reset_vm_flags(vma, vm_flags);
> >  		start = vma->vm_end;
> >  	} while (end > vma->vm_end);
>
> Add a comment on why the vm_flags are copied in case someone "optimises"
> this in the future? Something like
>
> /* Copy vm_flags to avoid any partial modifications in ksm_madvise. */

Ack.

>
> >
> > diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
> > index 3a695b8a1e3c..d5eb47dcdacb 100644
> > --- a/arch/s390/mm/gmap.c
> > +++ b/arch/s390/mm/gmap.c
> > @@ -2587,14 +2587,17 @@ int gmap_mark_unmergeable(void)
> >  {
> >  	struct mm_struct *mm = current->mm;
> >  	struct vm_area_struct *vma;
> > +	unsigned long vm_flags;
> >  	int ret;
> >  	VMA_ITERATOR(vmi, mm, 0);
> >
> >  	for_each_vma(vmi, vma) {
> > +		vm_flags = vma->vm_flags;
> >  		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
> > -				  MADV_UNMERGEABLE, &vma->vm_flags);
> > +				  MADV_UNMERGEABLE, &vm_flags);
> >  		if (ret)
> >  			return ret;
> > +		reset_vm_flags(vma, vm_flags);
>
> Same.
>
> Not necessary as such as there are few users of ksm_madvise and I doubt
> it'll introduce new surprises.
>
> With or without the comment;
>
> Acked-by: Mel Gorman <mgorman@techsingularity.net>

Thanks!

>
> --
> Mel Gorman
> SUSE Labs
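[Editor's note] For readers skimming the thread, the pattern being discussed is: snapshot the flags, let ksm_madvise() work on the copy, then apply the copy through the single modifier helper. The sketch below is a plain userspace analogue, not kernel code; fake_vma, fake_ksm_madvise() and fake_reset_vm_flags() are made-up stand-ins used only to show where the suggested comment would sit and why a failed helper call leaves the VMA's flags untouched.

```c
#include <stdio.h>

#define FAKE_VM_MERGEABLE (1UL << 0)

/* Toy stand-in for a VMA; only the flags word matters here. */
struct fake_vma {
	unsigned long vm_flags;
};

/* Stand-in for ksm_madvise(): operates on a caller-provided copy of the flags. */
static int fake_ksm_madvise(unsigned long *vm_flags)
{
	*vm_flags |= FAKE_VM_MERGEABLE;
	return 0;	/* on failure, the VMA's own flags are never touched */
}

/* Stand-in for the series' modifier helper: the one place flags get written. */
static void fake_reset_vm_flags(struct fake_vma *vma, unsigned long vm_flags)
{
	/* In the real series this is where locking/tracking hooks would live. */
	vma->vm_flags = vm_flags;
}

int main(void)
{
	struct fake_vma vma = { .vm_flags = 0 };

	/* Copy vm_flags to avoid any partial modifications in ksm_madvise. */
	unsigned long vm_flags = vma.vm_flags;

	if (fake_ksm_madvise(&vm_flags))
		return 1;

	fake_reset_vm_flags(&vma, vm_flags);
	printf("vm_flags = %#lx\n", vma.vm_flags);
	return 0;
}
```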
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 1d67baa5557a..325a7a47d348 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -393,6 +393,7 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm,
 {
 	unsigned long gfn = memslot->base_gfn;
 	unsigned long end, start = gfn_to_hva(kvm, gfn);
+	unsigned long vm_flags;
 	int ret = 0;
 	struct vm_area_struct *vma;
 	int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;
@@ -409,12 +410,14 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm,
 			ret = H_STATE;
 			break;
 		}
+		vm_flags = vma->vm_flags;
 		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
-				  merge_flag, &vma->vm_flags);
+				  merge_flag, &vm_flags);
 		if (ret) {
 			ret = H_STATE;
 			break;
 		}
+		reset_vm_flags(vma, vm_flags);
 		start = vma->vm_end;
 	} while (end > vma->vm_end);

diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 3a695b8a1e3c..d5eb47dcdacb 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2587,14 +2587,17 @@ int gmap_mark_unmergeable(void)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
+	unsigned long vm_flags;
 	int ret;
 	VMA_ITERATOR(vmi, mm, 0);

 	for_each_vma(vmi, vma) {
+		vm_flags = vma->vm_flags;
 		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
-				  MADV_UNMERGEABLE, &vma->vm_flags);
+				  MADV_UNMERGEABLE, &vm_flags);
 		if (ret)
 			return ret;
+		reset_vm_flags(vma, vm_flags);
 	}
 	mm->def_flags &= ~VM_MERGEABLE;
 	return 0;