Message ID | 20210329232402.575396-2-mike.kravetz@oracle.com
---|---
State | New, archived
Series | make hugetlb put_page safe for all calling contexts
On Mon, Mar 29, 2021 at 04:23:55PM -0700, Mike Kravetz wrote:
> Ideally, cma_release could be called from any context. However, that is
> not possible because a mutex is used to protect the per-area bitmap.
> Change the bitmap to an irq safe spinlock.
>
> Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>

Acked-by: Roman Gushchin <guro@fb.com>

Thanks!

> ---
>  mm/cma.c       | 20 +++++++++++---------
>  mm/cma.h       |  2 +-
>  mm/cma_debug.c | 10 ++++++----
>  3 files changed, 18 insertions(+), 14 deletions(-)
>
> [...]
> --
> 2.30.2
> -----Original Message-----
> From: Mike Kravetz [mailto:mike.kravetz@oracle.com]
> Sent: Tuesday, March 30, 2021 12:24 PM
> To: linux-mm@kvack.org; linux-kernel@vger.kernel.org
> Cc: Roman Gushchin <guro@fb.com>; Michal Hocko <mhocko@suse.com>; Shakeel Butt
> <shakeelb@google.com>; Oscar Salvador <osalvador@suse.de>; David Hildenbrand
> <david@redhat.com>; Muchun Song <songmuchun@bytedance.com>; David Rientjes
> <rientjes@google.com>; linmiaohe <linmiaohe@huawei.com>; Peter Zijlstra
> <peterz@infradead.org>; Matthew Wilcox <willy@infradead.org>; HORIGUCHI NAOYA
> <naoya.horiguchi@nec.com>; Aneesh Kumar K . V <aneesh.kumar@linux.ibm.com>;
> Waiman Long <longman@redhat.com>; Peter Xu <peterx@redhat.com>; Mina Almasry
> <almasrymina@google.com>; Hillf Danton <hdanton@sina.com>; Joonsoo Kim
> <iamjoonsoo.kim@lge.com>; Song Bao Hua (Barry Song)
> <song.bao.hua@hisilicon.com>; Will Deacon <will@kernel.org>; Andrew Morton
> <akpm@linux-foundation.org>; Mike Kravetz <mike.kravetz@oracle.com>
> Subject: [PATCH v2 1/8] mm/cma: change cma mutex to irq safe spinlock
>
> Ideally, cma_release could be called from any context. However, that is
> not possible because a mutex is used to protect the per-area bitmap.
> Change the bitmap to an irq safe spinlock.
>
> Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>

It seems mutex_lock is locking some areas with bitmap operations which
should be safe in atomic context.

Reviewed-by: Barry Song <song.bao.hua@hisilicon.com>

> ---
>  mm/cma.c       | 20 +++++++++++---------
>  mm/cma.h       |  2 +-
>  mm/cma_debug.c | 10 ++++++----
>  3 files changed, 18 insertions(+), 14 deletions(-)
>
> [...]
> --
> 2.30.2
On 3/29/21 6:20 PM, Song Bao Hua (Barry Song) wrote:
>
>> -----Original Message-----
>> From: Mike Kravetz [mailto:mike.kravetz@oracle.com]
>> Sent: Tuesday, March 30, 2021 12:24 PM
>> To: linux-mm@kvack.org; linux-kernel@vger.kernel.org
>> Cc: Roman Gushchin <guro@fb.com>; Michal Hocko <mhocko@suse.com>; Shakeel Butt
>> <shakeelb@google.com>; Oscar Salvador <osalvador@suse.de>; David Hildenbrand
>> <david@redhat.com>; Muchun Song <songmuchun@bytedance.com>; David Rientjes
>> <rientjes@google.com>; linmiaohe <linmiaohe@huawei.com>; Peter Zijlstra
>> <peterz@infradead.org>; Matthew Wilcox <willy@infradead.org>; HORIGUCHI NAOYA
>> <naoya.horiguchi@nec.com>; Aneesh Kumar K . V <aneesh.kumar@linux.ibm.com>;
>> Waiman Long <longman@redhat.com>; Peter Xu <peterx@redhat.com>; Mina Almasry
>> <almasrymina@google.com>; Hillf Danton <hdanton@sina.com>; Joonsoo Kim
>> <iamjoonsoo.kim@lge.com>; Song Bao Hua (Barry Song)
>> <song.bao.hua@hisilicon.com>; Will Deacon <will@kernel.org>; Andrew Morton
>> <akpm@linux-foundation.org>; Mike Kravetz <mike.kravetz@oracle.com>
>> Subject: [PATCH v2 1/8] mm/cma: change cma mutex to irq safe spinlock
>>
>> Ideally, cma_release could be called from any context. However, that is
>> not possible because a mutex is used to protect the per-area bitmap.
>> Change the bitmap to an irq safe spinlock.
>>
>> Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
>
> It seems mutex_lock is locking some areas with bitmap operations which
> should be safe in atomic context.
>
> Reviewed-by: Barry Song <song.bao.hua@hisilicon.com>

Thanks Barry,

Not sure if you saw the questions from Michal on the previous series?

There was some concern from Joonsoo in the past about lock hold time due
to bitmap scans. You may have some insight into the typical size of CMA
areas on arm64. I believe the calls to set up the areas specify one bit
per page.
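To put rough numbers on the bitmap-scan concern, here is a small sketch (not
from the thread) of the per-area bitmap footprint. It assumes 4 KiB pages and
order_per_bit == 0, i.e. exactly one bit per page as mentioned above; a larger
order_per_bit shrinks the bitmap proportionally.

#include <stdio.h>

int main(void)
{
	const unsigned long long page_size = 4096;	/* assumed 4 KiB pages */
	const unsigned long long sizes[] = {
		1ULL << 30,	/* 1 GiB CMA area */
		1ULL << 40,	/* 1 TiB CMA area */
	};

	for (int i = 0; i < 2; i++) {
		unsigned long long bits  = sizes[i] / page_size;	/* one bit per page */
		unsigned long long bytes = bits / 8;			/* bitmap footprint */
		printf("%llu byte area -> %llu bitmap bits (%llu bytes to scan)\n",
		       sizes[i], bits, bytes);
	}
	return 0;
}

Under those assumptions a 1 GiB area needs a 32 KiB bitmap and a 1 TiB area
roughly 32 MiB, which is where the concern about lock hold time for very large
areas comes from.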
On Mon 29-03-21 16:23:55, Mike Kravetz wrote:
> Ideally, cma_release could be called from any context. However, that is
> not possible because a mutex is used to protect the per-area bitmap.
> Change the bitmap to an irq safe spinlock.

I would phrase the changelog slightly differently
"
cma_release is currently a sleepable operation because the bitmap
manipulation is protected by cma->lock mutex. Hugetlb code which relies
on cma_release for CMA backed (giga) hugetlb pages, however, needs to be
irq safe.

The lock doesn't protect any sleepable operation so it can be changed to
a (irq aware) spin lock. The bitmap processing should be quite fast in
typical case but if cma sizes grow to TB then we will likely need to
replace the lock by a more optimized bitmap implementation.
"

It seems that you are overusing irqsave variants even from contexts which
are never called from the IRQ context, so they do not need storing flags.

[...]
> @@ -391,8 +391,9 @@ static void cma_debug_show_areas(struct cma *cma)
>  	unsigned long start = 0;
>  	unsigned long nr_part, nr_total = 0;
>  	unsigned long nbits = cma_bitmap_maxno(cma);
> +	unsigned long flags;
>
> -	mutex_lock(&cma->lock);
> +	spin_lock_irqsave(&cma->lock, flags);

spin_lock_irq should be sufficient. This is only called from the
allocation context and that is never called from IRQ context.

[...]
> @@ -454,12 +456,12 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
>  		goto out;
>
>  	for (;;) {
> -		mutex_lock(&cma->lock);
> +		spin_lock_irqsave(&cma->lock, flags);
>  		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
>  				bitmap_maxno, start, bitmap_count, mask,
>  				offset);
>  		if (bitmap_no >= bitmap_maxno) {
> -			mutex_unlock(&cma->lock);
> +			spin_unlock_irqrestore(&cma->lock, flags);
>  			break;
>  		}
>  		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);

same here.

[...]
> @@ -35,11 +35,12 @@ static int cma_used_get(void *data, u64 *val)
>  {
>  	struct cma *cma = data;
>  	unsigned long used;
> +	unsigned long flags;
>
> -	mutex_lock(&cma->lock);
> +	spin_lock_irqsave(&cma->lock, flags);
>  	/* pages counter is smaller than sizeof(int) */
>  	used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
> -	mutex_unlock(&cma->lock);
> +	spin_unlock_irqrestore(&cma->lock, flags);
>  	*val = (u64)used << cma->order_per_bit;

same here

>
>  	return 0;
> @@ -52,8 +53,9 @@ static int cma_maxchunk_get(void *data, u64 *val)
>  	unsigned long maxchunk = 0;
>  	unsigned long start, end = 0;
>  	unsigned long bitmap_maxno = cma_bitmap_maxno(cma);
> +	unsigned long flags;
>
> -	mutex_lock(&cma->lock);
> +	spin_lock_irqsave(&cma->lock, flags);
>  	for (;;) {
>  		start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
>  		if (start >= bitmap_maxno)
> @@ -61,7 +63,7 @@ static int cma_maxchunk_get(void *data, u64 *val)
>  		end = find_next_bit(cma->bitmap, bitmap_maxno, start);
>  		maxchunk = max(end - start, maxchunk);
>  	}
> -	mutex_unlock(&cma->lock);
> +	spin_unlock_irqrestore(&cma->lock, flags);
>  	*val = (u64)maxchunk << cma->order_per_bit;
>
>  	return 0;

and here.
On Tue, Mar 30, 2021 at 4:01 PM Michal Hocko <mhocko@suse.com> wrote:
>
> On Mon 29-03-21 16:23:55, Mike Kravetz wrote:
> > Ideally, cma_release could be called from any context. However, that is
> > not possible because a mutex is used to protect the per-area bitmap.
> > Change the bitmap to an irq safe spinlock.
>
> I would phrase the changelog slightly differently
> "
> cma_release is currently a sleepable operation because the bitmap
> manipulation is protected by cma->lock mutex. Hugetlb code which relies
> on cma_release for CMA backed (giga) hugetlb pages, however, needs to be
> irq safe.
>
> The lock doesn't protect any sleepable operation so it can be changed to
> a (irq aware) spin lock. The bitmap processing should be quite fast in
> typical case but if cma sizes grow to TB then we will likely need to
> replace the lock by a more optimized bitmap implementation.
> "
>
> It seems that you are overusing irqsave variants even from contexts which
> are never called from the IRQ context, so they do not need storing flags.
>
> [...]
> > @@ -391,8 +391,9 @@ static void cma_debug_show_areas(struct cma *cma)
> >  	unsigned long start = 0;
> >  	unsigned long nr_part, nr_total = 0;
> >  	unsigned long nbits = cma_bitmap_maxno(cma);
> > +	unsigned long flags;
> >
> > -	mutex_lock(&cma->lock);
> > +	spin_lock_irqsave(&cma->lock, flags);
>
> spin_lock_irq should be sufficient. This is only called from the
> allocation context and that is never called from IRQ context.

This makes me think more. I think that spin_lock should be
sufficient. Right?

[...]

> --
> Michal Hocko
> SUSE Labs
> -----Original Message-----
> From: Muchun Song [mailto:songmuchun@bytedance.com]
> Sent: Tuesday, March 30, 2021 9:09 PM
> To: Michal Hocko <mhocko@suse.com>
> Cc: Mike Kravetz <mike.kravetz@oracle.com>; Linux Memory Management List
> <linux-mm@kvack.org>; LKML <linux-kernel@vger.kernel.org>; Roman Gushchin
> <guro@fb.com>; Shakeel Butt <shakeelb@google.com>; Oscar Salvador
> <osalvador@suse.de>; David Hildenbrand <david@redhat.com>; David Rientjes
> <rientjes@google.com>; linmiaohe <linmiaohe@huawei.com>; Peter Zijlstra
> <peterz@infradead.org>; Matthew Wilcox <willy@infradead.org>; HORIGUCHI NAOYA
> <naoya.horiguchi@nec.com>; Aneesh Kumar K . V <aneesh.kumar@linux.ibm.com>;
> Waiman Long <longman@redhat.com>; Peter Xu <peterx@redhat.com>; Mina Almasry
> <almasrymina@google.com>; Hillf Danton <hdanton@sina.com>; Joonsoo Kim
> <iamjoonsoo.kim@lge.com>; Song Bao Hua (Barry Song)
> <song.bao.hua@hisilicon.com>; Will Deacon <will@kernel.org>; Andrew Morton
> <akpm@linux-foundation.org>
> Subject: Re: [External] Re: [PATCH v2 1/8] mm/cma: change cma mutex to irq safe
> spinlock
>
> On Tue, Mar 30, 2021 at 4:01 PM Michal Hocko <mhocko@suse.com> wrote:
> >
> > On Mon 29-03-21 16:23:55, Mike Kravetz wrote:
> > > Ideally, cma_release could be called from any context. However,
> > > that is not possible because a mutex is used to protect the per-area bitmap.
> > > Change the bitmap to an irq safe spinlock.
> >
> > [...]
> >
> > > @@ -391,8 +391,9 @@ static void cma_debug_show_areas(struct cma *cma)
> > >  	unsigned long start = 0;
> > >  	unsigned long nr_part, nr_total = 0;
> > >  	unsigned long nbits = cma_bitmap_maxno(cma);
> > > +	unsigned long flags;
> > >
> > > -	mutex_lock(&cma->lock);
> > > +	spin_lock_irqsave(&cma->lock, flags);
> >
> > spin_lock_irq should be sufficient. This is only called from the
> > allocation context and that is never called from IRQ context.
>
> This makes me think more. I think that spin_lock should be sufficient. Right?
>

It seems Mike's point is that cma_release might be called from both irq
context and process context. If it is running in process context, we need
the irq-disable to keep out an irq context which might jump in and call
cma_release, taking the same lock, at the same time. We have never seen
cma_release actually called from irq context so far, anyway.

[...]
> > --
> > Michal Hocko
> > SUSE Labs
On Tue 30-03-21 16:08:36, Muchun Song wrote:
> On Tue, Mar 30, 2021 at 4:01 PM Michal Hocko <mhocko@suse.com> wrote:
> >
> > On Mon 29-03-21 16:23:55, Mike Kravetz wrote:
> > > Ideally, cma_release could be called from any context. However, that is
> > > not possible because a mutex is used to protect the per-area bitmap.
> > > Change the bitmap to an irq safe spinlock.
> >
> > [...]
> >
> > > @@ -391,8 +391,9 @@ static void cma_debug_show_areas(struct cma *cma)
> > >  	unsigned long start = 0;
> > >  	unsigned long nr_part, nr_total = 0;
> > >  	unsigned long nbits = cma_bitmap_maxno(cma);
> > > +	unsigned long flags;
> > >
> > > -	mutex_lock(&cma->lock);
> > > +	spin_lock_irqsave(&cma->lock, flags);
> >
> > spin_lock_irq should be sufficient. This is only called from the
> > allocation context and that is never called from IRQ context.
>
> This makes me think more. I think that spin_lock should be
> sufficient. Right?

Nope. Think of the following scenario

	spin_lock(cma->lock);
	<IRQ>
	put_page
	__free_huge_page
	cma_release
	spin_lock_irqsave() DEADLOCK
On Tue, Mar 30, 2021 at 4:18 PM Michal Hocko <mhocko@suse.com> wrote:
>
> On Tue 30-03-21 16:08:36, Muchun Song wrote:
> > [...]
> > This makes me think more. I think that spin_lock should be
> > sufficient. Right?
>
> Nope. Think of the following scenario
>
> 	spin_lock(cma->lock);
> 	<IRQ>
> 	put_page
> 	__free_huge_page
> 	cma_release
> 	spin_lock_irqsave() DEADLOCK

Got it. Thanks.

> --
> Michal Hocko
> SUSE Labs
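Spelled out as a minimal kernel-style sketch, the scenario Michal describes
looks like the following. This is illustrative only; the demo_* names are made
up and are not code from the patch.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* stand-in for cma->lock */

static void demo_process_context_broken(void)
{
	spin_lock(&demo_lock);		/* plain spin_lock: local IRQs stay enabled */
	/*
	 * If an interrupt fires here and its handler ends up in
	 * put_page() -> __free_huge_page() -> cma_release(), that path
	 * tries to take demo_lock again on the same CPU and spins forever.
	 */
	spin_unlock(&demo_lock);
}

static void demo_process_context_safe(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* local IRQs disabled while held */
	/* cma_release() from an interrupt can no longer preempt this section */
	spin_unlock_irqrestore(&demo_lock, flags);
}

Disabling local IRQs while the lock is held is what makes it safe to also take
the lock from cma_release() in interrupt context.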
On 3/30/21 1:01 AM, Michal Hocko wrote:
> On Mon 29-03-21 16:23:55, Mike Kravetz wrote:
>> Ideally, cma_release could be called from any context. However, that is
>> not possible because a mutex is used to protect the per-area bitmap.
>> Change the bitmap to an irq safe spinlock.
>
> I would phrase the changelog slightly differently
> "
> cma_release is currently a sleepable operation because the bitmap
> manipulation is protected by cma->lock mutex. Hugetlb code which relies
> on cma_release for CMA backed (giga) hugetlb pages, however, needs to be
> irq safe.
>
> The lock doesn't protect any sleepable operation so it can be changed to
> a (irq aware) spin lock. The bitmap processing should be quite fast in
> typical case but if cma sizes grow to TB then we will likely need to
> replace the lock by a more optimized bitmap implementation.
> "

That is better. Thank you.

> It seems that you are overusing irqsave variants even from contexts which
> are never called from the IRQ context, so they do not need storing flags.
>
> [...]

Yes.

>> @@ -391,8 +391,9 @@ static void cma_debug_show_areas(struct cma *cma)
>>  	unsigned long start = 0;
>>  	unsigned long nr_part, nr_total = 0;
>>  	unsigned long nbits = cma_bitmap_maxno(cma);
>> +	unsigned long flags;
>>
>> -	mutex_lock(&cma->lock);
>> +	spin_lock_irqsave(&cma->lock, flags);
>
> spin_lock_irq should be sufficient. This is only called from the
> allocation context and that is never called from IRQ context.

I will change this and those below.
Thanks for your continued reviews and patience.
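For reference, a small sketch of the split being agreed on here: spin_lock_irq()
where the caller is known to run in process context with IRQs enabled, and
spin_lock_irqsave() where the function may be reached with IRQs already
disabled or from interrupt context. The demo_* helpers are hypothetical, not
code from the patch.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Allocation/debugfs side: only ever called from process context. */
static void demo_alloc_side(void)
{
	spin_lock_irq(&demo_lock);	/* no need to save/restore flags */
	/* ... scan the bitmap ... */
	spin_unlock_irq(&demo_lock);	/* unconditionally re-enables local IRQs */
}

/* Release side: may be reached from any context, even with IRQs off. */
static void demo_release_side(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* remembers the caller's IRQ state */
	/* ... clear the bits ... */
	spin_unlock_irqrestore(&demo_lock, flags);	/* restores that state */
}

The flags dance is only needed where the caller's IRQ state is unknown, which
is why the cma_release() path keeps the irqsave variant while the
allocation-only paths can use the cheaper _irq form.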
diff --git a/mm/cma.c b/mm/cma.c
index b2393b892d3b..80875fd4487b 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -24,7 +24,6 @@
 #include <linux/memblock.h>
 #include <linux/err.h>
 #include <linux/mm.h>
-#include <linux/mutex.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/log2.h>
@@ -83,13 +82,14 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
 			     unsigned int count)
 {
 	unsigned long bitmap_no, bitmap_count;
+	unsigned long flags;
 
 	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
 	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
 
-	mutex_lock(&cma->lock);
+	spin_lock_irqsave(&cma->lock, flags);
 	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
-	mutex_unlock(&cma->lock);
+	spin_unlock_irqrestore(&cma->lock, flags);
 }
 
 static void __init cma_activate_area(struct cma *cma)
@@ -118,7 +118,7 @@ static void __init cma_activate_area(struct cma *cma)
 	     pfn += pageblock_nr_pages)
 		init_cma_reserved_pageblock(pfn_to_page(pfn));
 
-	mutex_init(&cma->lock);
+	spin_lock_init(&cma->lock);
 
 #ifdef CONFIG_CMA_DEBUGFS
 	INIT_HLIST_HEAD(&cma->mem_head);
@@ -391,8 +391,9 @@ static void cma_debug_show_areas(struct cma *cma)
 	unsigned long start = 0;
 	unsigned long nr_part, nr_total = 0;
 	unsigned long nbits = cma_bitmap_maxno(cma);
+	unsigned long flags;
 
-	mutex_lock(&cma->lock);
+	spin_lock_irqsave(&cma->lock, flags);
 	pr_info("number of available pages: ");
 	for (;;) {
 		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
@@ -407,7 +408,7 @@ static void cma_debug_show_areas(struct cma *cma)
 		start = next_zero_bit + nr_zero;
 	}
 	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
-	mutex_unlock(&cma->lock);
+	spin_unlock_irqrestore(&cma->lock, flags);
 }
 #else
 static inline void cma_debug_show_areas(struct cma *cma) { }
@@ -430,6 +431,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 	unsigned long pfn = -1;
 	unsigned long start = 0;
 	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
+	unsigned long flags;
 	size_t i;
 	struct page *page = NULL;
 	int ret = -ENOMEM;
@@ -454,12 +456,12 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 		goto out;
 
 	for (;;) {
-		mutex_lock(&cma->lock);
+		spin_lock_irqsave(&cma->lock, flags);
 		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
 				bitmap_maxno, start, bitmap_count, mask,
 				offset);
 		if (bitmap_no >= bitmap_maxno) {
-			mutex_unlock(&cma->lock);
+			spin_unlock_irqrestore(&cma->lock, flags);
 			break;
 		}
 		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
@@ -468,7 +470,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 		 * our exclusive use. If the migration fails we will take the
 		 * lock again and unmark it.
 		 */
-		mutex_unlock(&cma->lock);
+		spin_unlock_irqrestore(&cma->lock, flags);
 
 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
diff --git a/mm/cma.h b/mm/cma.h
index 68ffad4e430d..2c775877eae2 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -15,7 +15,7 @@ struct cma {
 	unsigned long count;
 	unsigned long *bitmap;
 	unsigned int order_per_bit; /* Order of pages represented by one bit */
-	struct mutex lock;
+	spinlock_t lock;
 #ifdef CONFIG_CMA_DEBUGFS
 	struct hlist_head mem_head;
 	spinlock_t mem_head_lock;
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index d5bf8aa34fdc..6379cfbfd568 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -35,11 +35,12 @@ static int cma_used_get(void *data, u64 *val)
 {
 	struct cma *cma = data;
 	unsigned long used;
+	unsigned long flags;
 
-	mutex_lock(&cma->lock);
+	spin_lock_irqsave(&cma->lock, flags);
 	/* pages counter is smaller than sizeof(int) */
 	used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
-	mutex_unlock(&cma->lock);
+	spin_unlock_irqrestore(&cma->lock, flags);
 	*val = (u64)used << cma->order_per_bit;
 
 	return 0;
@@ -52,8 +53,9 @@ static int cma_maxchunk_get(void *data, u64 *val)
 	unsigned long maxchunk = 0;
 	unsigned long start, end = 0;
 	unsigned long bitmap_maxno = cma_bitmap_maxno(cma);
+	unsigned long flags;
 
-	mutex_lock(&cma->lock);
+	spin_lock_irqsave(&cma->lock, flags);
 	for (;;) {
 		start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
 		if (start >= bitmap_maxno)
@@ -61,7 +63,7 @@ static int cma_maxchunk_get(void *data, u64 *val)
 		end = find_next_bit(cma->bitmap, bitmap_maxno, start);
 		maxchunk = max(end - start, maxchunk);
 	}
-	mutex_unlock(&cma->lock);
+	spin_unlock_irqrestore(&cma->lock, flags);
 	*val = (u64)maxchunk << cma->order_per_bit;
 
 	return 0;
Ideally, cma_release could be called from any context. However, that is
not possible because a mutex is used to protect the per-area bitmap.
Change the bitmap to an irq safe spinlock.

Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
---
 mm/cma.c       | 20 +++++++++++---------
 mm/cma.h       |  2 +-
 mm/cma_debug.c | 10 ++++++----
 3 files changed, 18 insertions(+), 14 deletions(-)