mm/cma: export total and free number of pages for CMA areas

Message ID 20240709163053.2514760-1-fvdl@google.com (mailing list archive)
State New
Series mm/cma: export total and free number of pages for CMA areas

Commit Message

Frank van der Linden July 9, 2024, 4:30 p.m. UTC
In addition to the number of allocations and releases, system
management software may want to know the size of CMA areas, and
how many pages are still available in them. This information is
currently not available to userspace, so export it as total_pages
and available_pages, respectively.

The name 'available_pages' was picked over 'free_pages' because
'free' implies that the pages are unused. But they might not be;
they just haven't been allocated through cma_alloc() yet.

The number of available pages is tracked regardless of
CONFIG_CMA_SYSFS, allowing for a few minor shortcuts in
the code, avoiding bitmap operations.

Signed-off-by: Frank van der Linden <fvdl@google.com>
---
 Documentation/ABI/testing/sysfs-kernel-mm-cma | 13 ++++++++++++
 mm/cma.c                                      | 18 +++++++++++------
 mm/cma.h                                      |  1 +
 mm/cma_debug.c                                |  5 +----
 mm/cma_sysfs.c                                | 20 +++++++++++++++++++
 5 files changed, 47 insertions(+), 10 deletions(-)
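
To give a sense of how the new attributes would be consumed, here is a
small illustrative userspace sketch that reads them. The area name
"reserved" is only a placeholder, since actual CMA area names depend on
how the areas were configured on a given system.

/*
 * Illustrative sketch only: read the total_pages and available_pages
 * attributes added by this patch. The area name "reserved" is a
 * placeholder; real names depend on how the CMA areas were set up.
 */
#include <stdio.h>

static unsigned long read_cma_attr(const char *area, const char *attr)
{
	char path[128];
	unsigned long val = 0;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/mm/cma/%s/%s", area, attr);
	f = fopen(path, "r");
	if (!f)
		return 0;
	if (fscanf(f, "%lu", &val) != 1)
		val = 0;
	fclose(f);
	return val;
}

int main(void)
{
	const char *area = "reserved";	/* placeholder CMA area name */
	unsigned long total = read_cma_attr(area, "total_pages");
	unsigned long avail = read_cma_attr(area, "available_pages");

	printf("%s: %lu of %lu pages available\n", area, avail, total);
	return 0;
}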

Comments

Frank van der Linden July 22, 2024, 4:24 p.m. UTC | #1
On Tue, Jul 9, 2024 at 9:31 AM Frank van der Linden <fvdl@google.com> wrote:
>
> In addition to the number of allocations and releases, system
> management software may want to know the size of CMA areas, and
> how many pages are still available in them. This information is
> currently not available to userspace, so export it as total_pages
> and available_pages, respectively.
>
> The name 'available_pages' was picked over 'free_pages' because
> 'free' implies that the pages are unused. But they might not be;
> they just haven't been allocated through cma_alloc() yet.
>
> The number of available pages is tracked regardless of
> CONFIG_CMA_SYSFS, allowing for a few minor shortcuts in
> the code, avoiding bitmap operations.
>
> Signed-off-by: Frank van der Linden <fvdl@google.com>
> ---
>  Documentation/ABI/testing/sysfs-kernel-mm-cma | 13 ++++++++++++
>  mm/cma.c                                      | 18 +++++++++++------
>  mm/cma.h                                      |  1 +
>  mm/cma_debug.c                                |  5 +----
>  mm/cma_sysfs.c                                | 20 +++++++++++++++++++
>  5 files changed, 47 insertions(+), 10 deletions(-)
>
> diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-cma b/Documentation/ABI/testing/sysfs-kernel-mm-cma
> index dfd755201142..aaf2a5d8b13b 100644
> --- a/Documentation/ABI/testing/sysfs-kernel-mm-cma
> +++ b/Documentation/ABI/testing/sysfs-kernel-mm-cma
> @@ -29,3 +29,16 @@ Date:                Feb 2024
>  Contact:       Anshuman Khandual <anshuman.khandual@arm.com>
>  Description:
>                 the number of pages CMA API succeeded to release
> +
> +What:          /sys/kernel/mm/cma/<cma-heap-name>/total_pages
> +Date:          Jun 2024
> +Contact:       Frank van der Linden <fvdl@google.com>
> +Description:
> +               The size of the CMA area in pages.
> +
> +What:          /sys/kernel/mm/cma/<cma-heap-name>/available_pages
> +Date:          Jun 2024
> +Contact:       Frank van der Linden <fvdl@google.com>
> +Description:
> +               The number of pages in the CMA area that are still
> +               available for CMA allocation.
> diff --git a/mm/cma.c b/mm/cma.c
> index 3e9724716bad..727e8c04d53a 100644
> --- a/mm/cma.c
> +++ b/mm/cma.c
> @@ -86,6 +86,7 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
>
>         spin_lock_irqsave(&cma->lock, flags);
>         bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
> +       cma->available_count += count;
>         spin_unlock_irqrestore(&cma->lock, flags);
>  }
>
> @@ -133,7 +134,7 @@ static void __init cma_activate_area(struct cma *cma)
>                         free_reserved_page(pfn_to_page(pfn));
>         }
>         totalcma_pages -= cma->count;
> -       cma->count = 0;
> +       cma->available_count = cma->count = 0;
>         pr_err("CMA area %s could not be activated\n", cma->name);
>         return;
>  }
> @@ -198,7 +199,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
>                 snprintf(cma->name, CMA_MAX_NAME,  "cma%d\n", cma_area_count);
>
>         cma->base_pfn = PFN_DOWN(base);
> -       cma->count = size >> PAGE_SHIFT;
> +       cma->available_count = cma->count = size >> PAGE_SHIFT;
>         cma->order_per_bit = order_per_bit;
>         *res_cma = cma;
>         cma_area_count++;
> @@ -382,7 +383,7 @@ static void cma_debug_show_areas(struct cma *cma)
>  {
>         unsigned long next_zero_bit, next_set_bit, nr_zero;
>         unsigned long start = 0;
> -       unsigned long nr_part, nr_total = 0;
> +       unsigned long nr_part;
>         unsigned long nbits = cma_bitmap_maxno(cma);
>
>         spin_lock_irq(&cma->lock);
> @@ -394,12 +395,12 @@ static void cma_debug_show_areas(struct cma *cma)
>                 next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
>                 nr_zero = next_set_bit - next_zero_bit;
>                 nr_part = nr_zero << cma->order_per_bit;
> -               pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
> +               pr_cont("%s%lu@%lu", start ? "+" : "", nr_part,
>                         next_zero_bit);
> -               nr_total += nr_part;
>                 start = next_zero_bit + nr_zero;
>         }
> -       pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
> +       pr_cont("=> %lu free of %lu total pages\n", cma->available_count,
> +                       cma->count);
>         spin_unlock_irq(&cma->lock);
>  }
>
> @@ -446,6 +447,10 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
>
>         for (;;) {
>                 spin_lock_irq(&cma->lock);
> +               if (count > cma->available_count) {
> +                       spin_unlock_irq(&cma->lock);
> +                       break;
> +               }
>                 bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
>                                 bitmap_maxno, start, bitmap_count, mask,
>                                 offset);
> @@ -454,6 +459,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
>                         break;
>                 }
>                 bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
> +               cma->available_count -= count;
>                 /*
>                  * It's safe to drop the lock here. We've marked this region for
>                  * our exclusive use. If the migration fails we will take the
> diff --git a/mm/cma.h b/mm/cma.h
> index ad61cc6dd439..d111f3d51868 100644
> --- a/mm/cma.h
> +++ b/mm/cma.h
> @@ -13,6 +13,7 @@ struct cma_kobject {
>  struct cma {
>         unsigned long   base_pfn;
>         unsigned long   count;
> +       unsigned long   available_count;
>         unsigned long   *bitmap;
>         unsigned int order_per_bit; /* Order of pages represented by one bit */
>         spinlock_t      lock;
> diff --git a/mm/cma_debug.c b/mm/cma_debug.c
> index 602fff89b15f..89236f22230a 100644
> --- a/mm/cma_debug.c
> +++ b/mm/cma_debug.c
> @@ -34,13 +34,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");
>  static int cma_used_get(void *data, u64 *val)
>  {
>         struct cma *cma = data;
> -       unsigned long used;
>
>         spin_lock_irq(&cma->lock);
> -       /* pages counter is smaller than sizeof(int) */
> -       used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
> +       *val = cma->count - cma->available_count;
>         spin_unlock_irq(&cma->lock);
> -       *val = (u64)used << cma->order_per_bit;
>
>         return 0;
>  }
> diff --git a/mm/cma_sysfs.c b/mm/cma_sysfs.c
> index f50db3973171..97acd3e5a6a5 100644
> --- a/mm/cma_sysfs.c
> +++ b/mm/cma_sysfs.c
> @@ -62,6 +62,24 @@ static ssize_t release_pages_success_show(struct kobject *kobj,
>  }
>  CMA_ATTR_RO(release_pages_success);
>
> +static ssize_t total_pages_show(struct kobject *kobj,
> +                                         struct kobj_attribute *attr, char *buf)
> +{
> +       struct cma *cma = cma_from_kobj(kobj);
> +
> +       return sysfs_emit(buf, "%lu\n", cma->count);
> +}
> +CMA_ATTR_RO(total_pages);
> +
> +static ssize_t available_pages_show(struct kobject *kobj,
> +                                         struct kobj_attribute *attr, char *buf)
> +{
> +       struct cma *cma = cma_from_kobj(kobj);
> +
> +       return sysfs_emit(buf, "%lu\n", cma->available_count);
> +}
> +CMA_ATTR_RO(available_pages);
> +
>  static void cma_kobj_release(struct kobject *kobj)
>  {
>         struct cma *cma = cma_from_kobj(kobj);
> @@ -75,6 +93,8 @@ static struct attribute *cma_attrs[] = {
>         &alloc_pages_success_attr.attr,
>         &alloc_pages_fail_attr.attr,
>         &release_pages_success_attr.attr,
> +       &total_pages_attr.attr,
> +       &available_pages_attr.attr,
>         NULL,
>  };
>  ATTRIBUTE_GROUPS(cma);
> --
> 2.45.2.803.g4e1b14247a-goog
>

I realize that this isn't the most exciting patch, but.. any comments? :)

- Frank
Andrew Morton July 24, 2024, 7:48 p.m. UTC | #2
On Tue,  9 Jul 2024 16:30:53 +0000 Frank van der Linden <fvdl@google.com> wrote:

> In addition to the number of allocations and releases, system
> management software may want to know the size of CMA areas, and
> how many pages are still available in them. This information is
> currently not available to userspace, so export it as total_pages
> and available_pages, respectively.
>
> The name 'available_pages' was picked over 'free_pages' because
> 'free' implies that the pages are unused. But they might not be;
> they just haven't been allocated through cma_alloc() yet.
> 
> The number of available pages is tracked regardless of
> CONFIG_CMA_SYSFS, allowing for a few minor shortcuts in
> the code, avoiding bitmap operations.
> 
> ...
>
> @@ -382,7 +383,7 @@ static void cma_debug_show_areas(struct cma *cma)
>  {
>  	unsigned long next_zero_bit, next_set_bit, nr_zero;
>  	unsigned long start = 0;
> -	unsigned long nr_part, nr_total = 0;
> +	unsigned long nr_part;
>  	unsigned long nbits = cma_bitmap_maxno(cma);
>  
>  	spin_lock_irq(&cma->lock);
> @@ -394,12 +395,12 @@ static void cma_debug_show_areas(struct cma *cma)
>  		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
>  		nr_zero = next_set_bit - next_zero_bit;
>  		nr_part = nr_zero << cma->order_per_bit;
> -		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
> +		pr_cont("%s%lu@%lu", start ? "+" : "", nr_part,
>  			next_zero_bit);
> -		nr_total += nr_part;
>  		start = next_zero_bit + nr_zero;
>  	}

Can you please explain the above change?

> -	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
> +	pr_cont("=> %lu free of %lu total pages\n", cma->available_count,
> +			cma->count);
>  	spin_unlock_irq(&cma->lock);
>  }
>  
> @@ -446,6 +447,10 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
>  
>  	for (;;) {
>  		spin_lock_irq(&cma->lock);
> +		if (count > cma->available_count) {

Right here would be a nice place for a comment?

> +			spin_unlock_irq(&cma->lock);
> +			break;
> +		}
>  		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
>  				bitmap_maxno, start, bitmap_count, mask,
>  				offset);
Andrew Morton July 24, 2024, 7:50 p.m. UTC | #3
On Mon, 22 Jul 2024 09:24:58 -0700 Frank van der Linden <fvdl@google.com> wrote:

> I realize that this isn't the most exciting patch, but.. any comments? :)

It arrived quite late in the -rc cycle.

I suggest a resend, and add some Cc:s for likely reviewers.
Frank van der Linden July 25, 2024, 4:18 p.m. UTC | #4
On Wed, Jul 24, 2024 at 12:49 PM Andrew Morton
<akpm@linux-foundation.org> wrote:
>
> On Tue,  9 Jul 2024 16:30:53 +0000 Frank van der Linden <fvdl@google.com> wrote:
>
> > In addition to the number of allocations and releases, system
> > management software may want to know the size of CMA areas, and
> > how many pages are still available in them. This information is
> > currently not available to userspace, so export it as total_pages
> > and available_pages, respectively.
> >
> > The name 'available_pages' was picked over 'free_pages' because
> > 'free' implies that the pages are unused. But they might not be;
> > they just haven't been allocated through cma_alloc() yet.
> >
> > The number of available pages is tracked regardless of
> > CONFIG_CMA_SYSFS, allowing for a few minor shortcuts in
> > the code, avoiding bitmap operations.
> >
> > ...
> >
> > @@ -382,7 +383,7 @@ static void cma_debug_show_areas(struct cma *cma)
> >  {
> >       unsigned long next_zero_bit, next_set_bit, nr_zero;
> >       unsigned long start = 0;
> > -     unsigned long nr_part, nr_total = 0;
> > +     unsigned long nr_part;
> >       unsigned long nbits = cma_bitmap_maxno(cma);
> >
> >       spin_lock_irq(&cma->lock);
> > @@ -394,12 +395,12 @@ static void cma_debug_show_areas(struct cma *cma)
> >               next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
> >               nr_zero = next_set_bit - next_zero_bit;
> >               nr_part = nr_zero << cma->order_per_bit;
> > -             pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
> > +             pr_cont("%s%lu@%lu", start ? "+" : "", nr_part,
> >                       next_zero_bit);
> > -             nr_total += nr_part;
> >               start = next_zero_bit + nr_zero;
> >       }
>
> Can you please explain the above change?

Sure - there's no longer a need to keep a separate count of available
pages for the debug output, since that's already tracked in the
available_count field, so 'nr_total' can be removed. But the loop still
needs to know whether it is on its first iteration, to decide whether
to print the '+' separator, and 'start' only becomes non-zero after the
first free range has been printed, so it is used for that instead.
>
> > -     pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
> > +     pr_cont("=> %lu free of %lu total pages\n", cma->available_count,
> > +                     cma->count);
> >       spin_unlock_irq(&cma->lock);
> >  }
> >
> > @@ -446,6 +447,10 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
> >
> >       for (;;) {
> >               spin_lock_irq(&cma->lock);
> > +             if (count > cma->available_count) {
>
> Right here would be a nice place for a comment?
>
Yes, I'll add one.

> > +                     spin_unlock_irq(&cma->lock);
> > +                     break;
> > +             }
> >               bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
> >                               bitmap_maxno, start, bitmap_count, mask,
> >                               offset);
>

I'll re-send a v2 with some Cc: lines added.

Thanks!

- Frank
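
As background for the accounting discussed above - keeping an
available-page counter next to the allocation bitmap so that a request
which cannot possibly fit is rejected before any bitmap search - the
following standalone userspace sketch mimics the idea. The sizes,
names, and naive bitmap scan here are made up for illustration and are
not taken from the patch.

/*
 * Standalone sketch of the accounting pattern discussed above:
 * an available-page counter kept alongside a bitmap lets an
 * allocation that cannot fit bail out before scanning the bitmap.
 * Sizes and names are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define AREA_PAGES 64

struct toy_cma {
	bool bitmap[AREA_PAGES];	/* true = page handed out */
	unsigned long count;		/* total pages in the area */
	unsigned long available_count;	/* pages not yet handed out */
};

/* Return the first index of a free run of 'count' pages, or -1. */
static long toy_alloc(struct toy_cma *cma, unsigned long count)
{
	unsigned long start, i;

	/* Early exit: skip the bitmap scan if the request cannot fit. */
	if (count > cma->available_count)
		return -1;

	for (start = 0; start + count <= AREA_PAGES; start++) {
		for (i = 0; i < count; i++)
			if (cma->bitmap[start + i])
				break;
		if (i == count) {
			for (i = 0; i < count; i++)
				cma->bitmap[start + i] = true;
			cma->available_count -= count;
			return (long)start;
		}
	}
	return -1;
}

static void toy_release(struct toy_cma *cma, unsigned long start,
			unsigned long count)
{
	unsigned long i;

	for (i = 0; i < count; i++)
		cma->bitmap[start + i] = false;
	cma->available_count += count;
}

int main(void)
{
	struct toy_cma cma = { .count = AREA_PAGES,
			       .available_count = AREA_PAGES };
	long a = toy_alloc(&cma, 48);
	long b = toy_alloc(&cma, 32);	/* rejected: only 16 pages left */

	printf("first alloc at %ld, second alloc %s, %lu of %lu available\n",
	       a, b < 0 ? "rejected" : "ok", cma.available_count, cma.count);
	toy_release(&cma, (unsigned long)a, 48);
	printf("after release: %lu of %lu available\n",
	       cma.available_count, cma.count);
	return 0;
}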

Patch

diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-cma b/Documentation/ABI/testing/sysfs-kernel-mm-cma
index dfd755201142..aaf2a5d8b13b 100644
--- a/Documentation/ABI/testing/sysfs-kernel-mm-cma
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-cma
@@ -29,3 +29,16 @@  Date:		Feb 2024
 Contact:	Anshuman Khandual <anshuman.khandual@arm.com>
 Description:
 		the number of pages CMA API succeeded to release
+
+What:		/sys/kernel/mm/cma/<cma-heap-name>/total_pages
+Date:		Jun 2024
+Contact:	Frank van der Linden <fvdl@google.com>
+Description:
+		The size of the CMA area in pages.
+
+What:		/sys/kernel/mm/cma/<cma-heap-name>/available_pages
+Date:		Jun 2024
+Contact:	Frank van der Linden <fvdl@google.com>
+Description:
+		The number of pages in the CMA area that are still
+		available for CMA allocation.
diff --git a/mm/cma.c b/mm/cma.c
index 3e9724716bad..727e8c04d53a 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -86,6 +86,7 @@  static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
 
 	spin_lock_irqsave(&cma->lock, flags);
 	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
+	cma->available_count += count;
 	spin_unlock_irqrestore(&cma->lock, flags);
 }
 
@@ -133,7 +134,7 @@  static void __init cma_activate_area(struct cma *cma)
 			free_reserved_page(pfn_to_page(pfn));
 	}
 	totalcma_pages -= cma->count;
-	cma->count = 0;
+	cma->available_count = cma->count = 0;
 	pr_err("CMA area %s could not be activated\n", cma->name);
 	return;
 }
@@ -198,7 +199,7 @@  int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 		snprintf(cma->name, CMA_MAX_NAME,  "cma%d\n", cma_area_count);
 
 	cma->base_pfn = PFN_DOWN(base);
-	cma->count = size >> PAGE_SHIFT;
+	cma->available_count = cma->count = size >> PAGE_SHIFT;
 	cma->order_per_bit = order_per_bit;
 	*res_cma = cma;
 	cma_area_count++;
@@ -382,7 +383,7 @@  static void cma_debug_show_areas(struct cma *cma)
 {
 	unsigned long next_zero_bit, next_set_bit, nr_zero;
 	unsigned long start = 0;
-	unsigned long nr_part, nr_total = 0;
+	unsigned long nr_part;
 	unsigned long nbits = cma_bitmap_maxno(cma);
 
 	spin_lock_irq(&cma->lock);
@@ -394,12 +395,12 @@  static void cma_debug_show_areas(struct cma *cma)
 		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
 		nr_zero = next_set_bit - next_zero_bit;
 		nr_part = nr_zero << cma->order_per_bit;
-		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
+		pr_cont("%s%lu@%lu", start ? "+" : "", nr_part,
 			next_zero_bit);
-		nr_total += nr_part;
 		start = next_zero_bit + nr_zero;
 	}
-	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
+	pr_cont("=> %lu free of %lu total pages\n", cma->available_count,
+			cma->count);
 	spin_unlock_irq(&cma->lock);
 }
 
@@ -446,6 +447,10 @@  struct page *cma_alloc(struct cma *cma, unsigned long count,
 
 	for (;;) {
 		spin_lock_irq(&cma->lock);
+		if (count > cma->available_count) {
+			spin_unlock_irq(&cma->lock);
+			break;
+		}
 		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
 				bitmap_maxno, start, bitmap_count, mask,
 				offset);
@@ -454,6 +459,7 @@  struct page *cma_alloc(struct cma *cma, unsigned long count,
 			break;
 		}
 		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
+		cma->available_count -= count;
 		/*
 		 * It's safe to drop the lock here. We've marked this region for
 		 * our exclusive use. If the migration fails we will take the
diff --git a/mm/cma.h b/mm/cma.h
index ad61cc6dd439..d111f3d51868 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -13,6 +13,7 @@  struct cma_kobject {
 struct cma {
 	unsigned long   base_pfn;
 	unsigned long   count;
+	unsigned long	available_count;
 	unsigned long   *bitmap;
 	unsigned int order_per_bit; /* Order of pages represented by one bit */
 	spinlock_t	lock;
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index 602fff89b15f..89236f22230a 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -34,13 +34,10 @@  DEFINE_DEBUGFS_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");
 static int cma_used_get(void *data, u64 *val)
 {
 	struct cma *cma = data;
-	unsigned long used;
 
 	spin_lock_irq(&cma->lock);
-	/* pages counter is smaller than sizeof(int) */
-	used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
+	*val = cma->count - cma->available_count;
 	spin_unlock_irq(&cma->lock);
-	*val = (u64)used << cma->order_per_bit;
 
 	return 0;
 }
diff --git a/mm/cma_sysfs.c b/mm/cma_sysfs.c
index f50db3973171..97acd3e5a6a5 100644
--- a/mm/cma_sysfs.c
+++ b/mm/cma_sysfs.c
@@ -62,6 +62,24 @@  static ssize_t release_pages_success_show(struct kobject *kobj,
 }
 CMA_ATTR_RO(release_pages_success);
 
+static ssize_t total_pages_show(struct kobject *kobj,
+					  struct kobj_attribute *attr, char *buf)
+{
+	struct cma *cma = cma_from_kobj(kobj);
+
+	return sysfs_emit(buf, "%lu\n", cma->count);
+}
+CMA_ATTR_RO(total_pages);
+
+static ssize_t available_pages_show(struct kobject *kobj,
+					  struct kobj_attribute *attr, char *buf)
+{
+	struct cma *cma = cma_from_kobj(kobj);
+
+	return sysfs_emit(buf, "%lu\n", cma->available_count);
+}
+CMA_ATTR_RO(available_pages);
+
 static void cma_kobj_release(struct kobject *kobj)
 {
 	struct cma *cma = cma_from_kobj(kobj);
@@ -75,6 +93,8 @@  static struct attribute *cma_attrs[] = {
 	&alloc_pages_success_attr.attr,
 	&alloc_pages_fail_attr.attr,
 	&release_pages_success_attr.attr,
+	&total_pages_attr.attr,
+	&available_pages_attr.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(cma);