diff mbox series

cma: fail if fixed declaration can't be honored

Message ID 1561422051-16142-1-git-send-email-opendmb@gmail.com (mailing list archive)
State New, archived
Headers show
Series cma: fail if fixed declaration can't be honored | expand

Commit Message

Doug Berger June 25, 2019, 12:20 a.m. UTC
The description of the cma_declare_contiguous() function indicates
that if the 'fixed' argument is true the reserved contiguous area
must be exactly at the address of the 'base' argument.

However, the function currently allows the 'base', 'size', and
'limit' arguments to be silently adjusted to meet alignment
constraints. This commit enforces the documented behavior through
explicit checks that return an error if the region cannot be
reserved exactly at the specified base address.

Fixes: 5ea3b1b2f8ad ("cma: add placement specifier for "cma=" kernel parameter")
Signed-off-by: Doug Berger <opendmb@gmail.com>
---
 mm/cma.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

Comments

MichaƂ Nazarewicz June 25, 2019, 9:24 a.m. UTC | #1
On Tue, 25 Jun 2019 at 01:22, Doug Berger <opendmb@gmail.com> wrote:

> The description of the cma_declare_contiguous() function indicates
> that if the 'fixed' argument is true the reserved contiguous area
> must be exactly at the address of the 'base' argument.
>
> However, the function currently allows the 'base', 'size', and
> 'limit' arguments to be silently adjusted to meet alignment
> constraints. This commit enforces the documented behavior through
> explicit checks that return an error if the region does not fit
> within a specified region.
>
> Fixes: 5ea3b1b2f8ad ("cma: add placement specifier for "cma=" kernel
> parameter")
> Signed-off-by: Doug Berger <opendmb@gmail.com>
>

Acked-by: Michal Nazarewicz <mina86@mina86.com>


> ---
>  mm/cma.c | 13 +++++++++++++
>  1 file changed, 13 insertions(+)
>
> diff --git a/mm/cma.c b/mm/cma.c
> index 3340ef34c154..4973d253dc83 100644
> --- a/mm/cma.c
> +++ b/mm/cma.c
> @@ -278,6 +278,12 @@ int __init cma_declare_contiguous(phys_addr_t base,
>          */
>         alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
>                           max_t(unsigned long, MAX_ORDER - 1,
> pageblock_order));
> +       if (fixed && base & (alignment - 1)) {
> +               ret = -EINVAL;
> +               pr_err("Region at %pa must be aligned to %pa bytes\n",
> +                       &base, &alignment);
> +               goto err;
> +       }
>
>         base = ALIGN(base, alignment);
>         size = ALIGN(size, alignment);
>         limit &= ~(alignment - 1);
> @@ -308,6 +314,13 @@ int __init cma_declare_contiguous(phys_addr_t base,
>         if (limit == 0 || limit > memblock_end)
>                 limit = memblock_end;
>
> +       if (base + size > limit) {
> +               ret = -EINVAL;
> +               pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
> +                       &size, &base, &limit);
> +               goto err;
> +       }
> +
>         /* Reserve memory */
>         if (fixed) {
>                 if (memblock_is_region_reserved(base, size) ||
> --
> 2.7.4
>
>
Andrew Morton June 25, 2019, 8:23 p.m. UTC | #2
On Mon, 24 Jun 2019 17:20:51 -0700 Doug Berger <opendmb@gmail.com> wrote:

> The description of the cma_declare_contiguous() function indicates
> that if the 'fixed' argument is true the reserved contiguous area
> must be exactly at the address of the 'base' argument.
> 
> However, the function currently allows the 'base', 'size', and
> 'limit' arguments to be silently adjusted to meet alignment
> constraints. This commit enforces the documented behavior through
> explicit checks that return an error if the region does not fit
> within a specified region.
> 
> ...
>
> --- a/mm/cma.c
> +++ b/mm/cma.c
> @@ -278,6 +278,12 @@ int __init cma_declare_contiguous(phys_addr_t base,
>  	 */
>  	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
>  			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
> +	if (fixed && base & (alignment - 1)) {
> +		ret = -EINVAL;
> +		pr_err("Region at %pa must be aligned to %pa bytes\n",
> +			&base, &alignment);

CMA functions do like to use pr_err() when the caller messed something
up.  It should be using WARN_ON() or WARN_ON_ONCE(), mainly so we get a
backtrace to find out which caller messed up.

There are probably other sites which should be converted, but I think
it would be best to get these new ones correct.  So something like

	if (WARN_ONCE(fixed && base & (alignment - 1),
		      "region at %pa must be aligned to %pa bytes",
		      &base, &alignment)) {
		ret = -EINVAL;
		goto err;
	}
diff mbox series

Patch

diff --git a/mm/cma.c b/mm/cma.c
index 3340ef34c154..4973d253dc83 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -278,6 +278,12 @@  int __init cma_declare_contiguous(phys_addr_t base,
 	 */
 	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
 			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
+	if (fixed && base & (alignment - 1)) {
+		ret = -EINVAL;
+		pr_err("Region at %pa must be aligned to %pa bytes\n",
+			&base, &alignment);
+		goto err;
+	}
 	base = ALIGN(base, alignment);
 	size = ALIGN(size, alignment);
 	limit &= ~(alignment - 1);
@@ -308,6 +314,13 @@  int __init cma_declare_contiguous(phys_addr_t base,
 	if (limit == 0 || limit > memblock_end)
 		limit = memblock_end;
 
+	if (base + size > limit) {
+		ret = -EINVAL;
+		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
+			&size, &base, &limit);
+		goto err;
+	}
+
 	/* Reserve memory */
 	if (fixed) {
 		if (memblock_is_region_reserved(base, size) ||