diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -123,6 +123,11 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
&& !nomap) {
/* Need adjust the alignment to satisfy the CMA requirement */
align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);
+ } else if (IS_ENABLED(CONFIG_CMA)
+ && of_flat_dt_is_compatible(node, "shared-dmb-pool")) {
+ /* Need to adjust the alignment to satisfy CMA/DMB requirements */
+ align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);
+ align = max_t(phys_addr_t, align, DMB_MIN_ALIGNMENT_BYTES);
}
prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -50,6 +50,7 @@
#include <linux/sizes.h>
#include <linux/dma-map-ops.h>
#include <linux/cma.h>
+#include <linux/dmb.h>
#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
@@ -397,10 +398,11 @@ static const struct reserved_mem_ops rmem_cma_ops = {
.device_release = rmem_cma_device_release,
};
-static int __init rmem_cma_setup(struct reserved_mem *rmem)
+static int __init _rmem_cma_setup(struct reserved_mem *rmem, bool in_dmb)
{
unsigned long node = rmem->fdt_node;
bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
+ phys_addr_t align = CMA_MIN_ALIGNMENT_BYTES;
struct cma *cma;
int err;
@@ -414,16 +416,25 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
of_get_flat_dt_prop(node, "no-map", NULL))
return -EINVAL;
- if (!IS_ALIGNED(rmem->base | rmem->size, CMA_MIN_ALIGNMENT_BYTES)) {
+ if (in_dmb) {
+ if (default_cma) {
+ pr_err("Reserved memory: cma-default cannot be DMB\n");
+ return -EINVAL;
+ }
+ align = max_t(phys_addr_t, align, DMB_MIN_ALIGNMENT_BYTES);
+ }
+ if (!IS_ALIGNED(rmem->base | rmem->size, align)) {
pr_err("Reserved memory: incorrect alignment of CMA region\n");
return -EINVAL;
}
- err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
+ err = __cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name,
+ &cma, in_dmb);
if (err) {
pr_err("Reserved memory: unable to setup CMA region\n");
return err;
}
+
/* Architecture specific contiguous memory fixup. */
dma_contiguous_early_fixup(rmem->base, rmem->size);
@@ -433,10 +444,22 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
rmem->ops = &rmem_cma_ops;
rmem->priv = cma;
- pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
- &rmem->base, (unsigned long)rmem->size / SZ_1M);
+ pr_info("Reserved memory: created %s memory pool at %pa, size %ld MiB\n",
+ in_dmb ? "DMB" : "CMA", &rmem->base,
+ (unsigned long)rmem->size / SZ_1M);
return 0;
}
+
+static int __init rmem_cma_setup(struct reserved_mem *rmem)
+{
+ return _rmem_cma_setup(rmem, false);
+}
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
+
+static int __init rmem_cma_in_dmb_setup(struct reserved_mem *rmem)
+{
+ return _rmem_cma_setup(rmem, true);
+}
+RESERVEDMEM_OF_DECLARE(cma_in_dmb, "shared-dmb-pool", rmem_cma_in_dmb_setup);
#endif

A 'shared-dmb-pool' reserved-memory device tree node defines a Designated Movable Block for use by an associated Contiguous Memory Allocator. Devices access the CMA region in the same manner as a 'shared-dma-pool', but the kernel page allocator is free to use the memory like any other ZONE_MOVABLE memory.

Signed-off-by: Doug Berger <opendmb@gmail.com>
---
 drivers/of/of_reserved_mem.c |  5 +++++
 kernel/dma/contiguous.c      | 33 ++++++++++++++++++++++++++++-----
 2 files changed, 33 insertions(+), 5 deletions(-)
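For illustration, a minimal sketch of what such a node might look like (the node name, label, and 256 MiB size below are assumptions, not taken from this patch; the region's base and size must satisfy both CMA_MIN_ALIGNMENT_BYTES and the DMB_MIN_ALIGNMENT_BYTES required above):

	reserved-memory {
		#address-cells = <1>;
		#size-cells = <1>;
		ranges;

		/* hypothetical dynamically placed DMB-backed CMA region */
		multimedia_dmb: multimedia {
			compatible = "shared-dmb-pool";
			reusable;		/* required, as for "shared-dma-pool" */
			size = <0x10000000>;	/* 256 MiB, illustrative */
		};
	};

As with 'shared-dma-pool', 'no-map' must be absent and 'reusable' present; per rmem_cma_in_dmb_setup() above, 'linux,cma-default' is additionally rejected for a DMB-backed pool.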