Message ID | 1402543307-29800-6-git-send-email-iamjoonsoo.kim@lge.com (mailing list archive)
---|---
State | New, archived
On Thu, Jun 12, 2014 at 12:21:42PM +0900, Joonsoo Kim wrote:
> ppc kvm's cma region management requires arbitrary bitmap granularity,
> since they want to reserve very large memory and manage this region
> with bitmap that one bit for several pages to reduce management overheads.
> So support arbitrary bitmap granularity for following generalization.
>
> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
>
> diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
> index bc4c171..9bc9340 100644
> --- a/drivers/base/dma-contiguous.c
> +++ b/drivers/base/dma-contiguous.c
> @@ -38,6 +38,7 @@ struct cma {
>  	unsigned long	base_pfn;
>  	unsigned long	count;
>  	unsigned long	*bitmap;
> +	int order_per_bit; /* Order of pages represented by one bit */

Hmm, I'm not sure it's good as *general* interface even though it covers
existing usecases.

It forces a cma area should be handled by same size unit. Right?
It's really important point for this patchset's motivation so I will stop
review and wait other opinions.

[snip rest of the quoted patch]
On Thu, Jun 12, 2014 at 03:43:55PM +0900, Joonsoo Kim wrote:
> On Thu, Jun 12, 2014 at 03:06:10PM +0900, Minchan Kim wrote:
> > On Thu, Jun 12, 2014 at 12:21:42PM +0900, Joonsoo Kim wrote:
> > > ppc kvm's cma region management requires arbitrary bitmap granularity,
> > > since they want to reserve very large memory and manage this region
> > > with bitmap that one bit for several pages to reduce management overheads.
> > > So support arbitrary bitmap granularity for following generalization.
> > >
> > > Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
[snip quoted patch]
> >
> > Hmm, I'm not sure it's good as *general* interface even though it covers
> > existing usecases.
> >
> > It forces a cma area should be handled by same size unit. Right?
> > It's really important point for this patchset's motivation so I will stop
> > review and wait other opinions.
>
> If you pass 0 to order_per_bit, you can manage cma area in every
> size(page unit) you want. If you pass certain number to order_per_bit,
> you can allocate and release cma area in multiple of such page order.
>
> I think that this is more general implementation than previous versions.

Fair enough.
On Thu, Jun 12, 2014 at 03:06:10PM +0900, Minchan Kim wrote:
> On Thu, Jun 12, 2014 at 12:21:42PM +0900, Joonsoo Kim wrote:
> > ppc kvm's cma region management requires arbitrary bitmap granularity,
> > since they want to reserve very large memory and manage this region
> > with bitmap that one bit for several pages to reduce management overheads.
> > So support arbitrary bitmap granularity for following generalization.
> >
> > Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> >
> > diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
> > index bc4c171..9bc9340 100644
> > --- a/drivers/base/dma-contiguous.c
> > +++ b/drivers/base/dma-contiguous.c
> > @@ -38,6 +38,7 @@ struct cma {
> >  	unsigned long	base_pfn;
> >  	unsigned long	count;
> >  	unsigned long	*bitmap;
> > +	int order_per_bit; /* Order of pages represented by one bit */
>
> Hmm, I'm not sure it's good as *general* interface even though it covers
> existing usecases.
>
> It forces a cma area should be handled by same size unit. Right?
> It's really important point for this patchset's motivation so I will stop
> review and wait other opinions.

If you pass 0 to order_per_bit, you can manage cma area in every
size(page unit) you want. If you pass certain number to order_per_bit,
you can allocate and release cma area in multiple of such page order.

I think that this is more general implementation than previous versions.

Thanks.
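To make the granularity semantics described above concrete, here is a minimal standalone sketch (an illustration for this archive, not part of the patch): pages_to_bits() mirrors cma_bitmap_pages_to_bits() from the patch, and ALIGN() mirrors the kernel macro for power-of-2 alignments.

/*
 * Userspace sketch of the order_per_bit math; not kernel code.
 * Assumes 4KB pages in the comments below.
 */
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned long pages_to_bits(unsigned long pages, unsigned int order_per_bit)
{
	return ALIGN(pages, 1UL << order_per_bit) >> order_per_bit;
}

int main(void)
{
	/* order_per_bit == 0: one bit per page, any page count is exact */
	printf("%lu\n", pages_to_bits(10, 0));		/* 10 bits */

	/* order_per_bit == 2: one bit covers 4 pages, counts round up */
	printf("%lu\n", pages_to_bits(10, 2));		/* 3 bits, covering 12 pages */

	/* ppc kvm style: 4GB of 4KB pages tracked at 2MB-per-bit granularity */
	printf("%lu\n", pages_to_bits(1UL << 20, 9));	/* 2048 bits */

	return 0;
}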
On Thu, Jun 12, 2014 at 12:21:42PM +0900, Joonsoo Kim wrote:
> ppc kvm's cma region management requires arbitrary bitmap granularity,
> since they want to reserve very large memory and manage this region
> with bitmap that one bit for several pages to reduce management overheads.
> So support arbitrary bitmap granularity for following generalization.
>
> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>

Acked-by: Minchan Kim <minchan@kernel.org>

Just a nit below.

[snip quoted patch]

> @@ -345,7 +372,8 @@ static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
>  static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
>  				       unsigned int align)
>  {
> -	unsigned long mask, pfn, pageno, start = 0;
> +	unsigned long mask, pfn, start = 0;
> +	unsigned long bitmap_maxno, bitmapno, nr_bits;

Just Nit: bitmap_maxno, bitmap_no or something consistent.
I know you love consistent when I read description in first patch
in this patchset. ;-)

[snip rest of the quoted patch]
On 06/12/2014 03:08 PM, Minchan Kim wrote:
> On Thu, Jun 12, 2014 at 12:21:42PM +0900, Joonsoo Kim wrote:
>> ppc kvm's cma region management requires arbitrary bitmap granularity,
>> since they want to reserve very large memory and manage this region
>> with bitmap that one bit for several pages to reduce management overheads.
>> So support arbitrary bitmap granularity for following generalization.
>>
>> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> Acked-by: Minchan Kim <minchan@kernel.org>
>
> Just a nit below.
>
[snip quoted patch]
>> -	unsigned long mask, pfn, pageno, start = 0;
>> +	unsigned long mask, pfn, start = 0;
>> +	unsigned long bitmap_maxno, bitmapno, nr_bits;
>
> Just Nit: bitmap_maxno, bitmap_no or something consistent.
> I know you love consistent when I read description in first patch
> in this patchset. ;-)

Yeah, not only in this patchset, I saw Joonsoo trying to unify all
kinds of things in the MM. This is great for newbies, IMO.
On Thu, Jun 12, 2014 at 04:08:11PM +0900, Minchan Kim wrote:
> On Thu, Jun 12, 2014 at 12:21:42PM +0900, Joonsoo Kim wrote:
> > ppc kvm's cma region management requires arbitrary bitmap granularity,
> > since they want to reserve very large memory and manage this region
> > with bitmap that one bit for several pages to reduce management overheads.
> > So support arbitrary bitmap granularity for following generalization.
> >
> > Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> Acked-by: Minchan Kim <minchan@kernel.org>

Thanks.

[snip...]

> > /**
> >  * dma_alloc_from_contiguous() - allocate pages from contiguous area
> >  * @dev:   Pointer to device for which the allocation is performed.
> > @@ -345,7 +372,8 @@ static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
> >  static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
> >  				       unsigned int align)
> >  {
> > -	unsigned long mask, pfn, pageno, start = 0;
> > +	unsigned long mask, pfn, start = 0;
> > +	unsigned long bitmap_maxno, bitmapno, nr_bits;
>
> Just Nit: bitmap_maxno, bitmap_no or something consistent.
> I know you love consistent when I read description in first patch
> in this patchset. ;-)

Yeah, I will fix it. :)

Thanks.
On 06/12/2014 11:21 AM, Joonsoo Kim wrote:
> ppc kvm's cma region management requires arbitrary bitmap granularity,
> since they want to reserve very large memory and manage this region
> with bitmap that one bit for several pages to reduce management overheads.
> So support arbitrary bitmap granularity for following generalization.
>
> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>

Acked-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>

[snip quoted patch]
On Thu, Jun 12 2014, Joonsoo Kim <iamjoonsoo.kim@lge.com> wrote:
> ppc kvm's cma region management requires arbitrary bitmap granularity,
> since they want to reserve very large memory and manage this region
> with bitmap that one bit for several pages to reduce management overheads.
> So support arbitrary bitmap granularity for following generalization.
>
> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>

Acked-by: Michal Nazarewicz <mina86@mina86.com>

> diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
> index bc4c171..9bc9340 100644
> --- a/drivers/base/dma-contiguous.c
> +++ b/drivers/base/dma-contiguous.c
> @@ -38,6 +38,7 @@ struct cma {
> 	unsigned long	base_pfn;
> 	unsigned long	count;

Have you considered replacing count with maxno?

> 	unsigned long	*bitmap;
> +	int order_per_bit; /* Order of pages represented by one bit */

I'd make it unsigned.

> 	struct mutex	lock;
> };
>
> +static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int
> count)

For consistency cma_clear_bitmap would make more sense I think.  On the
other hand, you're just moving stuff around so perhaps renaming the
function at this point is not worth it any more.

> +{
> +	unsigned long bitmapno, nr_bits;
> +
> +	bitmapno = (pfn - cma->base_pfn) >> cma->order_per_bit;
> +	nr_bits = cma_bitmap_pages_to_bits(cma, count);
> +
> +	mutex_lock(&cma->lock);
> +	bitmap_clear(cma->bitmap, bitmapno, nr_bits);
> +	mutex_unlock(&cma->lock);
> +}
> +
>  static int __init cma_activate_area(struct cma *cma)
>  {
> -	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
> +	int bitmap_maxno = cma_bitmap_maxno(cma);
> +	int bitmap_size = BITS_TO_LONGS(bitmap_maxno) * sizeof(long);
> 	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
> 	unsigned i = cma->count >> pageblock_order;
> 	struct zone *zone;

bitmap_maxno is never used again, perhaps:

+	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);

instead?  Up to you.
Joonsoo Kim <iamjoonsoo.kim@lge.com> writes:
> ppc kvm's cma region management requires arbitrary bitmap granularity,
> since they want to reserve very large memory and manage this region
> with bitmap that one bit for several pages to reduce management overheads.
> So support arbitrary bitmap granularity for following generalization.
>
> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>

Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>

[snip quoted patch]
On Thu, Jun 12, 2014 at 12:19:54PM +0200, Michal Nazarewicz wrote:
> On Thu, Jun 12 2014, Joonsoo Kim <iamjoonsoo.kim@lge.com> wrote:
> > ppc kvm's cma region management requires arbitrary bitmap granularity,
> > since they want to reserve very large memory and manage this region
> > with bitmap that one bit for several pages to reduce management overheads.
> > So support arbitrary bitmap granularity for following generalization.
> >
> > Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
>
> Acked-by: Michal Nazarewicz <mina86@mina86.com>
>
> > diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
> > index bc4c171..9bc9340 100644
> > --- a/drivers/base/dma-contiguous.c
> > +++ b/drivers/base/dma-contiguous.c
> > @@ -38,6 +38,7 @@ struct cma {
> > 	unsigned long	base_pfn;
> > 	unsigned long	count;
>
> Have you considered replacing count with maxno?

No, I haven't. I think that count is better than maxno, since it
represent number of pages in this region.

> > 	unsigned long	*bitmap;
> > +	int order_per_bit; /* Order of pages represented by one bit */
>
> I'd make it unsigned.

Will fix it.

> > 	struct mutex	lock;
> > };
> >
> > +static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int
> > count)
>
> For consistency cma_clear_bitmap would make more sense I think.  On the
> other hand, you're just moving stuff around so perhaps renaming the
> function at this point is not worth it any more.

Will fix it.

> > +{
> > +	unsigned long bitmapno, nr_bits;
> > +
> > +	bitmapno = (pfn - cma->base_pfn) >> cma->order_per_bit;
> > +	nr_bits = cma_bitmap_pages_to_bits(cma, count);
> > +
> > +	mutex_lock(&cma->lock);
> > +	bitmap_clear(cma->bitmap, bitmapno, nr_bits);
> > +	mutex_unlock(&cma->lock);
> > +}
> > +
> >  static int __init cma_activate_area(struct cma *cma)
> >  {
> > -	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
> > +	int bitmap_maxno = cma_bitmap_maxno(cma);
> > +	int bitmap_size = BITS_TO_LONGS(bitmap_maxno) * sizeof(long);
> > 	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
> > 	unsigned i = cma->count >> pageblock_order;
> > 	struct zone *zone;
>
> bitmap_maxno is never used again, perhaps:
>
> +	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
>
> instead?  Up to you.

Okay!!

Thanks.
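Pulling together the changes Joonsoo agreed to above (unsigned order_per_bit, the cma_clear_bitmap rename, the bitmap_no spelling, and folding cma_bitmap_maxno() into the bitmap_size computation), the touched code would presumably end up looking something like the sketch below. This is a guess at the respin, not code taken from the actual follow-up series.

/* Sketch of the helpers after the review feedback above is applied. */
struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
	unsigned int order_per_bit;	/* now unsigned, per Michal's comment */
	struct mutex	lock;
};

/* renamed from clear_cma_bitmap for consistency with the cma_ prefix */
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
{
	unsigned long bitmap_no, nr_bits;	/* bitmap_no, per Minchan's nit */

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	nr_bits = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, nr_bits);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	/* bitmap_maxno folded in, since it was used only once */
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;
	/* ... remainder unchanged from the patch below ... */
}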
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index bc4c171..9bc9340 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -38,6 +38,7 @@ struct cma {
 	unsigned long	base_pfn;
 	unsigned long	count;
 	unsigned long	*bitmap;
+	int order_per_bit; /* Order of pages represented by one bit */
 	struct mutex	lock;
 };
 
@@ -157,9 +158,38 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
 
 static DEFINE_MUTEX(cma_mutex);
 
+static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
+{
+	return (1 << (align_order >> cma->order_per_bit)) - 1;
+}
+
+static unsigned long cma_bitmap_maxno(struct cma *cma)
+{
+	return cma->count >> cma->order_per_bit;
+}
+
+static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
+						unsigned long pages)
+{
+	return ALIGN(pages, 1 << cma->order_per_bit) >> cma->order_per_bit;
+}
+
+static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
+{
+	unsigned long bitmapno, nr_bits;
+
+	bitmapno = (pfn - cma->base_pfn) >> cma->order_per_bit;
+	nr_bits = cma_bitmap_pages_to_bits(cma, count);
+
+	mutex_lock(&cma->lock);
+	bitmap_clear(cma->bitmap, bitmapno, nr_bits);
+	mutex_unlock(&cma->lock);
+}
+
 static int __init cma_activate_area(struct cma *cma)
 {
-	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
+	int bitmap_maxno = cma_bitmap_maxno(cma);
+	int bitmap_size = BITS_TO_LONGS(bitmap_maxno) * sizeof(long);
 	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
 	unsigned i = cma->count >> pageblock_order;
 	struct zone *zone;
@@ -221,6 +251,7 @@ core_initcall(cma_init_reserved_areas);
  * @base: Base address of the reserved area optional, use 0 for any
  * @limit: End address of the reserved memory (optional, 0 for any).
  * @alignment: Alignment for the contiguous memory area, should be power of 2
+ * @order_per_bit: Order of pages represented by one bit on bitmap.
  * @res_cma: Pointer to store the created cma region.
  * @fixed: hint about where to place the reserved area
  *
@@ -235,7 +266,7 @@ core_initcall(cma_init_reserved_areas);
  */
 static int __init __dma_contiguous_reserve_area(phys_addr_t size,
 				phys_addr_t base, phys_addr_t limit,
-				phys_addr_t alignment,
+				phys_addr_t alignment, int order_per_bit,
 				struct cma **res_cma, bool fixed)
 {
 	struct cma *cma = &cma_areas[cma_area_count];
@@ -269,6 +300,8 @@ static int __init __dma_contiguous_reserve_area(phys_addr_t size,
 	base = ALIGN(base, alignment);
 	size = ALIGN(size, alignment);
 	limit &= ~(alignment - 1);
+	/* size should be aligned with order_per_bit */
+	BUG_ON(!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit));
 
 	/* Reserve memory */
 	if (base && fixed) {
@@ -294,6 +327,7 @@ static int __init __dma_contiguous_reserve_area(phys_addr_t size,
 	 */
 	cma->base_pfn = PFN_DOWN(base);
 	cma->count = size >> PAGE_SHIFT;
+	cma->order_per_bit = order_per_bit;
 	*res_cma = cma;
 	cma_area_count++;
 
@@ -313,7 +347,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 {
 	int ret;
 
-	ret = __dma_contiguous_reserve_area(size, base, limit, 0,
+	ret = __dma_contiguous_reserve_area(size, base, limit, 0, 0,
 						res_cma, fixed);
 	if (ret)
 		return ret;
@@ -324,13 +358,6 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 	return 0;
 }
 
-static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
-{
-	mutex_lock(&cma->lock);
-	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
-	mutex_unlock(&cma->lock);
-}
-
 /**
  * dma_alloc_from_contiguous() - allocate pages from contiguous area
  * @dev:   Pointer to device for which the allocation is performed.
@@ -345,7 +372,8 @@ static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
 static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
 				       unsigned int align)
 {
-	unsigned long mask, pfn, pageno, start = 0;
+	unsigned long mask, pfn, start = 0;
+	unsigned long bitmap_maxno, bitmapno, nr_bits;
 	struct page *page = NULL;
 	int ret;
 
@@ -358,18 +386,19 @@ static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
 	if (!count)
 		return NULL;
 
-	mask = (1 << align) - 1;
-
+	mask = cma_bitmap_aligned_mask(cma, align);
+	bitmap_maxno = cma_bitmap_maxno(cma);
+	nr_bits = cma_bitmap_pages_to_bits(cma, count);
 
 	for (;;) {
 		mutex_lock(&cma->lock);
-		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
-						    start, count, mask);
-		if (pageno >= cma->count) {
+		bitmapno = bitmap_find_next_zero_area(cma->bitmap,
+					bitmap_maxno, start, nr_bits, mask);
+		if (bitmapno >= bitmap_maxno) {
 			mutex_unlock(&cma->lock);
 			break;
 		}
-		bitmap_set(cma->bitmap, pageno, count);
+		bitmap_set(cma->bitmap, bitmapno, nr_bits);
 		/*
 		 * It's safe to drop the lock here. We've marked this region for
 		 * our exclusive use. If the migration fails we will take the
@@ -377,7 +406,7 @@ static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
 		 */
 		mutex_unlock(&cma->lock);
 
-		pfn = cma->base_pfn + pageno;
+		pfn = cma->base_pfn + (bitmapno << cma->order_per_bit);
 		mutex_lock(&cma_mutex);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
 		mutex_unlock(&cma_mutex);
@@ -392,7 +421,7 @@ static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
 		pr_debug("%s(): memory range at %p is busy, retrying\n",
 			 __func__, pfn_to_page(pfn));
 		/* try again with a bit different memory target */
-		start = pageno + mask + 1;
+		start = bitmapno + mask + 1;
 	}
 
 	pr_debug("%s(): returned %p\n", __func__, page);
ppc kvm's cma region management requires arbitrary bitmap granularity,
since they want to reserve very large memory and manage this region
with bitmap that one bit for several pages to reduce management overheads.
So support arbitrary bitmap granularity for following generalization.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
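For context on how the new parameter would be used: the ppc kvm series motivating this change reserves a huge region and tracks it at coarse granularity. A hypothetical caller might look like the sketch below; note that __dma_contiguous_reserve_area() is static __init in this patch and the real ppc kvm user only arrives with the follow-up generalization, so the function name and numbers here are illustrative only.

/*
 * Hypothetical illustration of the new order_per_bit parameter;
 * not code from the series. Assumes 4KB pages.
 */
static int __init example_reserve_large_cma(void)
{
	struct cma *cma;

	/*
	 * Reserve 16GB anywhere (base = 0, limit = 0, default alignment).
	 * With order_per_bit = 9, one bit tracks 512 pages (2MB), so the
	 * bitmap needs only 8192 bits (1KB) instead of the 512KB that
	 * one-bit-per-page bookkeeping would require for this region.
	 */
	return __dma_contiguous_reserve_area(16ULL << 30, 0, 0,
					     0, 9, &cma, false);
}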