
[kvm-unit-tests,RFC,v1,1/5] lib/vmalloc: vmalloc support for handling allocation metadata

Message ID: 20200814151009.55845-2-imbrenda@linux.ibm.com (mailing list archive)
State: New, archived
Series: Rewrite the allocators

Commit Message

Claudio Imbrenda Aug. 14, 2020, 3:10 p.m. UTC
Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
---
 lib/vmalloc.c | 105 +++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 95 insertions(+), 10 deletions(-)

Comments

Janosch Frank Aug. 19, 2020, 2:36 p.m. UTC | #1
On 8/14/20 5:10 PM, Claudio Imbrenda wrote:

LGTM, some smaller nits/questions below:

Commit message?

> Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
> ---
>  lib/vmalloc.c | 105 +++++++++++++++++++++++++++++++++++++++++++++-----
>  1 file changed, 95 insertions(+), 10 deletions(-)
> 
> diff --git a/lib/vmalloc.c b/lib/vmalloc.c
> index e0c7b6b..aca0876 100644
> --- a/lib/vmalloc.c
> +++ b/lib/vmalloc.c
> @@ -15,6 +15,13 @@
>  #include <bitops.h>
>  #include "vmalloc.h"
>  
> +#define VM_MAGIC 0x7E57C0DE
> +
> +struct metadata {
> +	unsigned long npages;
> +	unsigned long magic;
> +};
> +
>  static struct spinlock lock;
>  static void *vfree_top = 0;
>  static void *page_root;
> @@ -25,8 +32,14 @@ static void *page_root;
>   *
>   * nr is the number of pages to allocate
>   * alignment_pages is the alignment of the allocation *in pages*
> + * metadata indicates whether an extra (unaligned) page needs to be allocated
> + * right before the main (aligned) allocation.
> + *
> + * The return value points to the first allocated virtual page, which will
> + * be the (potentially unaligned) metadata page if the metadata flag is
> + * specified.
>   */
> -void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
> +static void *do_alloc_vpages(ulong nr, unsigned int align_order, bool metadata)
>  {
>  	uintptr_t ptr;
>  
> @@ -34,6 +47,8 @@ void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
>  	ptr = (uintptr_t)vfree_top;
>  	ptr -= PAGE_SIZE * nr;
>  	ptr &= GENMASK_ULL(63, PAGE_SHIFT + align_order);
> +	if (metadata)
> +		ptr -= PAGE_SIZE;
>  	vfree_top = (void *)ptr;
>  	spin_unlock(&lock);
>  
> @@ -41,6 +56,11 @@ void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
>  	return (void *)ptr;
>  }
>  
> +void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
> +{
> +	return do_alloc_vpages(nr, align_order, false);
> +}
> +
>  void *alloc_vpages(ulong nr)
>  {
>  	return alloc_vpages_aligned(nr, 0);
> @@ -69,35 +89,100 @@ void *vmap(phys_addr_t phys, size_t size)
>  	return mem;
>  }
>  
> +/*
> + * Allocate one page, for an object with specified alignment.
> + * The resulting pointer will be aligned to the required alignment, but
> + * intentionally not page-aligned.
> + */
> +static void *vm_alloc_one_page(size_t alignment)
> +{
> +	void *p;
> +
> +	assert(alignment >= sizeof(uintptr_t));
> +	assert(alignment < PAGE_SIZE);
> +	p = alloc_vpage();
> +	install_page(page_root, virt_to_phys(alloc_page()), p);
> +	/* write the magic at the beginning of the page */
> +	*(uintptr_t *)p = VM_MAGIC;
> +	return (void*)((uintptr_t)p + alignment);

s/(void*)/(void *)/

> +}
> +
> +static struct metadata *get_metadata(void *p)
> +{
> +	struct metadata *m = p;
> +
> +	return m - 1;
> +}

So the metadata is not at the start of the metadata page, but at the
end? We have it at the beginning for the one page case and at the end
for the multi page case with metadata on an extra page.

> +
>  /*
>   * Allocate virtual memory, with the specified minimum alignment.
> + * If the allocation fits in one page, only one page is allocated. Otherwise
> + * enough pages are allocated for the object, plus one to keep metadata
> + * information about the allocation.
>   */
>  static void *vm_memalign(size_t alignment, size_t size)
>  {
> +	struct metadata *m;
>  	phys_addr_t pa;
> -	void *mem, *p;
> +	uintptr_t p;
> +	void *mem;
> +	size_t i;
>  
> +	if (!size)
> +		return NULL;
>  	assert(is_power_of_2(alignment));
>  
> +	if (alignment < sizeof(uintptr_t))
> +		alignment = sizeof(uintptr_t);
> +	/* it fits in one page, allocate only one page */
> +	if (alignment + size <= PAGE_SIZE)
> +		return vm_alloc_one_page(alignment);

Don't we also need to take the metadata into account in any size
calculation for one page?

>  	size = PAGE_ALIGN(size) / PAGE_SIZE;
>  	alignment = get_order(PAGE_ALIGN(alignment) / PAGE_SIZE);
> -	mem = p = alloc_vpages_aligned(size, alignment);
> -	while (size--) {
> +	mem = do_alloc_vpages(size, alignment, true);
> +	p = (uintptr_t)mem;
> +	/* skip the metadata page */
> +	mem = (void *)(p + PAGE_SIZE);
> +	/*
> +	 * time to actually allocate the physical pages to back our virtual
> +	 * allocation; note that we need to allocate one extra page (for the
> +	 * metadata), hence the <=
> +	 */
> +	for (i = 0; i <= size; i++, p += PAGE_SIZE) {
>  		pa = virt_to_phys(alloc_page());
>  		assert(pa);
> -		install_page(page_root, pa, p);
> -		p += PAGE_SIZE;
> +		install_page(page_root, pa, (void *)p);
>  	}
> +	m = get_metadata(mem);
> +	m->npages = size;
> +	m->magic = VM_MAGIC;
>  	return mem;
>  }
>  
>  static void vm_free(void *mem, size_t size)
>  {
> -	while (size) {
> -		free_page(phys_to_virt(virt_to_pte_phys(page_root, mem)));
> -		mem += PAGE_SIZE;
> -		size -= PAGE_SIZE;
> +	struct metadata *m;
> +	uintptr_t ptr, end;
> +
> +	/* the pointer is not page-aligned, it was a single-page allocation */
> +	if (!IS_ALIGNED((uintptr_t)mem, PAGE_SIZE)) {
> +		ptr = virt_to_pte_phys(page_root, mem) & PAGE_MASK;
> +		assert(*(uintptr_t *)ptr == VM_MAGIC);
> +		free_page(phys_to_virt(ptr));
> +		return;
>  	}
> +
> +	/* the pointer is page-aligned, it was a multi-page allocation */
> +	m = get_metadata(mem);
> +	assert(m->magic == VM_MAGIC);
> +	assert(m->npages > 0);
> +	/* free all the pages including the metadata page */
> +	ptr = (uintptr_t)mem - PAGE_SIZE;
> +	end = ptr + m->npages * PAGE_SIZE;
> +	for ( ; ptr < end; ptr += PAGE_SIZE)
> +		free_page(phys_to_virt(virt_to_pte_phys(page_root, (void *)ptr)));
> +	/* free the last one separately to avoid overflow issues */
> +	free_page(phys_to_virt(virt_to_pte_phys(page_root, (void *)ptr)));
>  }
>  
>  static struct alloc_ops vmalloc_ops = {
>
Claudio Imbrenda Aug. 19, 2020, 3:31 p.m. UTC | #2
On Wed, 19 Aug 2020 16:36:07 +0200
Janosch Frank <frankja@linux.ibm.com> wrote:

> On 8/14/20 5:10 PM, Claudio Imbrenda wrote:
> 
> LGTM, some smaller nits/questions below:
> 
> Commit message?

oops! I'll fix it

> 
> > Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
> > ---
> >  lib/vmalloc.c | 105 +++++++++++++++++++++++++++++++++++++++++++++-----
> >  1 file changed, 95 insertions(+), 10 deletions(-)
> > 
> > diff --git a/lib/vmalloc.c b/lib/vmalloc.c
> > index e0c7b6b..aca0876 100644
> > --- a/lib/vmalloc.c
> > +++ b/lib/vmalloc.c
> > @@ -15,6 +15,13 @@
> >  #include <bitops.h>
> >  #include "vmalloc.h"
> >  
> > +#define VM_MAGIC 0x7E57C0DE
> > +
> > +struct metadata {
> > +	unsigned long npages;
> > +	unsigned long magic;
> > +};
> > +
> >  static struct spinlock lock;
> >  static void *vfree_top = 0;
> >  static void *page_root;
> > @@ -25,8 +32,14 @@ static void *page_root;
> >   *
> >   * nr is the number of pages to allocate
> >   * alignment_pages is the alignment of the allocation *in pages*
> > + * metadata indicates whether an extra (unaligned) page needs to be allocated
> > + * right before the main (aligned) allocation.
> > + *
> > + * The return value points to the first allocated virtual page, which will
> > + * be the (potentially unaligned) metadata page if the metadata flag is
> > + * specified.
> >   */
> > -void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
> > +static void *do_alloc_vpages(ulong nr, unsigned int align_order, bool metadata)
> >  {
> >  	uintptr_t ptr;
> >  
> > @@ -34,6 +47,8 @@ void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
> >  	ptr = (uintptr_t)vfree_top;
> >  	ptr -= PAGE_SIZE * nr;
> >  	ptr &= GENMASK_ULL(63, PAGE_SHIFT + align_order);
> > +	if (metadata)
> > +		ptr -= PAGE_SIZE;
> >  	vfree_top = (void *)ptr;
> >  	spin_unlock(&lock);
> >  
> > @@ -41,6 +56,11 @@ void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
> >  	return (void *)ptr;
> >  }
> >  
> > +void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
> > +{
> > +	return do_alloc_vpages(nr, align_order, false);
> > +}
> > +
> >  void *alloc_vpages(ulong nr)
> >  {
> >  	return alloc_vpages_aligned(nr, 0);
> > @@ -69,35 +89,100 @@ void *vmap(phys_addr_t phys, size_t size)
> >  	return mem;
> >  }
> >  
> > +/*
> > + * Allocate one page, for an object with specified alignment.
> > + * The resulting pointer will be aligned to the required alignment, but
> > + * intentionally not page-aligned.
> > + */
> > +static void *vm_alloc_one_page(size_t alignment)
> > +{
> > +	void *p;
> > +
> > +	assert(alignment >= sizeof(uintptr_t));
> > +	assert(alignment < PAGE_SIZE);
> > +	p = alloc_vpage();
> > +	install_page(page_root, virt_to_phys(alloc_page()), p);
> > +	/* write the magic at the beginning of the page */
> > +	*(uintptr_t *)p = VM_MAGIC;
> > +	return (void*)((uintptr_t)p + alignment);  
> 
> s/(void*)/(void *)/

will be fixed

> > +}
> > +
> > +static struct metadata *get_metadata(void *p)
> > +{
> > +	struct metadata *m = p;
> > +
> > +	return m - 1;
> > +}  
> 
> So the metadata is not at the start of the metadata page, but at the
> end? We have it at the beginning for the one page case and at the end
> for the multi page case with metadata on an extra page.

correct. it doesn't make a huge difference in the end where the
metadata is, as long as it is somewhere. Probably putting it always
right before the start of the memory is better, in order to catch
accidental off-by-one writes (as they would corrupt the magic value)

please note that the metadata for a single page is just the magic value
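
To make the two placements easier to picture, below is a minimal stand-alone
sketch that mimics the layout with a plain static buffer instead of the real
vmalloc mappings. VM_MAGIC and struct metadata are the ones from the patch;
the buffer, the chosen alignment and main() are made up purely for
illustration and are not code from this series.

/*
 * Single-page case: the magic sits at offset 0 of the page and the object
 * starts at 'alignment'.  Multi-page case: a whole extra page precedes the
 * aligned allocation, and struct metadata sits at its very end, i.e. right
 * before the pointer returned to the caller.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define VM_MAGIC 0x7E57C0DE

struct metadata {
	unsigned long npages;
	unsigned long magic;
};

static unsigned char fake_pages[3 * 4096] __attribute__((aligned(4096)));

int main(void)
{
	/* single-page case: write the magic, return the pointer at 'alignment' */
	size_t alignment = 64;
	unsigned char *page = fake_pages;
	*(uintptr_t *)page = VM_MAGIC;
	void *obj = page + alignment;
	/* masking the pointer down to its page finds the magic again */
	assert(*(uintptr_t *)((uintptr_t)obj & ~(PAGE_SIZE - 1)) == VM_MAGIC);

	/* multi-page case: metadata struct immediately before the aligned start */
	unsigned char *mem = fake_pages + 2 * PAGE_SIZE;
	struct metadata *m = (struct metadata *)mem - 1;	/* get_metadata(mem) */
	m->npages = 1;
	m->magic = VM_MAGIC;
	assert((uintptr_t)m >= (uintptr_t)(mem - PAGE_SIZE));

	printf("magic at %p, metadata at %p, object at %p\n",
	       (void *)page, (void *)m, (void *)mem);
	return 0;
}

The real code of course backs these pages via alloc_page()/install_page();
the sketch only shows where the magic and the struct end up relative to the
pointer handed back to the caller.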

> > +
> >  /*
> >   * Allocate virtual memory, with the specified minimum alignment.
> > + * If the allocation fits in one page, only one page is allocated. Otherwise
> > + * enough pages are allocated for the object, plus one to keep metadata
> > + * information about the allocation.
> >   */
> >  static void *vm_memalign(size_t alignment, size_t size)
> >  {
> > +	struct metadata *m;
> >  	phys_addr_t pa;
> > -	void *mem, *p;
> > +	uintptr_t p;
> > +	void *mem;
> > +	size_t i;
> >  
> > +	if (!size)
> > +		return NULL;
> >  	assert(is_power_of_2(alignment));
> >  
> > +	if (alignment < sizeof(uintptr_t))
> > +		alignment = sizeof(uintptr_t);

                            ^^^^^^^^^^^^^^^^^

> > +	/* it fits in one page, allocate only one page */
> > +	if (alignment + size <= PAGE_SIZE)
> > +		return vm_alloc_one_page(alignment);  
> 
> Don't we also need to take the metadata into account in any size
> calculation for one page?

kinda... we guarantee a minimum alignment, which is enough to fit the
magic value, which is the only metadata item for single pages (see
above)
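
Spelling the bound out with a throw-away check (nothing below comes from the
patch except the constants and the two tests, restated here for illustration):
whenever the one-page path is taken, the object placed at offset 'alignment'
can neither overlap the magic at offset 0 nor run past the end of the page.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	size_t alignment, size;

	/* power-of-two alignments, as seen after the sizeof(uintptr_t) fixup */
	for (alignment = sizeof(uintptr_t); alignment < PAGE_SIZE; alignment *= 2) {
		for (size = 1; size <= PAGE_SIZE; size++) {
			/* the single-page test in vm_memalign() */
			int one_page = alignment + size <= PAGE_SIZE;
			/* object at [alignment, alignment + size) must miss the
			 * magic at [0, sizeof(uintptr_t)) and stay in the page */
			int fits_after_magic = alignment >= sizeof(uintptr_t) &&
					       alignment + size <= PAGE_SIZE;

			if (one_page)
				assert(fits_after_magic);
		}
	}
	return 0;
}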

> >  	size = PAGE_ALIGN(size) / PAGE_SIZE;
> >  	alignment = get_order(PAGE_ALIGN(alignment) / PAGE_SIZE);
> > -	mem = p = alloc_vpages_aligned(size, alignment);
> > -	while (size--) {
> > +	mem = do_alloc_vpages(size, alignment, true);
> > +	p = (uintptr_t)mem;
> > +	/* skip the metadata page */
> > +	mem = (void *)(p + PAGE_SIZE);
> > +	/*
> > +	 * time to actually allocate the physical pages to back our virtual
> > +	 * allocation; note that we need to allocate one extra page (for the
> > +	 * metadata), hence the <=
> > +	 */
> > +	for (i = 0; i <= size; i++, p += PAGE_SIZE) {
> >  		pa = virt_to_phys(alloc_page());
> >  		assert(pa);
> > -		install_page(page_root, pa, p);
> > -		p += PAGE_SIZE;
> > +		install_page(page_root, pa, (void *)p);
> >  	}
> > +	m = get_metadata(mem);
> > +	m->npages = size;
> > +	m->magic = VM_MAGIC;
> >  	return mem;
> >  }
> >  
> >  static void vm_free(void *mem, size_t size)
> >  {
> > -	while (size) {
> > -		free_page(phys_to_virt(virt_to_pte_phys(page_root, mem)));
> > -		mem += PAGE_SIZE;
> > -		size -= PAGE_SIZE;
> > +	struct metadata *m;
> > +	uintptr_t ptr, end;
> > +
> > +	/* the pointer is not page-aligned, it was a single-page allocation */
> > +	if (!IS_ALIGNED((uintptr_t)mem, PAGE_SIZE)) {
> > +		ptr = virt_to_pte_phys(page_root, mem) & PAGE_MASK;
> > +		assert(*(uintptr_t *)ptr == VM_MAGIC);
> > +		free_page(phys_to_virt(ptr));
> > +		return;
> >  	}
> > +
> > +	/* the pointer is page-aligned, it was a multi-page allocation */
> > +	m = get_metadata(mem);
> > +	assert(m->magic == VM_MAGIC);
> > +	assert(m->npages > 0);
> > +	/* free all the pages including the metadata page */
> > +	ptr = (uintptr_t)mem - PAGE_SIZE;
> > +	end = ptr + m->npages * PAGE_SIZE;
> > +	for ( ; ptr < end; ptr += PAGE_SIZE)
> > +		free_page(phys_to_virt(virt_to_pte_phys(page_root, (void *)ptr)));
> > +	/* free the last one separately to avoid overflow issues */
> > +	free_page(phys_to_virt(virt_to_pte_phys(page_root, (void *)ptr)));
> >  }
> >  
> >  static struct alloc_ops vmalloc_ops = {
> >   
> 
>
Janosch Frank Aug. 19, 2020, 3:36 p.m. UTC | #3
On 8/19/20 5:31 PM, Claudio Imbrenda wrote:
> On Wed, 19 Aug 2020 16:36:07 +0200
> Janosch Frank <frankja@linux.ibm.com> wrote:
> 
>> On 8/14/20 5:10 PM, Claudio Imbrenda wrote:
>>
>> LGTM, some smaller nits/questions below:
>>
>> Commit message?
> 
> oops! I'll fix it
> 
>>
>>> Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
>>> ---
>>>  lib/vmalloc.c | 105 +++++++++++++++++++++++++++++++++++++++++++++-----
>>>  1 file changed, 95 insertions(+), 10 deletions(-)
>>>
>>> diff --git a/lib/vmalloc.c b/lib/vmalloc.c
>>> index e0c7b6b..aca0876 100644
>>> --- a/lib/vmalloc.c
>>> +++ b/lib/vmalloc.c
>>> @@ -15,6 +15,13 @@
>>>  #include <bitops.h>
>>>  #include "vmalloc.h"
>>>  
>>> +#define VM_MAGIC 0x7E57C0DE
>>> +
>>> +struct metadata {
>>> +	unsigned long npages;
>>> +	unsigned long magic;
>>> +};
>>> +
>>>  static struct spinlock lock;
>>>  static void *vfree_top = 0;
>>>  static void *page_root;
>>> @@ -25,8 +32,14 @@ static void *page_root;
>>>   *
>>>   * nr is the number of pages to allocate
>>>   * alignment_pages is the alignment of the allocation *in pages*
>>> + * metadata indicates whether an extra (unaligned) page needs to be allocated
>>> + * right before the main (aligned) allocation.
>>> + *
>>> + * The return value points to the first allocated virtual page, which will
>>> + * be the (potentially unaligned) metadata page if the metadata flag is
>>> + * specified.
>>>   */
>>> -void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
>>> +static void *do_alloc_vpages(ulong nr, unsigned int align_order, bool metadata)
>>>  {
>>>  	uintptr_t ptr;
>>>  
>>> @@ -34,6 +47,8 @@ void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
>>>  	ptr = (uintptr_t)vfree_top;
>>>  	ptr -= PAGE_SIZE * nr;
>>>  	ptr &= GENMASK_ULL(63, PAGE_SHIFT + align_order);
>>> +	if (metadata)
>>> +		ptr -= PAGE_SIZE;
>>>  	vfree_top = (void *)ptr;
>>>  	spin_unlock(&lock);
>>>  
>>> @@ -41,6 +56,11 @@ void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
>>>  	return (void *)ptr;
>>>  }
>>>  
>>> +void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
>>> +{
>>> +	return do_alloc_vpages(nr, align_order, false);
>>> +}
>>> +
>>>  void *alloc_vpages(ulong nr)
>>>  {
>>>  	return alloc_vpages_aligned(nr, 0);
>>> @@ -69,35 +89,100 @@ void *vmap(phys_addr_t phys, size_t size)
>>>  	return mem;
>>>  }
>>>  
>>> +/*
>>> + * Allocate one page, for an object with specified alignment.
>>> + * The resulting pointer will be aligned to the required alignment, but
>>> + * intentionally not page-aligned.
>>> + */
>>> +static void *vm_alloc_one_page(size_t alignment)
>>> +{
>>> +	void *p;
>>> +
>>> +	assert(alignment >= sizeof(uintptr_t));
>>> +	assert(alignment < PAGE_SIZE);
>>> +	p = alloc_vpage();
>>> +	install_page(page_root, virt_to_phys(alloc_page()), p);
>>> +	/* write the magic at the beginning of the page */
>>> +	*(uintptr_t *)p = VM_MAGIC;
>>> +	return (void*)((uintptr_t)p + alignment);  
>>
>> s/(void*)/(void *)/
> 
> will be fixed
> 
>>> +}
>>> +
>>> +static struct metadata *get_metadata(void *p)
>>> +{
>>> +	struct metadata *m = p;
>>> +
>>> +	return m - 1;
>>> +}  
>>
>> So the metadata is not at the start of the metadata page, but at the
>> end? We have it at the beginning for the one page case and at the end
>> for the multi page case with metadata on an extra page.
> 
> correct. it doesn't make a huge difference in the end where the
> metadata is, as long as it is somewhere. Probably putting it always
> right before the start of the memory is better, in order to catch
> accidental off-by-one writes (as they would corrupt the magic value)
> 
> please note that the metadata for a single page is just the magic value


I assumed you'd write the struct in both cases; I should've looked closer.

> 
>>> +
>>>  /*
>>>   * Allocate virtual memory, with the specified minimum alignment.
>>> + * If the allocation fits in one page, only one page is allocated. Otherwise
>>> + * enough pages are allocated for the object, plus one to keep metadata
>>> + * information about the allocation.
>>>   */
>>>  static void *vm_memalign(size_t alignment, size_t size)
>>>  {
>>> +	struct metadata *m;
>>>  	phys_addr_t pa;
>>> -	void *mem, *p;
>>> +	uintptr_t p;
>>> +	void *mem;
>>> +	size_t i;
>>>  
>>> +	if (!size)
>>> +		return NULL;
>>>  	assert(is_power_of_2(alignment));
>>>  
>>> +	if (alignment < sizeof(uintptr_t))
>>> +		alignment = sizeof(uintptr_t);
> 
>                             ^^^^^^^^^^^^^^^^^
> 
>>> +	/* it fits in one page, allocate only one page */
>>> +	if (alignment + size <= PAGE_SIZE)
>>> +		return vm_alloc_one_page(alignment);  
>>
>> Don't we also need to take the metadata into account in any size
>> calculation for one page?
> 
> kinda... we guarantee a minimum alignment, which is enough to fit the
> magic value, which is the only metadata item for single pages (see
> above)

Ah, right

> 
>>>  	size = PAGE_ALIGN(size) / PAGE_SIZE;
>>>  	alignment = get_order(PAGE_ALIGN(alignment) / PAGE_SIZE);
>>> -	mem = p = alloc_vpages_aligned(size, alignment);
>>> -	while (size--) {
>>> +	mem = do_alloc_vpages(size, alignment, true);
>>> +	p = (uintptr_t)mem;
>>> +	/* skip the metadata page */
>>> +	mem = (void *)(p + PAGE_SIZE);
>>> +	/*
>>> +	 * time to actually allocate the physical pages to back our virtual
>>> +	 * allocation; note that we need to allocate one extra page (for the
>>> +	 * metadata), hence the <=
>>> +	 */
>>> +	for (i = 0; i <= size; i++, p += PAGE_SIZE) {
>>>  		pa = virt_to_phys(alloc_page());
>>>  		assert(pa);
>>> -		install_page(page_root, pa, p);
>>> -		p += PAGE_SIZE;
>>> +		install_page(page_root, pa, (void *)p);
>>>  	}
>>> +	m = get_metadata(mem);
>>> +	m->npages = size;
>>> +	m->magic = VM_MAGIC;
>>>  	return mem;
>>>  }
>>>  
>>>  static void vm_free(void *mem, size_t size)
>>>  {
>>> -	while (size) {
>>> -		free_page(phys_to_virt(virt_to_pte_phys(page_root, mem)));
>>> -		mem += PAGE_SIZE;
>>> -		size -= PAGE_SIZE;
>>> +	struct metadata *m;
>>> +	uintptr_t ptr, end;
>>> +
>>> +	/* the pointer is not page-aligned, it was a single-page allocation */
>>> +	if (!IS_ALIGNED((uintptr_t)mem, PAGE_SIZE)) {
>>> +		ptr = virt_to_pte_phys(page_root, mem) & PAGE_MASK;
>>> +		assert(*(uintptr_t *)ptr == VM_MAGIC);
>>> +		free_page(phys_to_virt(ptr));
>>> +		return;
>>>  	}
>>> +
>>> +	/* the pointer is page-aligned, it was a multi-page allocation */
>>> +	m = get_metadata(mem);
>>> +	assert(m->magic == VM_MAGIC);
>>> +	assert(m->npages > 0);
>>> +	/* free all the pages including the metadata page */
>>> +	ptr = (uintptr_t)mem - PAGE_SIZE;
>>> +	end = ptr + m->npages * PAGE_SIZE;
>>> +	for ( ; ptr < end; ptr += PAGE_SIZE)
>>> +		free_page(phys_to_virt(virt_to_pte_phys(page_root, (void *)ptr)));
>>> +	/* free the last one separately to avoid overflow issues */
>>> +	free_page(phys_to_virt(virt_to_pte_phys(page_root, (void *)ptr)));
>>>  }
>>>  
>>>  static struct alloc_ops vmalloc_ops = {
>>>   
>>
>>
>

Patch

diff --git a/lib/vmalloc.c b/lib/vmalloc.c
index e0c7b6b..aca0876 100644
--- a/lib/vmalloc.c
+++ b/lib/vmalloc.c
@@ -15,6 +15,13 @@ 
 #include <bitops.h>
 #include "vmalloc.h"
 
+#define VM_MAGIC 0x7E57C0DE
+
+struct metadata {
+	unsigned long npages;
+	unsigned long magic;
+};
+
 static struct spinlock lock;
 static void *vfree_top = 0;
 static void *page_root;
@@ -25,8 +32,14 @@  static void *page_root;
  *
  * nr is the number of pages to allocate
  * alignment_pages is the alignment of the allocation *in pages*
+ * metadata indicates whether an extra (unaligned) page needs to be allocated
+ * right before the main (aligned) allocation.
+ *
+ * The return value points to the first allocated virtual page, which will
+ * be the (potentially unaligned) metadata page if the metadata flag is
+ * specified.
  */
-void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
+static void *do_alloc_vpages(ulong nr, unsigned int align_order, bool metadata)
 {
 	uintptr_t ptr;
 
@@ -34,6 +47,8 @@  void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
 	ptr = (uintptr_t)vfree_top;
 	ptr -= PAGE_SIZE * nr;
 	ptr &= GENMASK_ULL(63, PAGE_SHIFT + align_order);
+	if (metadata)
+		ptr -= PAGE_SIZE;
 	vfree_top = (void *)ptr;
 	spin_unlock(&lock);
 
@@ -41,6 +56,11 @@  void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
 	return (void *)ptr;
 }
 
+void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
+{
+	return do_alloc_vpages(nr, align_order, false);
+}
+
 void *alloc_vpages(ulong nr)
 {
 	return alloc_vpages_aligned(nr, 0);
@@ -69,35 +89,100 @@  void *vmap(phys_addr_t phys, size_t size)
 	return mem;
 }
 
+/*
+ * Allocate one page, for an object with specified alignment.
+ * The resulting pointer will be aligned to the required alignment, but
+ * intentionally not page-aligned.
+ */
+static void *vm_alloc_one_page(size_t alignment)
+{
+	void *p;
+
+	assert(alignment >= sizeof(uintptr_t));
+	assert(alignment < PAGE_SIZE);
+	p = alloc_vpage();
+	install_page(page_root, virt_to_phys(alloc_page()), p);
+	/* write the magic at the beginning of the page */
+	*(uintptr_t *)p = VM_MAGIC;
+	return (void*)((uintptr_t)p + alignment);
+}
+
+static struct metadata *get_metadata(void *p)
+{
+	struct metadata *m = p;
+
+	return m - 1;
+}
+
 /*
  * Allocate virtual memory, with the specified minimum alignment.
+ * If the allocation fits in one page, only one page is allocated. Otherwise
+ * enough pages are allocated for the object, plus one to keep metadata
+ * information about the allocation.
  */
 static void *vm_memalign(size_t alignment, size_t size)
 {
+	struct metadata *m;
 	phys_addr_t pa;
-	void *mem, *p;
+	uintptr_t p;
+	void *mem;
+	size_t i;
 
+	if (!size)
+		return NULL;
 	assert(is_power_of_2(alignment));
 
+	if (alignment < sizeof(uintptr_t))
+		alignment = sizeof(uintptr_t);
+	/* it fits in one page, allocate only one page */
+	if (alignment + size <= PAGE_SIZE)
+		return vm_alloc_one_page(alignment);
 	size = PAGE_ALIGN(size) / PAGE_SIZE;
 	alignment = get_order(PAGE_ALIGN(alignment) / PAGE_SIZE);
-	mem = p = alloc_vpages_aligned(size, alignment);
-	while (size--) {
+	mem = do_alloc_vpages(size, alignment, true);
+	p = (uintptr_t)mem;
+	/* skip the metadata page */
+	mem = (void *)(p + PAGE_SIZE);
+	/*
+	 * time to actually allocate the physical pages to back our virtual
+	 * allocation; note that we need to allocate one extra page (for the
+	 * metadata), hence the <=
+	 */
+	for (i = 0; i <= size; i++, p += PAGE_SIZE) {
 		pa = virt_to_phys(alloc_page());
 		assert(pa);
-		install_page(page_root, pa, p);
-		p += PAGE_SIZE;
+		install_page(page_root, pa, (void *)p);
 	}
+	m = get_metadata(mem);
+	m->npages = size;
+	m->magic = VM_MAGIC;
 	return mem;
 }
 
 static void vm_free(void *mem, size_t size)
 {
-	while (size) {
-		free_page(phys_to_virt(virt_to_pte_phys(page_root, mem)));
-		mem += PAGE_SIZE;
-		size -= PAGE_SIZE;
+	struct metadata *m;
+	uintptr_t ptr, end;
+
+	/* the pointer is not page-aligned, it was a single-page allocation */
+	if (!IS_ALIGNED((uintptr_t)mem, PAGE_SIZE)) {
+		ptr = virt_to_pte_phys(page_root, mem) & PAGE_MASK;
+		assert(*(uintptr_t *)ptr == VM_MAGIC);
+		free_page(phys_to_virt(ptr));
+		return;
 	}
+
+	/* the pointer is page-aligned, it was a multi-page allocation */
+	m = get_metadata(mem);
+	assert(m->magic == VM_MAGIC);
+	assert(m->npages > 0);
+	/* free all the pages including the metadata page */
+	ptr = (uintptr_t)mem - PAGE_SIZE;
+	end = ptr + m->npages * PAGE_SIZE;
+	for ( ; ptr < end; ptr += PAGE_SIZE)
+		free_page(phys_to_virt(virt_to_pte_phys(page_root, (void *)ptr)));
+	/* free the last one separately to avoid overflow issues */
+	free_page(phys_to_virt(virt_to_pte_phys(page_root, (void *)ptr)));
 }
 
 static struct alloc_ops vmalloc_ops = {