@@ -24,7 +24,7 @@
struct alloc_ops {
void *(*memalign)(size_t alignment, size_t size);
- void (*free)(void *ptr, size_t size);
+ void (*free)(void *ptr);
size_t align_min;
};
@@ -62,18 +62,18 @@ void *alloc_pages(unsigned int order);
* alloc_pages* functions.
* The pointer must point to the start of the block.
*/
-void free_pages(void *mem, size_t size);
+void free_pages(void *mem);
/* For backwards compatibility */
static inline void free_page(void *mem)
{
- return free_pages(mem, 1);
+ return free_pages(mem);
}
/* For backwards compatibility */
static inline void free_pages_by_order(void *mem, unsigned int order)
{
- free_pages(mem, 1ull << order);
+ free_pages(mem);
}
#endif
@@ -50,56 +50,24 @@ void *calloc(size_t nmemb, size_t size)
return ptr;
}
-#define METADATA_EXTRA (2 * sizeof(uintptr_t))
-#define OFS_SLACK (-2 * sizeof(uintptr_t))
-#define OFS_SIZE (-sizeof(uintptr_t))
-
-static inline void *block_begin(void *mem)
-{
- uintptr_t slack = *(uintptr_t *)(mem + OFS_SLACK);
- return mem - slack;
-}
-
-static inline uintptr_t block_size(void *mem)
-{
- return *(uintptr_t *)(mem + OFS_SIZE);
-}
-
void free(void *ptr)
{
- if (!alloc_ops->free)
- return;
-
- void *base = block_begin(ptr);
- uintptr_t sz = block_size(ptr);
-
- alloc_ops->free(base, sz);
+ if (alloc_ops->free)
+ alloc_ops->free(ptr);
}
void *memalign(size_t alignment, size_t size)
{
void *p;
- uintptr_t blkalign;
- uintptr_t mem;
if (!size)
return NULL;
- assert(alignment >= sizeof(void *) && is_power_of_2(alignment));
+ assert(is_power_of_2(alignment));
assert(alloc_ops && alloc_ops->memalign);
- size += alignment - 1;
- blkalign = MAX(alignment, alloc_ops->align_min);
- size = ALIGN(size + METADATA_EXTRA, alloc_ops->align_min);
- p = alloc_ops->memalign(blkalign, size);
+ p = alloc_ops->memalign(alignment, size);
assert(p);
- /* Leave room for metadata before aligning the result. */
- mem = (uintptr_t)p + METADATA_EXTRA;
- mem = ALIGN(mem, alignment);
-
- /* Write the metadata */
- *(uintptr_t *)(mem + OFS_SLACK) = mem - (uintptr_t)p;
- *(uintptr_t *)(mem + OFS_SIZE) = size;
- return (void *)mem;
+ return (void *)p;
}
@@ -260,7 +260,7 @@ static void _free_pages(void *mem)
} while (coalesce(a, order, pfn, pfn2));
}
-void free_pages(void *mem, size_t size)
+void free_pages(void *mem)
{
spin_lock(&lock);
_free_pages(mem);
@@ -163,8 +163,8 @@ int smp_cpu_destroy(uint16_t addr)
rc = smp_cpu_stop_nolock(addr, false);
if (!rc) {
cpu = smp_cpu_from_addr(addr);
- free_pages(cpu->lowcore, 2 * PAGE_SIZE);
- free_pages(cpu->stack, 4 * PAGE_SIZE);
+ free_pages(cpu->lowcore);
+ free_pages(cpu->stack);
cpu->lowcore = (void *)-1UL;
cpu->stack = (void *)-1UL;
}
@@ -159,7 +159,7 @@ static void *vm_memalign(size_t alignment, size_t size)
return mem;
}
-static void vm_free(void *mem, size_t size)
+static void vm_free(void *mem)
{
struct metadata *m;
uintptr_t ptr, end;
@@ -143,7 +143,7 @@ static void test_store_status(void)
sigp(1, SIGP_STORE_STATUS_AT_ADDRESS, (uintptr_t)status, NULL);
while (!status->prefix) { mb(); }
report(1, "status written");
- free_pages(status, PAGE_SIZE * 2);
+ free_pages(status);
report_prefix_pop();
smp_cpu_stop(1);
@@ -276,7 +276,7 @@ static void test_reset_initial(void)
report_prefix_pop();
report(smp_cpu_stopped(1), "cpu stopped");
- free_pages(status, PAGE_SIZE);
+ free_pages(status);
report_prefix_pop();
}
Remove the size parameter from the various free functions. Since the backends can handle the allocation sizes on their own, simplify the generic malloc wrappers. Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com> --- lib/alloc.h | 2 +- lib/alloc_page.h | 6 +++--- lib/alloc.c | 42 +++++------------------------------------- lib/alloc_page.c | 2 +- lib/s390x/smp.c | 4 ++-- lib/vmalloc.c | 2 +- s390x/smp.c | 4 ++-- 7 files changed, 15 insertions(+), 47 deletions(-)