@@ -33,21 +33,13 @@ static struct mp_block *mem_pool_alloc_block(struct mem_pool *mem_pool, size_t b
return p;
}
-void mem_pool_init(struct mem_pool **mem_pool, size_t initial_size)
+void mem_pool_init(struct mem_pool *pool, size_t initial_size)
{
- struct mem_pool *pool;
-
- if (*mem_pool)
- return;
-
- pool = xcalloc(1, sizeof(*pool));
-
+ memset(pool, 0, sizeof(*pool));
pool->block_alloc = BLOCK_GROWTH_SIZE;
if (initial_size > 0)
mem_pool_alloc_block(pool, initial_size, NULL);
-
- *mem_pool = pool;
}
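The change above moves ownership of the struct mem_pool out of mem_pool_init(): the caller now supplies the storage, and init merely zeroes it and seeds block_alloc. A minimal sketch of the new calling convention, with a hypothetical caller that is not part of the patch:

	struct mem_pool pool;	/* caller owns the struct; stack storage works now */
	void *buf;

	mem_pool_init(&pool, 0);	/* zero the struct, set block_alloc to BLOCK_GROWTH_SIZE */
	buf = mem_pool_alloc(&pool, 64);	/* draw from the pool as before */
	mem_pool_discard(&pool, 0);	/* frees the blocks, never the struct itself */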
void mem_pool_discard(struct mem_pool *mem_pool, int invalidate_memory)
@@ -66,7 +58,8 @@ void mem_pool_discard(struct mem_pool *mem_pool, int invalidate_memory)
free(block_to_free);
}
- free(mem_pool);
+ mem_pool->mp_block = NULL;
+ mem_pool->pool_alloc = 0;
}
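Since mem_pool_discard() now resets mp_block and pool_alloc instead of freeing the struct, a discarded pool is left in a valid empty state: block_alloc survives, so the next allocation simply carves a fresh block. A speculative sketch, reusing the pool from the example above:

	mem_pool_discard(&pool, 0);	/* back to an empty but valid pool */
	buf = mem_pool_alloc(&pool, 32);	/* allocates a new block on demand */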
void *mem_pool_alloc(struct mem_pool *mem_pool, size_t len)
@@ -24,10 +24,10 @@ struct mem_pool {
/*
* Initialize mem_pool with specified initial size.
*/
-void mem_pool_init(struct mem_pool **mem_pool, size_t initial_size);
+void mem_pool_init(struct mem_pool *pool, size_t initial_size);
/*
- * Discard a memory pool and free all the memory it is responsible for.
+ * Discard all the memory the memory pool is responsible for.
*/
void mem_pool_discard(struct mem_pool *mem_pool, int invalidate_memory);
@@ -89,8 +89,10 @@ static struct mem_pool *find_mem_pool(struct index_state *istate)
else
pool_ptr = &istate->ce_mem_pool;
- if (!*pool_ptr)
- mem_pool_init(pool_ptr, 0);
+ if (!*pool_ptr) {
+ *pool_ptr = xmalloc(sizeof(**pool_ptr));
+ mem_pool_init(*pool_ptr, 0);
+ }
return *pool_ptr;
}
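For pools that live behind a pointer, the allocation moves to the call sites as shown above: pair xmalloc() with mem_pool_init(), and symmetrically pair mem_pool_discard() with FREE_AND_NULL() (see the discard_index() hunk further down). The pattern, sketched with an illustrative pointer name:

	struct mem_pool *pool = xmalloc(sizeof(*pool));

	mem_pool_init(pool, 0);
	/* ... allocate from the pool ... */
	mem_pool_discard(pool, 0);	/* release the blocks */
	FREE_AND_NULL(pool);	/* the caller, not the pool code, releases the struct */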
@@ -2006,11 +2008,12 @@ static unsigned long load_all_cache_entries(struct index_state *istate,
{
unsigned long consumed;
+ istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
if (istate->version == 4) {
- mem_pool_init(&istate->ce_mem_pool,
+ mem_pool_init(istate->ce_mem_pool,
estimate_cache_size_from_compressed(istate->cache_nr));
} else {
- mem_pool_init(&istate->ce_mem_pool,
+ mem_pool_init(istate->ce_mem_pool,
estimate_cache_size(mmap_size, istate->cache_nr));
}
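Passing a non-zero initial_size here lets mem_pool_init() carve a single block sized for the whole estimated index up front; passing 0 (as in the threaded setup below) defers allocation, and the pool then grows in BLOCK_GROWTH_SIZE steps.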
@@ -2070,7 +2073,8 @@ static unsigned long load_cache_entries_threaded(struct index_state *istate, con
if (istate->name_hash_initialized)
BUG("the name hash isn't thread safe");
- mem_pool_init(&istate->ce_mem_pool, 0);
+ istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
+ mem_pool_init(istate->ce_mem_pool, 0);
/* ensure we have no more threads than we have blocks to process */
if (nr_threads > ieot->nr)
@@ -2097,11 +2101,12 @@ static unsigned long load_cache_entries_threaded(struct index_state *istate, con
nr = 0;
for (j = p->ieot_start; j < p->ieot_start + p->ieot_blocks; j++)
nr += p->ieot->entries[j].nr;
+ p->ce_mem_pool = xmalloc(sizeof(*p->ce_mem_pool));
if (istate->version == 4) {
- mem_pool_init(&p->ce_mem_pool,
+ mem_pool_init(p->ce_mem_pool,
estimate_cache_size_from_compressed(nr));
} else {
- mem_pool_init(&p->ce_mem_pool,
+ mem_pool_init(p->ce_mem_pool,
estimate_cache_size(mmap_size, nr));
}
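Each worker thread allocates cache entries from its own p->ce_mem_pool, so the hot allocation path needs no locking; once the threads finish, the per-thread pools can be folded into istate->ce_mem_pool with mem_pool_combine(), the same call that appears in the split-index hunk below.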
@@ -2358,7 +2363,7 @@ int discard_index(struct index_state *istate)
if (istate->ce_mem_pool) {
mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());
- istate->ce_mem_pool = NULL;
+ FREE_AND_NULL(istate->ce_mem_pool);
}
return 0;
@@ -79,8 +79,10 @@ void move_cache_to_base_index(struct index_state *istate)
if (si->base &&
si->base->ce_mem_pool) {
- if (!istate->ce_mem_pool)
- mem_pool_init(&istate->ce_mem_pool, 0);
+ if (!istate->ce_mem_pool) {
+ istate->ce_mem_pool = xmalloc(sizeof(*istate->ce_mem_pool));
+ mem_pool_init(istate->ce_mem_pool, 0);
+ }
mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool);
}
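The lazy guard above exists because mem_pool_combine() needs an already-initialized destination pool to take over the base index's blocks. The same three-line allocate-and-init pattern now recurs at several call sites; a hypothetical helper (not part of the patch) that would factor it out:

	static struct mem_pool *ensure_mem_pool(struct mem_pool **pool_ptr)
	{
		if (!*pool_ptr) {
			*pool_ptr = xmalloc(sizeof(**pool_ptr));
			mem_pool_init(*pool_ptr, 0);
		}
		return *pool_ptr;
	}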