@@ -486,14 +486,6 @@ static struct size_class *zspage_class(struct zs_pool *pool,
 	return pool->size_class[zspage->class];
 }
 
-static void set_zspage_mapping(struct zspage *zspage,
-			       unsigned int class_idx,
-			       int fullness)
-{
-	zspage->class = class_idx;
-	zspage->fullness = fullness;
-}
-
 /*
  * zsmalloc divides the pool into various size classes where each
  * class maintains a list of zspages where each zspage is divided
@@ -688,6 +680,7 @@ static void insert_zspage(struct size_class *class,
 {
 	class_stat_inc(class, fullness, 1);
 	list_add(&zspage->list, &class->fullness_list[fullness]);
+	zspage->fullness = fullness;
 }
 
 /*
@@ -725,7 +718,6 @@ static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
 
 	remove_zspage(class, zspage, currfg);
 	insert_zspage(class, zspage, newfg);
-	set_zspage_mapping(zspage, class_idx, newfg);
 out:
 	return newfg;
 }
@@ -1005,6 +997,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 	create_page_chain(class, zspage, pages);
 	init_zspage(class, zspage);
 	zspage->pool = pool;
+	zspage->class = class->index;
 
 	return zspage;
 }
@@ -1397,7 +1390,6 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 	obj = obj_malloc(pool, zspage, handle);
 	newfg = get_fullness_group(class, zspage);
 	insert_zspage(class, zspage, newfg);
-	set_zspage_mapping(zspage, class->index, newfg);
 	record_obj(handle, obj);
 	atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
 	class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
@@ -1655,7 +1647,6 @@ static int putback_zspage(struct size_class *class, struct zspage *zspage)
 
 	fullness = get_fullness_group(class, zspage);
 	insert_zspage(class, zspage, fullness);
-	set_zspage_mapping(zspage, class->index, fullness);
 
 	return fullness;
 }

We only need to update zspage->fullness in insert_zspage(), since
zspage->class is never changed after the zspage is allocated.

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
---
 mm/zsmalloc.c | 13 ++-----------
 1 file changed, 2 insertions(+), 11 deletions(-)
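
Not part of the patch, just an illustration: a minimal userspace C sketch of
the invariant the change relies on. The names (zspage_stub, alloc_zspage_stub,
insert_zspage_stub) and the simplified fields are made up for this example and
are not the kernel structures; the point is only that the class index is
written once at allocation time, while the fullness group is rewritten solely
on the insert path.

#include <stdio.h>

/* Simplified stand-in for the kernel's struct zspage (assumption, not the real layout). */
struct zspage_stub {
	unsigned int class;	/* set once at allocation, never changed afterwards */
	int fullness;		/* tracks which fullness list the zspage currently sits on */
};

/* Mirrors the patched insert_zspage(): the only place fullness is updated. */
static void insert_zspage_stub(struct zspage_stub *zspage, int fullness)
{
	/* real code also does the list_add(); only the bookkeeping is shown here */
	zspage->fullness = fullness;
}

/* Mirrors the patched alloc_zspage(): class is fixed at creation time. */
static struct zspage_stub alloc_zspage_stub(unsigned int class_index)
{
	struct zspage_stub zspage = { .class = class_index, .fullness = 0 };
	return zspage;
}

int main(void)
{
	struct zspage_stub zspage = alloc_zspage_stub(42);

	insert_zspage_stub(&zspage, 1);	/* e.g. moved onto an "almost empty" list */
	insert_zspage_stub(&zspage, 3);	/* later moved onto an "almost full" list */

	printf("class=%u fullness=%d\n", zspage.class, zspage.fullness);
	return 0;
}

With the two writes folded into their natural call sites, set_zspage_mapping()
is left with no callers, which is what lets the patch drop the helper outright.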