@@ -1675,6 +1675,8 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
 	unsigned long handle_new;
 	unsigned int comp_len_old;
 	unsigned int comp_len_new;
+	unsigned int class_index_old;
+	unsigned int class_index_new;
 	void *src, *dst;
 	int ret;
 
@@ -1693,6 +1695,7 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
 	if (ret)
 		return ret;
 
+	class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old);
 	/*
 	 * Iterate the secondary comp algorithms list (in order of priority)
 	 * and try to recompress the page.
@@ -1718,9 +1721,13 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
 			return ret;
 		}
 
+		class_index_new = zs_lookup_class_index(zram->mem_pool,
+							comp_len_new);
+
 		/* Continue until we make progress */
 		if (comp_len_new >= huge_class_size ||
 		    comp_len_new >= comp_len_old ||
+		    class_index_new >= class_index_old ||
 		    (threshold && comp_len_new >= threshold)) {
 			zcomp_stream_put(zram->comps[prio]);
 			continue;
@@ -1743,7 +1750,9 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
 	 * that would save memory, mark the object as incompressible so that
 	 * we will not try to compress it again.
 	 */
-	if (comp_len_new >= huge_class_size || comp_len_new >= comp_len_old) {
+	if (comp_len_new >= huge_class_size ||
+	    comp_len_new >= comp_len_old ||
+	    class_index_new >= class_index_old) {
		zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
		return 0;
	}
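
Why a plain byte-count comparison is not enough: zsmalloc rounds every
allocation up to a size class, so a recompressed object that is only a
few bytes smaller can land in the same class and free no memory. Below
is a minimal user-space sketch of that rounding, assuming the common
4K-page configuration (ZS_MIN_ALLOC_SIZE of 32 bytes and a 16-byte
ZS_SIZE_CLASS_DELTA); size_class_index() is an illustrative stand-in,
not the in-kernel get_size_class_index():

#include <stdio.h>

#define ZS_MIN_ALLOC_SIZE	32	/* assumed minimum object size */
#define ZS_SIZE_CLASS_DELTA	16	/* assumed PAGE_SIZE >> CLASS_BITS */

/*
 * Stand-in for get_size_class_index(); the real function also clamps
 * the result to ZS_SIZE_CLASSES - 1.
 */
static unsigned int size_class_index(unsigned int size)
{
	unsigned int idx = 0;

	if (size > ZS_MIN_ALLOC_SIZE)
		idx = (size - ZS_MIN_ALLOC_SIZE + ZS_SIZE_CLASS_DELTA - 1)
			/ ZS_SIZE_CLASS_DELTA;
	return idx;
}

int main(void)
{
	/*
	 * 208 and 197 bytes map to the same class: recompressing from
	 * 208 down to 197 bytes would free nothing.
	 */
	printf("208 -> class %u, 197 -> class %u\n",
	       size_class_index(208), size_class_index(197));
	/* 176 bytes maps to a smaller class: a real saving. */
	printf("176 -> class %u\n", size_class_index(176));
	return 0;
}

With the class_index_new >= class_index_old checks above, the
208 -> 197 case is skipped (and eventually marked ZRAM_INCOMPRESSIBLE)
instead of being re-stored at the same memory cost.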
@@ -55,5 +55,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
 unsigned long zs_get_total_pages(struct zs_pool *pool);
 unsigned long zs_compact(struct zs_pool *pool);
 
+unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size);
+
 void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
 #endif
@@ -1205,6 +1205,27 @@ static bool zspage_full(struct size_class *class, struct zspage *zspage)
 	return get_zspage_inuse(zspage) == class->objs_per_zspage;
 }
 
+/**
+ * zs_lookup_class_index() - Returns the index of the zsmalloc &size_class
+ * that holds objects of the provided size.
+ * @pool: zsmalloc pool to use
+ * @size: object size
+ *
+ * Context: Any context.
+ *
+ * Return: the index of the zsmalloc &size_class that holds objects of the
+ * provided size.
+ */
+unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size)
+{
+	struct size_class *class;
+
+	class = pool->size_class[get_size_class_index(size)];
+
+	return class->index;
+}
+EXPORT_SYMBOL_GPL(zs_lookup_class_index);
+
 unsigned long zs_get_total_pages(struct zs_pool *pool)
 {
 	return atomic_long_read(&pool->pages_allocated);
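
zs_lookup_class_index() exists because get_size_class_index() is local
to mm/zsmalloc.c; the export gives modules such as zram a read-only
view of the pool's size-class mapping. A condensed sketch of the
calling pattern zram_recompress() uses above (the helper name is
illustrative, not part of the patch):

/* Would storing the recompressed object actually free memory? */
static bool recompression_helps(struct zs_pool *pool,
				unsigned int comp_len_old,
				unsigned int comp_len_new)
{
	return zs_lookup_class_index(pool, comp_len_new) <
	       zs_lookup_class_index(pool, comp_len_old);
}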