@@ -1380,13 +1380,14 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
 			__GFP_KSWAPD_RECLAIM |
 			__GFP_NOWARN |
 			__GFP_HIGHMEM |
-			__GFP_MOVABLE);
+			__GFP_MOVABLE |
+			__GFP_CMA);
 	if (!handle) {
 		zcomp_stream_put(zram->comp);
 		atomic64_inc(&zram->stats.writestall);
 		handle = zs_malloc(zram->mem_pool, comp_len,
 				GFP_NOIO | __GFP_HIGHMEM |
 				__GFP_MOVABLE);
+				__GFP_MOVABLE | __GFP_CMA);
 		if (handle)
 			goto compress_again;
 		return -ENOMEM;
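Both allocations on the zram write path now carry __GFP_CMA, which here is assumed to be a vendor (Android common kernel) gfp bit permitting allocation from CMA pageblocks; it is not a mainline flag. Note the shape of the path: the first zs_malloc() attempt is deliberately non-blocking (__GFP_KSWAPD_RECLAIM | __GFP_NOWARN), and only after releasing the compression stream and bumping the writestall counter does the retry use GFP_NOIO, which may direct-reclaim. A minimal userspace sketch of the two flag sets, using stand-in bit values rather than the real ones from include/linux/gfp.h:

	/* Illustrative only: stand-in values, not the kernel's gfp bits. */
	#include <stdio.h>

	#define __GFP_KSWAPD_RECLAIM	0x001u	/* wake kswapd, never block */
	#define __GFP_NOWARN		0x002u	/* no allocation-failure warning */
	#define __GFP_HIGHMEM		0x004u	/* highmem pages acceptable */
	#define __GFP_MOVABLE		0x008u	/* page may be migrated/compacted */
	#define __GFP_CMA		0x010u	/* assumed vendor bit: CMA areas OK */
	#define GFP_NOIO		0x020u	/* may direct-reclaim, but no I/O */

	int main(void)
	{
		/* Fast, non-blocking first attempt (first zs_malloc() above). */
		unsigned fast = __GFP_KSWAPD_RECLAIM | __GFP_NOWARN |
				__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_CMA;
		/* Blocking retry issued after zcomp_stream_put() and the
		 * writestall accounting. */
		unsigned retry = GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE |
				__GFP_CMA;

		printf("fast=%#x retry=%#x\n", fast, retry);
		return 0;
	}

The two-step pattern keeps the common case cheap: stalling for reclaim is only paid for (and counted) when the opportunistic allocation fails.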
@@ -351,7 +351,7 @@ static void destroy_cache(struct zs_pool *pool)
 static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
 {
 	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
-			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
+			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE|__GFP_CMA));
 }
 
 static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
@@ -362,7 +362,7 @@ static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
 static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
 {
 	return kmem_cache_alloc(pool->zspage_cachep,
-			flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
+			flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE|__GFP_CMA));
 }
 
 static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
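On the zsmalloc side, the caller's gfp mask is reused for the kmem_cache_alloc() of handles and of struct zspage, so placement-only hints have to be stripped first: slab objects must be kernel-mapped and are not migratable, so __GFP_HIGHMEM, __GFP_MOVABLE, and now __GFP_CMA must not leak into the slab allocator. A small userspace sketch of the masking, again with stand-in bit values:

	/* Illustrative stand-in bits; the real values live in linux/gfp.h. */
	#include <assert.h>
	#include <stdio.h>

	#define __GFP_HIGHMEM	0x1u
	#define __GFP_MOVABLE	0x2u
	#define __GFP_CMA	0x4u	/* assumed vendor bit, not mainline */
	#define GFP_NOIO	0x8u

	/* Mirrors the masking in cache_alloc_handle()/cache_alloc_zspage(). */
	static unsigned slab_safe(unsigned gfp)
	{
		return gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_CMA);
	}

	int main(void)
	{
		unsigned caller = GFP_NOIO | __GFP_HIGHMEM |
				__GFP_MOVABLE | __GFP_CMA;

		/* Only the reclaim-behaviour bits survive; the placement
		 * hints meant for the page allocator are dropped. */
		assert(slab_safe(caller) == GFP_NOIO);
		printf("caller=%#x slab=%#x\n", caller, slab_safe(caller));
		return 0;
	}

Without this, a zs_malloc() caller passing __GFP_CMA for its backing pages would also steer the handle and zspage metadata allocations toward CMA, which the slab allocator cannot honor.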