
[v2] zswap: do not crash the kernel on decompression failure

Message ID 20250227001445.1099203-1-nphamcs@gmail.com (mailing list archive)
State New
Series [v2] zswap: do not crash the kernel on decompression failure

Commit Message

Nhat Pham Feb. 27, 2025, 12:14 a.m. UTC
Currently, we crash the kernel when a decompression failure occurs in
zswap (either because of memory corruption, or a bug in the compression
algorithm). This is overkill. We should only SIGBUS the unfortunate
process asking for the zswap entry on zswap load, and skip the corrupted
entry in zswap writeback. The former is accomplished by returning true
from zswap_load(), indicating that zswap owns the swapped out content,
but without flagging the folio as up-to-date. The process trying to swap
in the page will check for the uptodate folio flag and SIGBUS (see
do_swap_page() in mm/memory.c for more details).

See [1] for a recent upstream discussion about this.

[1]: https://lore.kernel.org/all/ZsiLElTykamcYZ6J@casper.infradead.org/
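
For reference, the caller-side check is roughly the following (a
simplified sketch of the relevant lines in do_swap_page() in
mm/memory.c; the surrounding error handling is elided):

	if (unlikely(!folio_test_uptodate(folio))) {
		ret = VM_FAULT_SIGBUS;
		goto out_nomap;
	}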

Suggested-by: Matthew Wilcox <willy@infradead.org>
Suggested-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
---
 mm/zswap.c | 94 ++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 67 insertions(+), 27 deletions(-)


base-commit: 598d34afeca6bb10554846cf157a3ded8729516c

Comments

Yosry Ahmed Feb. 27, 2025, 1:19 a.m. UTC | #1
On Wed, Feb 26, 2025 at 04:14:45PM -0800, Nhat Pham wrote:
> Currently, we crash the kernel when a decompression failure occurs in
> zswap (either because of memory corruption, or a bug in the compression
> algorithm). This is overkill. We should only SIGBUS the unfortunate
> process asking for the zswap entry on zswap load, and skip the corrupted
> entry in zswap writeback. The former is accomplished by returning true
> from zswap_load(), indicating that zswap owns the swapped out content,
> but without flagging the folio as up-to-date. The process trying to swap
> in the page will check for the uptodate folio flag and SIGBUS (see
> do_swap_page() in mm/memory.c for more details).

We should call out the extra xarray walks and their perf impact (if
any).
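
For context, the extra walks come from replacing the single
xa_cmpxchg() with an xa_load() check plus a later xa_erase(), e.g. in
zswap_writeback_entry():

	if (entry != xa_load(tree, offset))	/* first walk */
		goto delete_unlock;
	...
	xa_erase(tree, offset);			/* second walk */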

> 
> See [1] for a recent upstream discussion about this.
> 
> [1]: https://lore.kernel.org/all/ZsiLElTykamcYZ6J@casper.infradead.org/
> 
> Suggested-by: Matthew Wilcox <willy@infradead.org>
> Suggested-by: Yosry Ahmed <yosry.ahmed@linux.dev>
> Signed-off-by: Nhat Pham <nphamcs@gmail.com>
> ---
>  mm/zswap.c | 94 ++++++++++++++++++++++++++++++++++++++----------------
>  1 file changed, 67 insertions(+), 27 deletions(-)
> 
> diff --git a/mm/zswap.c b/mm/zswap.c
> index 6dbf31bd2218..e4a2157bbc64 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -62,6 +62,8 @@ static u64 zswap_reject_reclaim_fail;
>  static u64 zswap_reject_compress_fail;
>  /* Compressed page was too big for the allocator to (optimally) store */
>  static u64 zswap_reject_compress_poor;
> +/* Load or writeback failed due to decompression failure */
> +static u64 zswap_decompress_fail;
>  /* Store failed because underlying allocator could not get memory */
>  static u64 zswap_reject_alloc_fail;
>  /* Store failed because the entry metadata could not be allocated (rare) */
> @@ -996,11 +998,13 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
>  	return comp_ret == 0 && alloc_ret == 0;
>  }
>  
> -static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
> +static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio)
>  {
>  	struct zpool *zpool = entry->pool->zpool;
>  	struct scatterlist input, output;
>  	struct crypto_acomp_ctx *acomp_ctx;
> +	int decomp_ret;
> +	bool ret = true;
>  	u8 *src;
>  
>  	acomp_ctx = acomp_ctx_get_cpu_lock(entry->pool);
> @@ -1025,12 +1029,25 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
>  	sg_init_table(&output, 1);
>  	sg_set_folio(&output, folio, PAGE_SIZE, 0);
>  	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
> -	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
> -	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
> +	decomp_ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
> +	if (decomp_ret || acomp_ctx->req->dlen != PAGE_SIZE) {
> +		ret = false;
> +		zswap_decompress_fail++;
> +		pr_alert_ratelimited(
> +			"decompression failed with returned value %d on zswap entry with swap entry value %08lx, swap type %d, and swap offset %lu. compression algorithm is %s. compressed size is %u bytes, and decompressed size is %u bytes.\n",

This is a very long line. I think we should break it into multiple
lines. I know multiline strings are frowned upon by checkpatch, but these
exist (see the warning in mem_cgroup_oom_control_write() for example),
and they are definitely better than a very long line imo.
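
For illustration, one possible split (the message wording here is only
an example, not a suggested final form):

	pr_alert_ratelimited(
		"zswap decompression failed with %d on entry %08lx "
		"(type %d, offset %lu), compressor %s, "
		"compressed %u bytes, decompressed %u bytes\n",
		decomp_ret, entry->swpentry.val,
		swp_type(entry->swpentry), swp_offset(entry->swpentry),
		entry->pool->tfm_name, entry->length,
		acomp_ctx->req->dlen);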

> +			decomp_ret,
> +			entry->swpentry.val,
> +			swp_type(entry->swpentry),
> +			swp_offset(entry->swpentry),
> +			entry->pool->tfm_name,
> +			entry->length,
> +			acomp_ctx->req->dlen);
> +	}
>  
>  	if (src != acomp_ctx->buffer)
>  		zpool_unmap_handle(zpool, entry->handle);
>  	acomp_ctx_put_unlock(acomp_ctx);
> +	return ret;

Not a big deal but we could probably store the length in a local
variable and move the check here, and avoid needing 'ret'.
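
Something roughly like this (untested sketch, pr_alert elided):

	unsigned int dlen;
	...
	decomp_ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req),
				     &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;

	if (src != acomp_ctx->buffer)
		zpool_unmap_handle(zpool, entry->handle);
	acomp_ctx_put_unlock(acomp_ctx);

	if (decomp_ret || dlen != PAGE_SIZE) {
		zswap_decompress_fail++;
		/* ratelimited pr_alert() here, using the dlen local */
		return false;
	}
	return true;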

>  }
>  
>  /*********************************
> @@ -1060,6 +1077,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
>  	struct writeback_control wbc = {
>  		.sync_mode = WB_SYNC_NONE,
>  	};
> +	int ret = 0;
>  
>  	/* try to allocate swap cache folio */
>  	si = get_swap_device(swpentry);
> @@ -1081,8 +1099,8 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
>  	 * and freed when invalidated by the concurrent shrinker anyway.
>  	 */
>  	if (!folio_was_allocated) {
> -		folio_put(folio);
> -		return -EEXIST;
> +		ret = -EEXIST;
> +		goto put_folio;
>  	}
>  
>  	/*
> @@ -1095,14 +1113,17 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
>  	 * be dereferenced.
>  	 */
>  	tree = swap_zswap_tree(swpentry);
> -	if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
> -		delete_from_swap_cache(folio);
> -		folio_unlock(folio);
> -		folio_put(folio);
> -		return -ENOMEM;
> +	if (entry != xa_load(tree, offset)) {
> +		ret = -ENOMEM;
> +		goto delete_unlock;
> +	}
> +
> +	if (!zswap_decompress(entry, folio)) {
> +		ret = -EIO;
> +		goto delete_unlock;
>  	}
>  
> -	zswap_decompress(entry, folio);
> +	xa_erase(tree, offset);
>  
>  	count_vm_event(ZSWPWB);
>  	if (entry->objcg)
> @@ -1118,9 +1139,14 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
>  
>  	/* start writeback */
>  	__swap_writepage(folio, &wbc);
> -	folio_put(folio);
>  
> -	return 0;
> +put_folio:
> +	folio_put(folio);
> +	return ret;
> +delete_unlock:
> +	delete_from_swap_cache(folio);
> +	folio_unlock(folio);
> +	goto put_folio;

I think I suggested a way to avoid this goto in v1:
https://lore.kernel.org/lkml/Z782SPcJI8DFISRa@google.com/.

Did this not work out?

>  }
>  
>  /*********************************
> @@ -1620,6 +1646,20 @@ bool zswap_store(struct folio *folio)
>  	return ret;
>  }
>  
> +/**
> + * zswap_load() - load a page from zswap
> + * @folio: folio to load
> + *
> + * Returns: true if zswap owns the swapped out contents, false otherwise.
> + *
> + * Note that the zswap_load() return value doesn't indicate success or failure,
> + * but whether zswap owns the swapped out contents. This MUST return true if
> + * zswap does own the swapped out contents, even if it fails to write the
> + * contents to the folio. Otherwise, the caller will try to read garbage from
> + * the backend.
> + *
> + * Success is signaled by marking the folio uptodate.
> + */
>  bool zswap_load(struct folio *folio)
>  {
>  	swp_entry_t swp = folio->swap;
> @@ -1644,6 +1684,17 @@ bool zswap_load(struct folio *folio)

The comment that exists here (not visible in the diff) should be
abbreviated now that we already explained the whole uptodate thing
above, right?

>  	if (WARN_ON_ONCE(folio_test_large(folio)))
>  		return true;
>  
> +	entry = xa_load(tree, offset);
> +	if (!entry)
> +		return false;
> +

A small comment here pointing out that we are deliberately not setting
uptodate because of the failure may make things more obvious, or do you
think that's not needed?
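
For example (wording just a suggestion):

	/*
	 * Decompression failed. Returning true tells the caller that
	 * zswap owns the swapped out contents, so it must not read
	 * them from the backend; deliberately leave the folio
	 * !uptodate so do_swap_page() SIGBUSes the faulting process.
	 */
	if (!zswap_decompress(entry, folio))
		return true;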

> +	if (!zswap_decompress(entry, folio))
> +		return true;
> +
> +	count_vm_event(ZSWPIN);
> +	if (entry->objcg)
> +		count_objcg_events(entry->objcg, ZSWPIN, 1);
> +
>  	/*
>  	 * When reading into the swapcache, invalidate our entry. The
>  	 * swapcache can be the authoritative owner of the page and
> @@ -1656,21 +1707,8 @@ bool zswap_load(struct folio *folio)
>  	 * files, which reads into a private page and may free it if
>  	 * the fault fails. We remain the primary owner of the entry.)
>  	 */
> -	if (swapcache)
> -		entry = xa_erase(tree, offset);
> -	else
> -		entry = xa_load(tree, offset);
> -
> -	if (!entry)
> -		return false;
> -
> -	zswap_decompress(entry, folio);
> -
> -	count_vm_event(ZSWPIN);
> -	if (entry->objcg)
> -		count_objcg_events(entry->objcg, ZSWPIN, 1);
> -
>  	if (swapcache) {
> +		xa_erase(tree, offset);
>  		zswap_entry_free(entry);
>  		folio_mark_dirty(folio);
>  	}
> @@ -1771,6 +1809,8 @@ static int zswap_debugfs_init(void)
>  			   zswap_debugfs_root, &zswap_reject_compress_fail);
>  	debugfs_create_u64("reject_compress_poor", 0444,
>  			   zswap_debugfs_root, &zswap_reject_compress_poor);
> +	debugfs_create_u64("decompress_fail", 0444,
> +			   zswap_debugfs_root, &zswap_decompress_fail);
>  	debugfs_create_u64("written_back_pages", 0444,
>  			   zswap_debugfs_root, &zswap_written_back_pages);
>  	debugfs_create_file("pool_total_size", 0444,
> 
> base-commit: 598d34afeca6bb10554846cf157a3ded8729516c
> -- 
> 2.43.5
Johannes Weiner Feb. 27, 2025, 4:31 a.m. UTC | #2
On Thu, Feb 27, 2025 at 01:19:31AM +0000, Yosry Ahmed wrote:
> On Wed, Feb 26, 2025 at 04:14:45PM -0800, Nhat Pham wrote:
> >  	if (WARN_ON_ONCE(folio_test_large(folio)))
> >  		return true;
> >  
> > +	entry = xa_load(tree, offset);
> > +	if (!entry)
> > +		return false;
> > +
> 
> A small comment here pointing out that we are deliberately not setting
> uptodate because of the failure may make things more obvious, or do you
> think that's not needed?
>
> > +	if (!zswap_decompress(entry, folio))
> > +		return true;

How about an actual error value and have this in swap_read_folio():

        ret = zswap_load(folio);
        if (ret != -ENOENT) {
                folio_unlock(folio);
                goto finish;
        }

	read from swapfile...

Then in zswap_load(), move uptodate further up like this (I had
previously suggested this):

	if (!zswap_decompress(entry, folio))
		return -EIO;

	folio_mark_uptodate(folio);

and I think it would be clear, even without or just minimal comments.
Yosry Ahmed Feb. 27, 2025, 5:44 a.m. UTC | #3
On Wed, Feb 26, 2025 at 11:31:41PM -0500, Johannes Weiner wrote:
> On Thu, Feb 27, 2025 at 01:19:31AM +0000, Yosry Ahmed wrote:
> > On Wed, Feb 26, 2025 at 04:14:45PM -0800, Nhat Pham wrote:
> > >  	if (WARN_ON_ONCE(folio_test_large(folio)))
> > >  		return true;
> > >  
> > > +	entry = xa_load(tree, offset);
> > > +	if (!entry)
> > > +		return false;
> > > +
> > 
> > A small comment here pointing out that we are deliberately not setting
> > uptodate because of the failure may make things more obvious, or do you
> > think that's not needed?
> >
> > > +	if (!zswap_decompress(entry, folio))
> > > +		return true;
> 
> How about an actual error value and have this in swap_read_folio():

Good idea, I was going to suggest an enum but this is simpler.

> 
>         ret = zswap_load(folio);
>         if (ret != -ENOENT) {
>                 folio_unlock(folio);
>                 goto finish;
>         }
> 
> 	read from swapfile...
> 
> Then in zswap_load(), move uptodate further up like this (I had
> previously suggested this):
> 
> 	if (!zswap_decompress(entry, folio))
> 		return -EIO;
> 
> 	folio_mark_uptodate(folio);
> 
> and I think it would be clear, even without or just minimal comments.

Another possibility is moving folio_mark_uptodate() back to
swap_read_folio(), which should make things even clearer imo as the
success/failure logic is all in one place:

	ret = zswap_load(folio);
	if (ret != -ENOENT) {
		folio_unlock(folio);
		/* Comment about not marking uptodate */
		if (!ret)
			folio_mark_uptodate(folio);
		goto finish;
	}

or we can make it crystal clear we have 3 distinct cases:

	ret = zswap_load(folio);
	if (!ret) {
		folio_unlock(folio);
		folio_mark_uptodate(folio);
		goto finish;
	} else if (ret != -ENOENT) {
		/* Comment about not marking uptodate */
		folio_unlock(folio);
		goto finish;
	}

WDYT?
Johannes Weiner Feb. 27, 2025, 6:16 a.m. UTC | #4
On Thu, Feb 27, 2025 at 05:44:29AM +0000, Yosry Ahmed wrote:
> On Wed, Feb 26, 2025 at 11:31:41PM -0500, Johannes Weiner wrote:
> > On Thu, Feb 27, 2025 at 01:19:31AM +0000, Yosry Ahmed wrote:
> > > On Wed, Feb 26, 2025 at 04:14:45PM -0800, Nhat Pham wrote:
> > > >  	if (WARN_ON_ONCE(folio_test_large(folio)))
> > > >  		return true;
> > > >  
> > > > +	entry = xa_load(tree, offset);
> > > > +	if (!entry)
> > > > +		return false;
> > > > +
> > > 
> > > A small comment here pointing out that we are deliberately not setting
> > > uptodate because of the failure may make things more obvious, or do you
> > > think that's not needed?
> > >
> > > > +	if (!zswap_decompress(entry, folio))
> > > > +		return true;
> > 
> > How about an actual error value and have this in swap_read_folio():
> 
> Good idea, I was going to suggest an enum but this is simpler.
> 
> > 
> >         ret = zswap_load(folio);
> >         if (ret != -ENOENT) {
> >                 folio_unlock(folio);
> >                 goto finish;
> >         }
> > 
> > 	read from swapfile...
> > 
> > Then in zswap_load(), move uptodate further up like this (I had
> > previously suggested this):
> > 
> > 	if (!zswap_decompress(entry, folio))
> > 		return -EIO;
> > 
> > 	folio_mark_uptodate(folio);
> > 
> > and I think it would be clear, even without or just minimal comments.
> 
> Another possibility is moving folio_mark_uptodate() back to
> swap_read_folio(), which should make things even clearer imo as the
> success/failure logic is all in one place:

That works. bdev, swapfile and zeromap set the flag in that file.

> 	ret = zswap_load(folio);
> 	if (ret != -ENOENT) {
> 		folio_unlock(folio);
> 		/* Comment about not marking uptodate */
> 		if (!ret)
> 			folio_mark_uptodate(folio);
> 		goto finish;
> 	}

Personally, I like this one ^. The comment isn't needed IMO, as now
zswap really isn't doing anything special compared to the others.

> or we can make it crystal clear we have 3 distinct cases:
> 
> 	ret = zswap_load(folio);
> 	if (!ret) {
> 		folio_unlock(folio);
> 		folio_mark_uptodate(folio);
> 		goto finish;
> 	} else if (ret != -ENOENT) {
> 		/* Comment about not marking uptodate */
> 		folio_unlock(folio);
> 		goto finish;
> 	}

This seems unnecessarily repetitive.
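
Putting the preferred pieces together, a rough (untested) sketch of the
end state:

	/* zswap_load() tail, now returning -errno: */
	entry = xa_load(tree, offset);
	if (!entry)
		return -ENOENT;

	if (!zswap_decompress(entry, folio))
		return -EIO;

	/* ... counters, swapcache invalidation ... */
	return 0;

	/* swap_read_folio() call site: */
	ret = zswap_load(folio);
	if (ret != -ENOENT) {
		folio_unlock(folio);
		if (!ret)
			folio_mark_uptodate(folio);
		goto finish;
	}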

Patch

diff --git a/mm/zswap.c b/mm/zswap.c
index 6dbf31bd2218..e4a2157bbc64 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -62,6 +62,8 @@ static u64 zswap_reject_reclaim_fail;
 static u64 zswap_reject_compress_fail;
 /* Compressed page was too big for the allocator to (optimally) store */
 static u64 zswap_reject_compress_poor;
+/* Load or writeback failed due to decompression failure */
+static u64 zswap_decompress_fail;
 /* Store failed because underlying allocator could not get memory */
 static u64 zswap_reject_alloc_fail;
 /* Store failed because the entry metadata could not be allocated (rare) */
@@ -996,11 +998,13 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
 	return comp_ret == 0 && alloc_ret == 0;
 }
 
-static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
+static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio)
 {
 	struct zpool *zpool = entry->pool->zpool;
 	struct scatterlist input, output;
 	struct crypto_acomp_ctx *acomp_ctx;
+	int decomp_ret;
+	bool ret = true;
 	u8 *src;
 
 	acomp_ctx = acomp_ctx_get_cpu_lock(entry->pool);
@@ -1025,12 +1029,25 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
 	sg_init_table(&output, 1);
 	sg_set_folio(&output, folio, PAGE_SIZE, 0);
 	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
-	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
-	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
+	decomp_ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
+	if (decomp_ret || acomp_ctx->req->dlen != PAGE_SIZE) {
+		ret = false;
+		zswap_decompress_fail++;
+		pr_alert_ratelimited(
+			"decompression failed with returned value %d on zswap entry with swap entry value %08lx, swap type %d, and swap offset %lu. compression algorithm is %s. compressed size is %u bytes, and decompressed size is %u bytes.\n",
+			decomp_ret,
+			entry->swpentry.val,
+			swp_type(entry->swpentry),
+			swp_offset(entry->swpentry),
+			entry->pool->tfm_name,
+			entry->length,
+			acomp_ctx->req->dlen);
+	}
 
 	if (src != acomp_ctx->buffer)
 		zpool_unmap_handle(zpool, entry->handle);
 	acomp_ctx_put_unlock(acomp_ctx);
+	return ret;
 }
 
 /*********************************
@@ -1060,6 +1077,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_NONE,
 	};
+	int ret = 0;
 
 	/* try to allocate swap cache folio */
 	si = get_swap_device(swpentry);
@@ -1081,8 +1099,8 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	 * and freed when invalidated by the concurrent shrinker anyway.
 	 */
 	if (!folio_was_allocated) {
-		folio_put(folio);
-		return -EEXIST;
+		ret = -EEXIST;
+		goto put_folio;
 	}
 
 	/*
@@ -1095,14 +1113,17 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	 * be dereferenced.
 	 */
 	tree = swap_zswap_tree(swpentry);
-	if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
-		delete_from_swap_cache(folio);
-		folio_unlock(folio);
-		folio_put(folio);
-		return -ENOMEM;
+	if (entry != xa_load(tree, offset)) {
+		ret = -ENOMEM;
+		goto delete_unlock;
+	}
+
+	if (!zswap_decompress(entry, folio)) {
+		ret = -EIO;
+		goto delete_unlock;
 	}
 
-	zswap_decompress(entry, folio);
+	xa_erase(tree, offset);
 
 	count_vm_event(ZSWPWB);
 	if (entry->objcg)
@@ -1118,9 +1139,14 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 
 	/* start writeback */
 	__swap_writepage(folio, &wbc);
-	folio_put(folio);
 
-	return 0;
+put_folio:
+	folio_put(folio);
+	return ret;
+delete_unlock:
+	delete_from_swap_cache(folio);
+	folio_unlock(folio);
+	goto put_folio;
 }
 
 /*********************************
@@ -1620,6 +1646,20 @@ bool zswap_store(struct folio *folio)
 	return ret;
 }
 
+/**
+ * zswap_load() - load a page from zswap
+ * @folio: folio to load
+ *
+ * Returns: true if zswap owns the swapped out contents, false otherwise.
+ *
+ * Note that the zswap_load() return value doesn't indicate success or failure,
+ * but whether zswap owns the swapped out contents. This MUST return true if
+ * zswap does own the swapped out contents, even if it fails to write the
+ * contents to the folio. Otherwise, the caller will try to read garbage from
+ * the backend.
+ *
+ * Success is signaled by marking the folio uptodate.
+ */
 bool zswap_load(struct folio *folio)
 {
 	swp_entry_t swp = folio->swap;
@@ -1644,6 +1684,17 @@ bool zswap_load(struct folio *folio)
 	if (WARN_ON_ONCE(folio_test_large(folio)))
 		return true;
 
+	entry = xa_load(tree, offset);
+	if (!entry)
+		return false;
+
+	if (!zswap_decompress(entry, folio))
+		return true;
+
+	count_vm_event(ZSWPIN);
+	if (entry->objcg)
+		count_objcg_events(entry->objcg, ZSWPIN, 1);
+
 	/*
 	 * When reading into the swapcache, invalidate our entry. The
 	 * swapcache can be the authoritative owner of the page and
@@ -1656,21 +1707,8 @@ bool zswap_load(struct folio *folio)
 	 * files, which reads into a private page and may free it if
 	 * the fault fails. We remain the primary owner of the entry.)
 	 */
-	if (swapcache)
-		entry = xa_erase(tree, offset);
-	else
-		entry = xa_load(tree, offset);
-
-	if (!entry)
-		return false;
-
-	zswap_decompress(entry, folio);
-
-	count_vm_event(ZSWPIN);
-	if (entry->objcg)
-		count_objcg_events(entry->objcg, ZSWPIN, 1);
-
 	if (swapcache) {
+		xa_erase(tree, offset);
 		zswap_entry_free(entry);
 		folio_mark_dirty(folio);
 	}
@@ -1771,6 +1809,8 @@ static int zswap_debugfs_init(void)
 			   zswap_debugfs_root, &zswap_reject_compress_fail);
 	debugfs_create_u64("reject_compress_poor", 0444,
 			   zswap_debugfs_root, &zswap_reject_compress_poor);
+	debugfs_create_u64("decompress_fail", 0444,
+			   zswap_debugfs_root, &zswap_decompress_fail);
 	debugfs_create_u64("written_back_pages", 0444,
 			   zswap_debugfs_root, &zswap_written_back_pages);
 	debugfs_create_file("pool_total_size", 0444,