[v2,03/14] fscrypt: adjust effective lblks based on extents

Message ID: d198508a448c08103691a1649b49edfa0d4eb98e.1688927487.git.sweettea-kernel@dorminy.me (mailing list archive)
State: Superseded
Series: fscrypt: add extent encryption

Commit Message

Sweet Tea Dorminy July 9, 2023, 6:53 p.m. UTC
If a filesystem uses extent-based encryption, then the offset within a
file is not a constant which can be used for calculating an IV.
For instance, the same extent could be blocks 0-8 in one file, and
blocks 100-108 in another file. Instead, the block offset within the
extent must be used.

Update all uses of logical block offset within the file to use logical
block offset within the extent, if applicable.
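
For illustration only, here is a minimal, self-contained sketch (not
code from this series; the struct, helper, and example block numbers
are invented) of why an extent-relative block offset gives a stable IV
while the file-relative block number does not:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

/*
 * Invented example: one extent shared by two files.  In file A it is
 * mapped at file-logical blocks 0-8, in file B at blocks 100-108.
 */
struct example_extent {
	u64 file_lblk_start;	/* first file-logical block of the extent */
};

/* Block offset within the extent, the value used to derive the IV. */
static u64 extent_relative_offset(const struct example_extent *ext, u64 lblk)
{
	return lblk - ext->file_lblk_start;
}

int main(void)
{
	struct example_extent in_file_a = { .file_lblk_start = 0 };
	struct example_extent in_file_b = { .file_lblk_start = 100 };

	/* The same extent block, reached through each file's mapping. */
	u64 off_a = extent_relative_offset(&in_file_a, 3);
	u64 off_b = extent_relative_offset(&in_file_b, 103);

	/*
	 * Both print 3: the extent-relative offset (and hence the IV) is
	 * identical, even though the file-logical blocks (3 vs 103) differ.
	 */
	printf("offset in A: %llu, offset in B: %llu\n",
	       (unsigned long long)off_a, (unsigned long long)off_b);
	return 0;
}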

Signed-off-by: Sweet Tea Dorminy <sweettea-kernel@dorminy.me>
---
 fs/crypto/crypto.c       |  3 ++-
 fs/crypto/inline_crypt.c | 24 +++++++++++++++++-------
 2 files changed, 19 insertions(+), 8 deletions(-)

Comments

Josef Bacik July 14, 2023, 6:13 p.m. UTC | #1
On Sun, Jul 09, 2023 at 02:53:36PM -0400, Sweet Tea Dorminy wrote:
> If a filesystem uses extent-based encryption, then the offset within a
> file is not a constant which can be used for calculating an IV.
> For instance, the same extent could be blocks 0-8 in one file, and
> blocks 100-108 in another file. Instead, the block offset within the
> extent must be used.
> 
> Update all uses of logical block offset within the file to use logical
> block offset within the extent, if applicable.
> 
> Signed-off-by: Sweet Tea Dorminy <sweettea-kernel@dorminy.me>
> ---
>  fs/crypto/crypto.c       |  3 ++-
>  fs/crypto/inline_crypt.c | 24 +++++++++++++++++-------
>  2 files changed, 19 insertions(+), 8 deletions(-)
> 
> diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
> index 1b7e375b1c6b..d75f1b3f5795 100644
> --- a/fs/crypto/crypto.c
> +++ b/fs/crypto/crypto.c
> @@ -107,8 +107,9 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
>  	struct skcipher_request *req = NULL;
>  	DECLARE_CRYPTO_WAIT(wait);
>  	struct scatterlist dst, src;
> +	u64 ci_offset = 0;
>  	struct fscrypt_info *ci =
> -		fscrypt_get_lblk_info(inode, lblk_num, NULL, NULL);
> +		fscrypt_get_lblk_info(inode, lblk_num, &ci_offset, NULL);
>  	struct crypto_skcipher *tfm = ci->ci_enc_key->tfm;
>  	int res = 0;
>  
> diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c
> index 885a2ec3d711..b3e7a5291d22 100644
> --- a/fs/crypto/inline_crypt.c
> +++ b/fs/crypto/inline_crypt.c
> @@ -267,12 +267,15 @@ void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
>  {
>  	const struct fscrypt_info *ci;
>  	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
> +	u64 ci_offset = 0;
>  
>  	if (!fscrypt_inode_uses_inline_crypto(inode))
>  		return;
> -	ci = fscrypt_get_lblk_info(inode, first_lblk, NULL, NULL);
> +	ci = fscrypt_get_lblk_info(inode, first_lblk, &ci_offset, NULL);
> +	if (!ci)
> +		return;
>  
> -	fscrypt_generate_dun(ci, first_lblk, dun);
> +	fscrypt_generate_dun(ci, ci_offset, dun);
>  	bio_crypt_set_ctx(bio, ci->ci_enc_key->blk_key, dun, gfp_mask);
>  }
>  EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);
> @@ -350,22 +353,23 @@ bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
>  	const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
>  	u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
>  	struct fscrypt_info *ci;
> +	u64 ci_offset = 0;
>  
>  	if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
>  		return false;
>  	if (!bc)
>  		return true;
>  
> -	ci = fscrypt_get_lblk_info(inode, next_lblk, NULL, NULL);
> +	ci = fscrypt_get_lblk_info(inode, next_lblk, &ci_offset, NULL);
>  	/*
>  	 * Comparing the key pointers is good enough, as all I/O for each key
>  	 * uses the same pointer.  I.e., there's currently no need to support
>  	 * merging requests where the keys are the same but the pointers differ.
>  	 */
> -	if (bc->bc_key != ci->ci_enc_key->blk_key)
> +	if (!ci || bc->bc_key != ci->ci_enc_key->blk_key)
>  		return false;
>  

This seems like an unrelated change; we weren't checking !ci before, and the
behavior hasn't changed with the new code.  Thanks,

Josef

Patch

diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 1b7e375b1c6b..d75f1b3f5795 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -107,8 +107,9 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
 	struct skcipher_request *req = NULL;
 	DECLARE_CRYPTO_WAIT(wait);
 	struct scatterlist dst, src;
+	u64 ci_offset = 0;
 	struct fscrypt_info *ci =
-		fscrypt_get_lblk_info(inode, lblk_num, NULL, NULL);
+		fscrypt_get_lblk_info(inode, lblk_num, &ci_offset, NULL);
 	struct crypto_skcipher *tfm = ci->ci_enc_key->tfm;
 	int res = 0;
 
diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c
index 885a2ec3d711..b3e7a5291d22 100644
--- a/fs/crypto/inline_crypt.c
+++ b/fs/crypto/inline_crypt.c
@@ -267,12 +267,15 @@ void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
 {
 	const struct fscrypt_info *ci;
 	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
+	u64 ci_offset = 0;
 
 	if (!fscrypt_inode_uses_inline_crypto(inode))
 		return;
-	ci = fscrypt_get_lblk_info(inode, first_lblk, NULL, NULL);
+	ci = fscrypt_get_lblk_info(inode, first_lblk, &ci_offset, NULL);
+	if (!ci)
+		return;
 
-	fscrypt_generate_dun(ci, first_lblk, dun);
+	fscrypt_generate_dun(ci, ci_offset, dun);
 	bio_crypt_set_ctx(bio, ci->ci_enc_key->blk_key, dun, gfp_mask);
 }
 EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);
@@ -350,22 +353,23 @@ bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
 	const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
 	u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
 	struct fscrypt_info *ci;
+	u64 ci_offset = 0;
 
 	if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
 		return false;
 	if (!bc)
 		return true;
 
-	ci = fscrypt_get_lblk_info(inode, next_lblk, NULL, NULL);
+	ci = fscrypt_get_lblk_info(inode, next_lblk, &ci_offset, NULL);
 	/*
 	 * Comparing the key pointers is good enough, as all I/O for each key
 	 * uses the same pointer.  I.e., there's currently no need to support
 	 * merging requests where the keys are the same but the pointers differ.
 	 */
-	if (bc->bc_key != ci->ci_enc_key->blk_key)
+	if (!ci || bc->bc_key != ci->ci_enc_key->blk_key)
 		return false;
 
-	fscrypt_generate_dun(ci, next_lblk, next_dun);
+	fscrypt_generate_dun(ci, ci_offset, next_dun);
 	return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun);
 }
 EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio);
@@ -460,6 +464,8 @@ u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks)
 {
 	const struct fscrypt_info *ci;
 	u32 dun;
+	u64 ci_offset = 0;
+	u64 extent_len = 0;
 
 	if (!fscrypt_inode_uses_inline_crypto(inode))
 		return nr_blocks;
@@ -467,14 +473,18 @@ u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks)
 	if (nr_blocks <= 1)
 		return nr_blocks;
 
-	ci = fscrypt_get_lblk_info(inode, lblk, NULL, NULL);
+	ci = fscrypt_get_lblk_info(inode, lblk, &ci_offset, &extent_len);
+
+	/* Spanning an extent boundary will change the DUN */
+	nr_blocks = min_t(u64, nr_blocks, extent_len);
+
 	if (!(fscrypt_policy_flags(&ci->ci_policy) &
 	      FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
 		return nr_blocks;
 
 	/* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. */
 
-	dun = ci->ci_hashed_ino + lblk;
+	dun = ci->ci_hashed_ino + ci_offset;
 
 	return min_t(u64, nr_blocks, (u64)U32_MAX + 1 - dun);
 }
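
For intuition, a standalone sketch of the capping logic in
fscrypt_limit_io_blocks() above (the helper name, parameters, and
example numbers are invented for illustration; this is not the kernel
function): the extent length bounds nr_blocks first, and with
IV_INO_LBLK_32 the 32-bit DUN wrap-around can bound it further.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

#define U32_MAX 0xffffffffU

static u64 min_u64(u64 a, u64 b) { return a < b ? a : b; }

/*
 * Mirrors the capping done in fscrypt_limit_io_blocks(), with the
 * fscrypt lookup replaced by caller-provided example values.
 */
static u64 limit_io_blocks(u64 nr_blocks, u64 extent_len, u32 hashed_ino,
			   u64 ci_offset, int iv_ino_lblk_32)
{
	/* Spanning an extent boundary would change the DUN stream. */
	nr_blocks = min_u64(nr_blocks, extent_len);

	if (!iv_ino_lblk_32)
		return nr_blocks;

	/* With IV_INO_LBLK_32, the 32-bit DUN wraps from U32_MAX to 0. */
	u32 dun = hashed_ino + (u32)ci_offset;

	return min_u64(nr_blocks, (u64)U32_MAX + 1 - dun);
}

int main(void)
{
	/* 256 blocks requested, but only 64 remain in the extent. */
	printf("%llu\n", (unsigned long long)
	       limit_io_blocks(256, 64, 0x1000, 0, 1));		/* -> 64 */

	/* The DUN is 16 blocks away from wrapping: capped to 16. */
	printf("%llu\n", (unsigned long long)
	       limit_io_blocks(256, 512, U32_MAX - 15, 0, 1));	/* -> 16 */
	return 0;
}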