@@ -117,6 +117,7 @@ static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
{
struct bio *bio;
struct bio_vec *bv;
+ struct page *first_page;
struct inode *inode;
int mirror_num;
@@ -125,13 +126,17 @@ static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
bio = bio_ctrl->bio;
bv = bio_first_bvec_all(bio);
- inode = bv->bv_page->mapping->host;
+ first_page = bio_first_page_all(bio);
+ if (fscrypt_is_bounce_page(first_page))
+ inode = fscrypt_pagecache_page(first_page)->mapping->host;
+ else
+ inode = first_page->mapping->host;
mirror_num = bio_ctrl->mirror_num;
/* Caller should ensure the bio has at least some range added */
ASSERT(bio->bi_iter.bi_size);
- btrfs_bio(bio)->file_offset = page_offset(bv->bv_page) + bv->bv_offset;
+ btrfs_bio(bio)->file_offset = page_offset(first_page) + bv->bv_offset;
if (!is_data_inode(inode))
btrfs_submit_metadata_bio(inode, bio, mirror_num);
@@ -1018,9 +1023,19 @@ static void end_bio_extent_writepage(struct btrfs_bio *bbio)
ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_segment_all(bvec, bio, iter_all) {
struct page *page = bvec->bv_page;
- struct inode *inode = page->mapping->host;
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- const u32 sectorsize = fs_info->sectorsize;
+ struct inode *inode;
+ struct btrfs_fs_info *fs_info;
+ u32 sectorsize;
+ struct page *bounce_page = NULL;
+
+ if (fscrypt_is_bounce_page(page)) {
+ bounce_page = page;
+ page = fscrypt_pagecache_page(bounce_page);
+ }
+
+ inode = page->mapping->host;
+ fs_info = btrfs_sb(inode->i_sb);
+ sectorsize = fs_info->sectorsize;
/* Our read/write should always be sector aligned. */
if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
@@ -1041,7 +1056,7 @@ static void end_bio_extent_writepage(struct btrfs_bio *bbio)
}
end_extent_writepage(page, error, start, end);
-
+ fscrypt_free_bounce_page(bounce_page);
btrfs_page_clear_writeback(fs_info, page, start, bvec->bv_len);
}
@@ -1233,6 +1248,17 @@ static void end_bio_extent_readpage(struct btrfs_bio *bbio)
}
}
+ if (likely(uptodate)) {
+ if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
+ int ret = fscrypt_decrypt_pagecache_blocks(page,
+ bvec->bv_len,
+ bvec->bv_offset);
+ if (ret) {
+ error_bitmap = (unsigned int) -1;
+ uptodate = false;
+ }
+ }
+ }
if (likely(uptodate)) {
loff_t i_size = i_size_read(inode);
pgoff_t end_index = i_size >> PAGE_SHIFT;
@@ -1567,11 +1593,29 @@ static int submit_extent_page(blk_opf_t opf,
bool force_bio_submit)
{
int ret = 0;
+ struct page *bounce_page = NULL;
struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
unsigned int cur = pg_offset;
ASSERT(bio_ctrl);
+ if ((opf & REQ_OP_MASK) == REQ_OP_WRITE &&
+ fscrypt_inode_uses_fs_layer_crypto(&inode->vfs_inode)) {
+ gfp_t gfp_flags;
+
+ if (bio_ctrl->bio)
+ gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
+ else
+ gfp_flags = GFP_NOFS;
+ bounce_page = fscrypt_encrypt_pagecache_blocks(page, size,
+ pg_offset,
+ gfp_flags);
+ if (IS_ERR(bounce_page))
+ return PTR_ERR(bounce_page);
+ page = bounce_page;
+ pg_offset = 0;
+ }
+
ASSERT(pg_offset < PAGE_SIZE && size <= PAGE_SIZE &&
pg_offset + size <= PAGE_SIZE);
@@ -681,8 +681,13 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
shash->tfm = fs_info->csum_shash;
bio_for_each_segment(bvec, bio, iter) {
- if (use_page_offsets)
- offset = page_offset(bvec.bv_page) + bvec.bv_offset;
+ if (use_page_offsets) {
+ struct page *page = bvec.bv_page;
+
+ if (fscrypt_is_bounce_page(page))
+ page = fscrypt_pagecache_page(page);
+ offset = page_offset(page) + bvec.bv_offset;
+ }
if (!ordered) {
ordered = btrfs_lookup_ordered_extent(inode, offset);
@@ -190,7 +190,38 @@ static int btrfs_fscrypt_get_extent_context(const struct inode *inode,
size_t *extent_offset,
size_t *extent_length)
{
- return len;
+ u64 offset = lblk_num << inode->i_blkbits;
+ struct extent_map *em;
+ int ret;
+
+ /* IO is in progress on this extent, so the lookup is expected to succeed */
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, PAGE_SIZE);
+ if (IS_ERR(em))
+ return PTR_ERR(em);
+
+ if (em->block_start == EXTENT_MAP_HOLE) {
+ btrfs_info(BTRFS_I(inode)->root->fs_info,
+ "extent context requested for block %llu of inode %lu without an extent",
+ lblk_num, inode->i_ino);
+ free_extent_map(em);
+ return -ENOENT;
+ }
+
+ ret = ctx ? em->fscrypt_context.len : 0;
+
+ if (ctx)
+ memcpy(ctx, em->fscrypt_context.buffer,
+ em->fscrypt_context.len);
+
+ if (extent_offset)
+ *extent_offset
+ = (offset - em->start) >> inode->i_blkbits;
+
+ if (extent_length)
+ *extent_length = em->len >> inode->i_blkbits;
+
+ free_extent_map(em);
+ return ret;
}
static int btrfs_fscrypt_set_extent_context(void *extent, void *ctx,
@@ -276,9 +276,14 @@ static int check_extent_data_item(struct extent_buffer *leaf,
return -EUCLEAN;
}
- /* Compressed inline extent has no on-disk size, skip it */
- if (btrfs_file_extent_compression(leaf, fi) !=
- BTRFS_COMPRESS_NONE)
+ /*
+ * Compressed inline extent has no on-disk size; encrypted has
+ * variable size; skip them
+ */
+ if ((btrfs_file_extent_compression(leaf, fi) !=
+ BTRFS_COMPRESS_NONE) ||
+ (btrfs_file_extent_encryption(leaf, fi) !=
+ BTRFS_ENCRYPTION_NONE))
return 0;
/* Uncompressed inline extent size must match item size */