@@ -181,6 +181,16 @@ static ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_
iov_iter_advance(iter, orig_count);
}
+ /* If we're going to do decryption or decompression, we're going to
+ * need a bounce buffer - and if the data is misaligned for the crypto
+ * algorithm, we decrypt in place and then copy.
+ */
+ if (test_bit(NETFS_RREQ_CONTENT_ENCRYPTION, &rreq->flags)) {
+ if (!netfs_is_crypto_aligned(rreq, iter))
+ __set_bit(NETFS_RREQ_CRYPT_IN_PLACE, &rreq->flags);
+ __set_bit(NETFS_RREQ_USE_BOUNCE_BUFFER, &rreq->flags);
+ }
+
/* If we're going to use a bounce buffer, we need to set it up. We
* will then need to pad the request out to the minimum block size.
*/
@@ -198,6 +198,23 @@ static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
netfs_group->free(netfs_group);
}
+/*
+ * Check to see if a buffer aligns with the crypto unit block size. If it
+ * doesn't, the crypto layer is going to copy all the data - in which case
+ * relying on the crypto op for a free copy is pointless.
+ */
+static inline bool netfs_is_crypto_aligned(struct netfs_io_request *rreq,
+ struct iov_iter *iter)
+{
+ struct netfs_inode *ctx = netfs_inode(rreq->inode);
+ unsigned long align, mask = (1UL << ctx->min_bshift) - 1;
+
+ if (!ctx->min_bshift)
+ return true;
+ align = iov_iter_alignment(iter);
+ return (align & mask) == 0;
+}
+
/*
* fscache-cache.c
*/
Support unbuffered and direct I/O reads from an encrypted file.  This may
require making a larger read than is required into a bounce buffer and
copying out the required bits.  We don't decrypt in-place in the user
buffer lest userspace interfere and muck up the decryption.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
---
 fs/netfs/direct_read.c | 10 ++++++++++
 fs/netfs/internal.h    | 17 +++++++++++++++++
 2 files changed, 27 insertions(+)