diff --git a/fs/netfs/Makefile b/fs/netfs/Makefile
--- a/fs/netfs/Makefile
+++ b/fs/netfs/Makefile
@@ -3,6 +3,7 @@
netfs-y := \
buffered_read.o \
buffered_write.o \
+ crypto.o \
direct_read.o \
direct_write.o \
io.o \
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -77,7 +77,8 @@ static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
if (!maybe_trouble && offset == 0 && len >= flen)
return NETFS_WHOLE_FOLIO_MODIFY;
- if (file->f_mode & FMODE_READ)
+ if (file->f_mode & FMODE_READ ||
+ test_bit(NETFS_ICTX_ENCRYPTED, &ctx->flags))
return NETFS_JUST_PREFETCH;
if (netfs_is_cache_enabled(ctx) ||
diff --git a/fs/netfs/crypto.c b/fs/netfs/crypto.c
new file mode 100644
--- /dev/null
+++ b/fs/netfs/crypto.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Network filesystem content encryption support.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include "internal.h"
+
+/*
+ * Populate a scatterlist from the next part of an I/O iterator.  Only a
+ * temporary copy of the iterator is advanced, not the caller's iterator.
+ */
+static int netfs_iter_to_sglist(const struct iov_iter *iter, size_t len,
+ struct scatterlist *sg, unsigned int n_sg)
+{
+ struct iov_iter tmp_iter = *iter;
+ struct sg_table sgtable = { .sgl = sg };
+ ssize_t ret;
+
+ _enter("%zx/%zx", len, iov_iter_count(iter));
+
+ sg_init_table(sg, n_sg);
+ ret = extract_iter_to_sg(&tmp_iter, len, &sgtable, n_sg, 0);
+ if (ret < 0)
+ return ret;
+ sg_mark_end(&sg[sgtable.nents - 1]);
+ return sgtable.nents;
+}
+
+/*
+ * Prepare a write request for writing.  The data is encrypted into the
+ * bounce buffer, either in place or from the request's source buffer.
+ */
+bool netfs_encrypt(struct netfs_io_request *wreq)
+{
+ struct netfs_inode *ctx = netfs_inode(wreq->inode);
+ struct scatterlist source_sg[16], dest_sg[16];
+ unsigned int n_dest;
+ size_t n, chunk, bsize = 1UL << ctx->crypto_bshift;
+ loff_t pos;
+ int ret;
+
+ _enter("");
+
+ trace_netfs_rreq(wreq, netfs_rreq_trace_encrypt);
+
+ pos = wreq->start;
+ n = wreq->len;
+ _debug("ENCRYPT %llx-%llx", pos, pos + n - 1);
+
+ for (; n > 0; n -= chunk, pos += chunk) {
+ chunk = min(n, bsize);
+
+ ret = netfs_iter_to_sglist(&wreq->io_iter, chunk,
+ dest_sg, ARRAY_SIZE(dest_sg));
+ if (ret < 0)
+ goto error;
+ n_dest = ret;
+
+ if (test_bit(NETFS_RREQ_CRYPT_IN_PLACE, &wreq->flags)) {
+ ret = ctx->ops->encrypt_block(wreq, pos, chunk,
+ dest_sg, n_dest,
+ dest_sg, n_dest);
+ } else {
+ ret = netfs_iter_to_sglist(&wreq->iter, chunk,
+ source_sg, ARRAY_SIZE(source_sg));
+ if (ret < 0)
+ goto error;
+ ret = ctx->ops->encrypt_block(wreq, pos, chunk,
+ source_sg, ret,
+ dest_sg, n_dest);
+ }
+
+		if (ret < 0)
+			goto error_failed;
+
+		/* Step the iterator(s) on to the next block's data. */
+		iov_iter_advance(&wreq->io_iter, chunk);
+		if (!test_bit(NETFS_RREQ_CRYPT_IN_PLACE, &wreq->flags))
+			iov_iter_advance(&wreq->iter, chunk);
+	}
+
+	/* Restore the iterators for whoever writes out the buffer. */
+	iov_iter_revert(&wreq->io_iter, wreq->len);
+	if (!test_bit(NETFS_RREQ_CRYPT_IN_PLACE, &wreq->flags))
+		iov_iter_revert(&wreq->iter, wreq->len);
+
+	return true;
+
+error_failed:
+ trace_netfs_failure(wreq, NULL, ret, netfs_fail_encryption);
+error:
+ wreq->error = ret;
+ return false;
+}
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -22,6 +22,11 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq);
int netfs_prefetch_for_write(struct file *file, struct folio *folio,
size_t offset, size_t len);
+/*
+ * crypto.c
+ */
+bool netfs_encrypt(struct netfs_io_request *wreq);
+
/*
* direct_write.c
*/
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -44,6 +44,8 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
refcount_set(&rreq->ref, 1);
__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
+ if (test_bit(NETFS_ICTX_ENCRYPTED, &ctx->flags))
+ __set_bit(NETFS_RREQ_CONTENT_ENCRYPTION, &rreq->flags);
if (cached)
__set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
if (file && file->f_flags & O_NONBLOCK)
diff --git a/fs/netfs/output.c b/fs/netfs/output.c
--- a/fs/netfs/output.c
+++ b/fs/netfs/output.c
@@ -366,7 +366,11 @@ int netfs_begin_write(struct netfs_io_request *wreq, bool may_wait,
* background whilst we generate a list of write ops that we want to
* perform.
*/
- // TODO: Encrypt or compress the region as appropriate
+ if (test_bit(NETFS_RREQ_CONTENT_ENCRYPTION, &wreq->flags) &&
+ !netfs_encrypt(wreq)) {
+ may_wait = true;
+ goto out;
+ }
/* We need to write all of the region to the cache */
if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &wreq->flags))
@@ -378,6 +382,7 @@ int netfs_begin_write(struct netfs_io_request *wreq, bool may_wait,
if (test_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))
ctx->ops->create_write_requests(wreq, wreq->start, wreq->len);
+out:
if (atomic_dec_and_test(&wreq->nr_outstanding))
netfs_write_terminated(wreq, false);
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -19,6 +19,7 @@
#include <linux/pagemap.h>
#include <linux/uio.h>
+struct scatterlist;
enum netfs_sreq_ref_trace;
/*
@@ -141,7 +142,9 @@ struct netfs_inode {
unsigned long flags;
#define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */
#define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */
+#define NETFS_ICTX_ENCRYPTED 2 /* The file contents are encrypted */
unsigned char min_bshift; /* log2 min block size for bounding box or 0 */
+ unsigned char crypto_bshift; /* log2 of crypto block size */
};
/*
@@ -285,6 +288,8 @@ struct netfs_io_request {
#define NETFS_RREQ_USE_BOUNCE_BUFFER 8 /* Use bounce buffer */
#define NETFS_RREQ_WRITE_TO_CACHE 9 /* Need to write to the cache */
#define NETFS_RREQ_UPLOAD_TO_SERVER 10 /* Need to write to the server */
+#define NETFS_RREQ_CONTENT_ENCRYPTION 11 /* Content encryption is in use */
+#define NETFS_RREQ_CRYPT_IN_PLACE 12 /* Enc/dec in place in ->io_iter */
const struct netfs_request_ops *netfs_ops;
void (*cleanup)(struct netfs_io_request *req);
};
@@ -316,6 +321,11 @@ struct netfs_request_ops {
void (*create_write_requests)(struct netfs_io_request *wreq,
loff_t start, size_t len);
void (*invalidate_cache)(struct netfs_io_request *wreq);
+
+ /* Content encryption */
+ int (*encrypt_block)(struct netfs_io_request *wreq, loff_t pos, size_t len,
+ struct scatterlist *source_sg, unsigned int n_source,
+ struct scatterlist *dest_sg, unsigned int n_dest);
};
/*
@@ -464,6 +474,7 @@ static inline void netfs_inode_init(struct netfs_inode *ctx,
ctx->zero_point = ctx->remote_i_size;
ctx->flags = 0;
ctx->min_bshift = 0;
+ ctx->crypto_bshift = 0;
#if IS_ENABLED(CONFIG_FSCACHE)
ctx->cache = NULL;
#endif
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -41,6 +41,7 @@
EM(netfs_rreq_trace_assess, "ASSESS ") \
EM(netfs_rreq_trace_copy, "COPY ") \
EM(netfs_rreq_trace_done, "DONE ") \
+ EM(netfs_rreq_trace_encrypt, "ENCRYPT") \
EM(netfs_rreq_trace_free, "FREE ") \
EM(netfs_rreq_trace_redirty, "REDIRTY") \
EM(netfs_rreq_trace_resubmit, "RESUBMT") \
@@ -76,6 +77,7 @@
EM(netfs_fail_copy_to_cache, "copy-to-cache") \
EM(netfs_fail_dio_read_short, "dio-read-short") \
EM(netfs_fail_dio_read_zero, "dio-read-zero") \
+ EM(netfs_fail_encryption, "encryption") \
EM(netfs_fail_read, "read") \
EM(netfs_fail_short_read, "short-read") \
EM(netfs_fail_prepare_write, "prep-write") \
When dealing with an encrypted file, we gather together sufficient pages
from the pagecache to constitute a logical crypto block, allocate a bounce
buffer and then ask the filesystem to encrypt between the buffers.  The
bounce buffer is then passed to the filesystem to upload.

The network filesystem must set the NETFS_ICTX_ENCRYPTED flag on the inode
context to indicate that content encryption is desired and must set
crypto_bshift to the log2 of the logical crypto blocksize.

The netfs library iterates through each block to be processed, providing a
pair of scatterlists to describe the source and destination buffers.

Note that it should be possible in the future to encrypt DIO writes also by
this same mechanism.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
---
 fs/netfs/Makefile            |   1 +
 fs/netfs/buffered_write.c    |   3 +-
 fs/netfs/crypto.c            | 101 +++++++++++++++++++++++++++++++++++++
 fs/netfs/internal.h          |   5 ++
 fs/netfs/objects.c           |   2 +
 fs/netfs/output.c            |   7 ++-
 include/linux/netfs.h        |  11 +++++
 include/trace/events/netfs.h |   2 +
 8 files changed, 130 insertions(+), 2 deletions(-)
 create mode 100644 fs/netfs/crypto.c
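For illustration, here is roughly what the filesystem side of this API might
look like.  This sketch is not part of the patch: the example_* names, the
per-inode skcipher handle and the IV-from-block-number scheme are assumptions
standing in for whatever a real filesystem would actually use.

/* Hypothetical filesystem-side sketch; not part of this patch. */
#include <linux/netfs.h>
#include <crypto/skcipher.h>
#include <asm/unaligned.h>

struct example_inode {
	struct netfs_inode netfs;	/* netfs context for this inode */
	struct crypto_skcipher *tfm;	/* assumed per-inode cipher handle */
};

static inline struct example_inode *EXAMPLE_I(struct inode *inode)
{
	return container_of(netfs_inode(inode), struct example_inode, netfs);
}

/* Opt in at inode set-up time: flag the inode as encrypted and declare the
 * logical crypto block size that ->encrypt_block() will be called with.
 */
static void example_init_crypto(struct example_inode *ei)
{
	__set_bit(NETFS_ICTX_ENCRYPTED, &ei->netfs.flags);
	ei->netfs.crypto_bshift = 12;	/* e.g. 4KiB logical crypto blocks */
}

/* Encrypt one logical block.  source_sg and dest_sg may describe the same
 * pages when NETFS_RREQ_CRYPT_IN_PLACE is set; the lists arrive already
 * end-marked, so n_source/n_dest aren't needed by the skcipher API.
 */
static int example_encrypt_block(struct netfs_io_request *wreq,
				 loff_t pos, size_t len,
				 struct scatterlist *source_sg,
				 unsigned int n_source,
				 struct scatterlist *dest_sg,
				 unsigned int n_dest)
{
	struct example_inode *ei = EXAMPLE_I(wreq->inode);
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	u8 iv[16] = {};
	int ret;

	/* Assumed IV scheme: little-endian logical block number. */
	put_unaligned_le64(pos >> ei->netfs.crypto_bshift, iv);

	req = skcipher_request_alloc(ei->tfm, GFP_NOFS);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, source_sg, dest_sg, len, iv);
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	return ret;
}

Whatever cipher mode the filesystem picks would need to tolerate the source
and destination scatterlists referring to the same pages, since netfs may
request in-place encryption.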