@@ -741,6 +741,12 @@ struct cl_page {
struct cl_object *cp_obj;
/** vmpage */
struct page *cp_vmpage;
+ /**
+	 * Only set when doing direct IO: in that case cp_vmpage is not
+	 * a valid page cache page, so the inode cannot be inferred from
+	 * cp_vmpage->mapping->host.
+ */
+ struct inode *cp_inode;
/** Linkage of pages within group. Pages must be owned */
struct list_head cp_batch;
/** array of slices offset. Immutable after creation. */
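
With cp_inode in place, the owning inode of any cl_page can be resolved regardless of IO type. A minimal sketch (cl_page_inode() is a hypothetical helper, not part of this patch):

    static inline struct inode *cl_page_inode(const struct cl_page *cp)
    {
    	/* Direct IO pages carry the inode explicitly in cp_inode... */
    	if (cp->cp_inode)
    		return cp->cp_inode;
    	/* ...page cache pages reach it through the vmpage's mapping. */
    	if (cp->cp_vmpage && cp->cp_vmpage->mapping)
    		return cp->cp_vmpage->mapping->host;
    	return NULL;
    }
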
@@ -48,8 +48,12 @@ int ll_set_encflags(struct inode *inode, void *encctx, u32 encctxlen,
#define llcrypt_has_encryption_key(inode) fscrypt_has_encryption_key(inode)
#define llcrypt_encrypt_pagecache_blocks(page, len, offs, gfp_flags) \
fscrypt_encrypt_pagecache_blocks(page, len, offs, gfp_flags)
+#define llcrypt_encrypt_block_inplace(inode, page, len, offs, lblk, gfp_flags) \
+ fscrypt_encrypt_block_inplace(inode, page, len, offs, lblk, gfp_flags)
#define llcrypt_decrypt_pagecache_blocks(page, len, offs) \
fscrypt_decrypt_pagecache_blocks(page, len, offs)
+#define llcrypt_decrypt_block_inplace(inode, page, len, offs, lblk_num) \
+ fscrypt_decrypt_block_inplace(inode, page, len, offs, lblk_num)
#define llcrypt_inherit_context(parent, child, fs_data, preload) \
fscrypt_inherit_context(parent, child, fs_data, preload)
#define llcrypt_get_encryption_info(inode) fscrypt_get_encryption_info(inode)
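
The new in-place primitives take the inode and logical block number explicitly, so they work on pages that have no page cache state. A hypothetical caller decrypting one filesystem block at a block-aligned file position might look like:

    /* Hypothetical helper, for illustration only: decrypt the single
     * filesystem block at block-aligned file position pos, already
     * read into page. The logical block number serves as the IV,
     * exactly as it would for a page cache page.
     */
    static int decrypt_one_block(struct inode *inode, struct page *page,
    			     loff_t pos)
    {
    	unsigned int blockbits = inode->i_blkbits;

    	return llcrypt_decrypt_block_inplace(inode, page,
    					     1 << blockbits,
    					     pos & ~PAGE_MASK,
    					     pos >> blockbits);
    }
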
@@ -82,7 +86,11 @@ int ll_set_encflags(struct inode *inode, void *encctx, u32 encctxlen,
#define llcrypt_has_encryption_key(inode) false
#define llcrypt_encrypt_pagecache_blocks(page, len, offs, gfp_flags) \
ERR_PTR(-EOPNOTSUPP)
+#define llcrypt_encrypt_block_inplace(inode, page, len, offs, lblk, gfp_flags) \
+ -EOPNOTSUPP
#define llcrypt_decrypt_pagecache_blocks(page, len, offs) -EOPNOTSUPP
+#define llcrypt_decrypt_block_inplace(inode, page, len, offs, lblk_num) \
+ -EOPNOTSUPP
#define llcrypt_inherit_context(parent, child, fs_data, preload) -EOPNOTSUPP
#define llcrypt_get_encryption_info(inode) -EOPNOTSUPP
#define llcrypt_put_encryption_info(inode) do {} while (0)
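
Because the !CONFIG stubs expand to plain expressions, callers need no #ifdef of their own; with encryption compiled out they simply observe -EOPNOTSUPP. A sketch, assuming the variables from the decrypt path later in this patch:

    /* Sketch: with encryption compiled out, the macro reduces this to
     * rc = -EOPNOTSUPP, so the caller needs no #ifdef of its own.
     */
    rc = llcrypt_decrypt_block_inplace(inode, pg->pg, blocksize,
    				   offs, lblk_num);
    if (rc)
    	goto out;
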
@@ -236,7 +236,6 @@ struct ll_dio_pages {
int io_pages = 0;
size_t page_size = cl_page_size(obj);
int i;
- pgoff_t index = offset >> PAGE_SHIFT;
ssize_t rc = 0;
cl_2queue_init(queue);
@@ -257,26 +256,14 @@ struct ll_dio_pages {
page->cp_sync_io = anchor;
if (inode && IS_ENCRYPTED(inode)) {
- struct page *vmpage = cl_page_vmpage(page);
-
/* In case of Direct IO on encrypted file, we need to
- * set the correct page index, and add a reference to
- * the mapping. This is required by llcrypt to proceed
- * to encryption/decryption, because each block is
- * encrypted independently, and each block's IV is set
- * to the logical block number within the file.
+			 * store a reference to the inode on the cl_page.
+			 * llcrypt needs this information to carry out
+			 * encryption/decryption.
* This is safe because we know these pages are private
- * to the thread doing the Direct IO, and despite
- * setting a mapping on the pages, cached lookups will
- * not find them.
- * Set PageChecked to detect special case of Direct IO
- * in osc_brw_fini_request().
- * Reference to the mapping and PageChecked flag are
- * removed in cl_aio_end().
+ * to the thread doing the Direct IO.
*/
- vmpage->index = index++;
- vmpage->mapping = inode->i_mapping;
- SetPageChecked(vmpage);
+ page->cp_inode = inode;
}
cl_page_list_add(&queue->c2_qin, page);
/*
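
The removed hack worked because fscrypt derives each block's IV from the page's mapping and index. Paraphrasing the derivation in fs/crypto/crypto.c (reference only, not part of this patch):

    /* Reference paraphrase of fscrypt's IV derivation for a page
     * cache page (see fs/crypto/crypto.c). Each block's IV is its
     * logical block number within the file:
     */
    const struct inode *inode = page->mapping->host;
    const unsigned int blockbits = inode->i_blkbits;
    u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
    	       (offs >> blockbits);

Storing the inode on the cl_page lets the OSC layer supply both inputs itself instead of faking mapping and index on the vmpage.
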
@@ -1081,19 +1081,8 @@ static void cl_aio_end(const struct lu_env *env, struct cl_sync_io *anchor)
/* release pages */
while (aio->cda_pages.pl_nr > 0) {
struct cl_page *page = cl_page_list_first(&aio->cda_pages);
- struct page *vmpage = cl_page_vmpage(page);
- struct inode *inode = vmpage ? page2inode(vmpage) : NULL;
cl_page_get(page);
- /* We end up here in case of Direct IO only. For encrypted file,
- * mapping was set on pages in ll_direct_rw_pages(), so it has
- * to be cleared now before page cleanup.
- * PageChecked flag was also set there, so we clean up here.
- */
- if (inode && IS_ENCRYPTED(inode)) {
- vmpage->mapping = NULL;
- ClearPageChecked(vmpage);
- }
cl_page_list_del(env, &aio->cda_pages, page);
cl_page_delete(env, page);
cl_page_put(env, page);
@@ -231,6 +231,7 @@ struct cl_page *cl_page_alloc(const struct lu_env *env, struct cl_object *o,
cl_page->cp_vmpage = vmpage;
cl_page->cp_state = CPS_CACHED;
cl_page->cp_type = type;
+ cl_page->cp_inode = NULL;
INIT_LIST_HEAD(&cl_page->cp_batch);
lu_ref_init(&cl_page->cp_reference);
cl_object_for_each(o2, o) {
@@ -1409,8 +1409,21 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
void *short_io_buf;
const char *obd_name = cli->cl_import->imp_obd->obd_name;
struct inode *inode;
+ bool directio = false;
inode = page2inode(pga[0]->pg);
+ if (!inode) {
+		/* If this is direct IO, fall back to the inode
+		 * stored on the cl_page, since the pages handled
+		 * here are not page cache pages.
+ */
+ struct osc_async_page *oap = brw_page2oap(pga[0]);
+ struct cl_page *clpage = oap2cl_page(oap);
+
+ inode = clpage->cp_inode;
+ if (inode)
+ directio = true;
+ }
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
return -ENOMEM; /* Recoverable */
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
@@ -1435,6 +1448,8 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
bool retried = false;
bool lockedbymyself;
u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
+ struct address_space *map_orig = NULL;
+ pgoff_t index_orig;
retry_encrypt:
if (nunits & ~LUSTRE_ENCRYPTION_MASK)
@@ -1450,10 +1465,20 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
* which means only once the page is fully processed.
*/
lockedbymyself = trylock_page(pg->pg);
+ if (directio) {
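+		/* llcrypt derives the IV from page->mapping->host
+		 * and page->index, which direct IO pages do not
+		 * carry; borrow both from the inode and the file
+		 * offset for the duration of the encryption call,
+		 * and restore them just below.
+		 */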
+ map_orig = pg->pg->mapping;
+ pg->pg->mapping = inode->i_mapping;
+ index_orig = pg->pg->index;
+ pg->pg->index = pg->off >> PAGE_SHIFT;
+ }
data_page =
llcrypt_encrypt_pagecache_blocks(pg->pg,
nunits, 0,
GFP_NOFS);
+ if (directio) {
+ pg->pg->mapping = map_orig;
+ pg->pg->index = index_orig;
+ }
if (lockedbymyself)
unlock_page(pg->pg);
if (IS_ERR(data_page)) {
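
The write path keeps llcrypt_encrypt_pagecache_blocks, presumably because it returns the bounce page used for the transfer while leaving the caller's plaintext page intact; the newly added llcrypt_encrypt_block_inplace would instead mirror the read-side loop. A sketch of that alternative, reusing the offs/blockbits/blocksize names from the decrypt path below:

    /* Sketch only: per-block in-place encryption, analogous to the
     * decrypt loop in osc_brw_fini_request(). Overwrites the source
     * page, so it cannot provide a separate bounce page.
     */
    u64 lblk_num = ((u64)(pg->off >> PAGE_SHIFT) <<
    		(PAGE_SHIFT - blockbits)) + (offs >> blockbits);
    unsigned int i;

    for (i = offs; i < offs + LUSTRE_ENCRYPTION_UNIT_SIZE;
         i += blocksize, lblk_num++) {
    	rc = llcrypt_encrypt_block_inplace(inode, pg->pg, blocksize,
    					   i, lblk_num, GFP_NOFS);
    	if (rc)
    		break;
    }
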
@@ -1921,6 +1946,7 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
struct ost_body *body;
u32 client_cksum = 0;
struct inode *inode;
+ unsigned int blockbits = 0, blocksize = 0;
if (rc < 0 && rc != -EDQUOT) {
DEBUG_REQ(D_INFO, req, "Failed request: rc = %d", rc);
@@ -2113,6 +2139,19 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
}
inode = page2inode(aa->aa_ppga[0]->pg);
+ if (!inode) {
+	/* If this is direct IO, fall back to the inode
+	 * stored on the cl_page, since the pages handled
+	 * here are not page cache pages.
+ */
+ struct osc_async_page *oap = brw_page2oap(aa->aa_ppga[0]);
+
+ inode = oap2cl_page(oap)->cp_inode;
+ if (inode) {
+ blockbits = inode->i_blkbits;
+ blocksize = 1 << blockbits;
+ }
+ }
if (inode && IS_ENCRYPTED(inode)) {
int idx;
@@ -2137,18 +2176,33 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
break;
}
- /* The page is already locked when we arrive
- * here, except when we deal with a twisted
- * page for specific Direct IO support, in
- * which case PageChecked flag is set on page.
- */
- if (PageChecked(pg->pg))
- lock_page(pg->pg);
- rc = llcrypt_decrypt_pagecache_blocks(pg->pg,
- LUSTRE_ENCRYPTION_UNIT_SIZE,
- offs);
- if (PageChecked(pg->pg))
- unlock_page(pg->pg);
+ if (blockbits) {
+				/* Direct IO case (blockbits is non-zero
+				 * only when cp_inode was set). Call the
+				 * decrypt primitive that takes the inode
+				 * directly; the page does not need to be
+				 * locked.
+				 */
+ u64 lblk_num =
+ ((u64)(pg->off >> PAGE_SHIFT) <<
+ (PAGE_SHIFT - blockbits)) +
+ (offs >> blockbits);
+ unsigned int i;
+
+ for (i = offs;
+ i < offs + LUSTRE_ENCRYPTION_UNIT_SIZE;
+ i += blocksize, lblk_num++) {
+ rc = llcrypt_decrypt_block_inplace(inode,
+ pg->pg,
+ blocksize, i,
+ lblk_num);
+ if (rc)
+ break;
+ }
+ } else {
+ rc = llcrypt_decrypt_pagecache_blocks(pg->pg,
+ LUSTRE_ENCRYPTION_UNIT_SIZE,
+ offs);
+ }
if (rc)
goto out;
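
A quick sanity check of the IV arithmetic, with assumed values:

    /* Worked example (assumed values): PAGE_SHIFT = 12 and 4 KiB
     * filesystem blocks (blockbits = 12), with pg->off = 0x3000 and
     * offs = 0:
     *   lblk_num = ((0x3000 >> 12) << (12 - 12)) + (0 >> 12) = 3
     * i.e. the fourth 4 KiB block of the file, which is what fscrypt
     * would derive for a page cache page with index 3. With 1 KiB
     * blocks (blockbits = 10), the same page spans lblk_num 12..15
     * and the loop advances lblk_num once per 1 KiB sub-block.
     */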