From patchwork Fri Jul 23 17:41:31 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Gao Xiang X-Patchwork-Id: 12396501 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-16.7 required=3.0 tests=BAYES_00, HEADER_FROM_DIFFERENT_DOMAINS,INCLUDES_CR_TRAILER,INCLUDES_PATCH, MAILING_LIST_MULTI,SPF_HELO_NONE,SPF_PASS,UNPARSEABLE_RELAY,URIBL_BLOCKED, USER_AGENT_GIT autolearn=ham autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id 3290DC4338F for ; Fri, 23 Jul 2021 17:41:44 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id 03E0160E0C for ; Fri, 23 Jul 2021 17:41:44 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231590AbhGWRBJ (ORCPT ); Fri, 23 Jul 2021 13:01:09 -0400 Received: from out30-45.freemail.mail.aliyun.com ([115.124.30.45]:45206 "EHLO out30-45.freemail.mail.aliyun.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S230094AbhGWRBI (ORCPT ); Fri, 23 Jul 2021 13:01:08 -0400 X-Alimail-AntiSpam: AC=PASS;BC=-1|-1;BR=01201311R561e4;CH=green;DM=||false|;DS=||;FP=0|-1|-1|-1|0|-1|-1|-1;HT=e01e04400;MF=hsiangkao@linux.alibaba.com;NM=1;PH=DS;RN=9;SR=0;TI=SMTPD_---0UgjVCmC_1627062093; Received: from e18g09479.et15sqa.tbsite.net(mailfrom:hsiangkao@linux.alibaba.com fp:SMTPD_---0UgjVCmC_1627062093) by smtp.aliyun-inc.com(127.0.0.1); Sat, 24 Jul 2021 01:41:38 +0800 From: Gao Xiang To: linux-erofs@lists.ozlabs.org, linux-fsdevel@vger.kernel.org Cc: LKML , Gao Xiang , Christoph Hellwig , "Darrick J . 
Wong" , Matthew Wilcox , Andreas Gruenbacher , Huang Jianan Subject: [PATCH v7] iomap: make inline data support more flexible Date: Sat, 24 Jul 2021 01:41:31 +0800 Message-Id: <20210723174131.180813-1-hsiangkao@linux.alibaba.com> X-Mailer: git-send-email 2.24.4 MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-fsdevel@vger.kernel.org Add support for reading inline data content into the page cache from nonzero page-aligned file offsets. This enables the EROFS tailpacking mode where the last few bytes of the file are stored right after the inode. The buffered write path remains untouched since EROFS cannot be used for testing. It'd be better to implement it if upcoming real users care and provide a real pattern rather than leave untested dead code around. Cc: Christoph Hellwig Cc: Darrick J. Wong Cc: Matthew Wilcox Cc: Andreas Gruenbacher Tested-by: Huang Jianan # erofs Signed-off-by: Gao Xiang Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: Joseph Qi Signed-off-by: Andreas Gruenbacher --- v6: https://lore.kernel.org/r/20210722031729.51628-1-hsiangkao@linux.alibaba.com changes since v6: - based on Christoph's reply; - update commit message suggested by Darrick; - disable buffered write path until some real fs users. 
fs/iomap/buffered-io.c | 42 ++++++++++++++++++++++++++---------------- fs/iomap/direct-io.c | 10 ++++++---- include/linux/iomap.h | 14 ++++++++++++++ 3 files changed, 46 insertions(+), 20 deletions(-) diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 87ccb3438bec..f351e1f9e3f6 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -205,25 +205,29 @@ struct iomap_readpage_ctx { struct readahead_control *rac; }; -static void -iomap_read_inline_data(struct inode *inode, struct page *page, - struct iomap *iomap) +static int iomap_read_inline_data(struct inode *inode, struct page *page, + struct iomap *iomap, loff_t pos) { - size_t size = i_size_read(inode); + size_t size = iomap->length + iomap->offset - pos; void *addr; if (PageUptodate(page)) - return; + return PAGE_SIZE; - BUG_ON(page_has_private(page)); - BUG_ON(page->index); - BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data)); + /* inline data must start page aligned in the file */ + if (WARN_ON_ONCE(offset_in_page(pos))) + return -EIO; + if (WARN_ON_ONCE(!iomap_inline_data_size_valid(iomap))) + return -EIO; + if (WARN_ON_ONCE(page_has_private(page))) + return -EIO; addr = kmap_atomic(page); - memcpy(addr, iomap->inline_data, size); + memcpy(addr, iomap_inline_buf(iomap, pos), size); memset(addr + size, 0, PAGE_SIZE - size); kunmap_atomic(addr); SetPageUptodate(page); + return PAGE_SIZE; } static inline bool iomap_block_needs_zeroing(struct inode *inode, @@ -246,11 +250,8 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data, unsigned poff, plen; sector_t sector; - if (iomap->type == IOMAP_INLINE) { - WARN_ON_ONCE(pos); - iomap_read_inline_data(inode, page, iomap); - return PAGE_SIZE; - } + if (iomap->type == IOMAP_INLINE) + return iomap_read_inline_data(inode, page, iomap, pos); /* zero post-eof blocks as the page may be mapped */ iop = iomap_page_create(inode, page); @@ -589,6 +590,15 @@ __iomap_write_begin(struct inode *inode, loff_t pos, 
unsigned len, int flags, return 0; } +static int iomap_write_begin_inline(struct inode *inode, + struct page *page, struct iomap *srcmap) +{ + /* needs more work for the tailpacking case, disable for now */ + if (WARN_ON_ONCE(srcmap->offset != 0)) + return -EIO; + return iomap_read_inline_data(inode, page, srcmap, 0); +} + static int iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags, struct page **pagep, struct iomap *iomap, struct iomap *srcmap) @@ -618,14 +628,14 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags, } if (srcmap->type == IOMAP_INLINE) - iomap_read_inline_data(inode, page, srcmap); + status = iomap_write_begin_inline(inode, page, srcmap); else if (iomap->flags & IOMAP_F_BUFFER_HEAD) status = __block_write_begin_int(page, pos, len, NULL, srcmap); else status = __iomap_write_begin(inode, pos, len, flags, page, srcmap); - if (unlikely(status)) + if (unlikely(status < 0)) goto out_unlock; *pagep = page; diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index 9398b8c31323..a6aaea2764a5 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -378,23 +378,25 @@ iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length, struct iomap_dio *dio, struct iomap *iomap) { struct iov_iter *iter = dio->submit.iter; + void *dst = iomap_inline_buf(iomap, pos); size_t copied; - BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data)); + if (WARN_ON_ONCE(!iomap_inline_data_size_valid(iomap))) + return -EIO; if (dio->flags & IOMAP_DIO_WRITE) { loff_t size = inode->i_size; if (pos > size) - memset(iomap->inline_data + size, 0, pos - size); - copied = copy_from_iter(iomap->inline_data + pos, length, iter); + memset(iomap_inline_buf(iomap, size), 0, pos - size); + copied = copy_from_iter(dst, length, iter); if (copied) { if (pos + copied > size) i_size_write(inode, pos + copied); mark_inode_dirty(inode); } } else { - copied = copy_to_iter(iomap->inline_data + pos, length, 
iter); + copied = copy_to_iter(dst, length, iter); } dio->size += copied; return copied; diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 479c1da3e221..56b118c6d05c 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -97,6 +97,20 @@ iomap_sector(struct iomap *iomap, loff_t pos) return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT; } +static inline void *iomap_inline_buf(const struct iomap *iomap, loff_t pos) +{ + return iomap->inline_data - iomap->offset + pos; +} + +/* + * iomap->inline_data is a potentially kmapped page, ensure it never crosses a + * page boundary. + */ +static inline bool iomap_inline_data_size_valid(const struct iomap *iomap) +{ + return iomap->length <= PAGE_SIZE - offset_in_page(iomap->inline_data); +} + /* * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare * and page_done will be called for each page written to. This only applies to