From patchwork Mon Feb 27 15:17:01 2023
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Christoph Hellwig
X-Patchwork-Id: 13153779
From: Christoph Hellwig
To: Chris Mason, Josef Bacik, David Sterba
Cc: linux-btrfs@vger.kernel.org, Johannes Thumshirn
Subject: [PATCH 09/12] btrfs: remove the submit_extent_page return value
Date: Mon, 27 Feb 2023 08:17:01 -0700
Message-Id: <20230227151704.1224688-10-hch@lst.de>
X-Mailer: git-send-email 2.39.1
In-Reply-To: <20230227151704.1224688-1-hch@lst.de>
References: <20230227151704.1224688-1-hch@lst.de>
X-Mailing-List: linux-btrfs@vger.kernel.org

submit_extent_page always returns 0 since commit d5e4377d5051 ("btrfs:
split zone append bios in btrfs_submit_bio").  Change it to a void return
type and remove all the unreachable error handling code in the callers.

Signed-off-by: Christoph Hellwig
Reviewed-by: Johannes Thumshirn
---
 fs/btrfs/extent_io.c | 156 ++++++++++---------------------------------
 1 file changed, 35 insertions(+), 121 deletions(-)

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 5ab4d401505891..b4d986bef2631f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1010,9 +1010,9 @@ static void alloc_new_bio(struct btrfs_inode *inode,
  * The mirror number for this IO should already be initizlied in
  * @bio_ctrl->mirror_num.
  */
-static int submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
-			      u64 disk_bytenr, struct page *page,
-			      size_t size, unsigned long pg_offset)
+static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
+			       u64 disk_bytenr, struct page *page,
+			       size_t size, unsigned long pg_offset)
 {
 	struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
 	unsigned int cur = pg_offset;
@@ -1061,7 +1061,6 @@ static int submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
 		}
 		cur += added;
 	}
-	return 0;
 }
 
 static int attach_extent_buffer_page(struct extent_buffer *eb,
@@ -1194,7 +1193,7 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
 		unlock_extent(tree, start, end, NULL);
 		btrfs_page_set_error(fs_info, page, start, PAGE_SIZE);
 		unlock_page(page);
-		goto out;
+		return ret;
 	}
 
 	if (page->index == last_byte >> PAGE_SHIFT) {
@@ -1225,8 +1224,7 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
 		if (IS_ERR(em)) {
 			unlock_extent(tree, cur, end, NULL);
 			end_page_read(page, false, cur, end + 1 - cur);
-			ret = PTR_ERR(em);
-			break;
+			return PTR_ERR(em);
 		}
 		extent_offset = cur - em->start;
 		BUG_ON(extent_map_end(em) <= cur);
@@ -1316,22 +1314,13 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
 		if (force_bio_submit)
 			submit_one_bio(bio_ctrl);
 
-		ret = submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
-					 pg_offset);
-		if (ret) {
-			/*
-			 * We have to unlock the remaining range, or the page
-			 * will never be unlocked.
-			 */
-			unlock_extent(tree, cur, end, NULL);
-			end_page_read(page, false, cur, end + 1 - cur);
-			goto out;
-		}
+		submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
+				   pg_offset);
 		cur = cur + iosize;
 		pg_offset += iosize;
 	}
-out:
-	return ret;
+
+	return 0;
 }
 
 int btrfs_read_folio(struct file *file, struct folio *folio)
@@ -1622,19 +1611,9 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
 		 */
 		btrfs_page_clear_dirty(fs_info, page, cur, iosize);
 
-		ret = submit_extent_page(bio_ctrl, disk_bytenr, page,
-					 iosize, cur - page_offset(page));
-		if (ret) {
-			has_error = true;
-			if (!saved_ret)
-				saved_ret = ret;
-
-			btrfs_page_set_error(fs_info, page, cur, iosize);
-			if (PageWriteback(page))
-				btrfs_page_clear_writeback(fs_info, page, cur,
-							   iosize);
-		}
-
+		submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
+				   cur - page_offset(page));
+		ret = 0;
 		cur += iosize;
 		nr++;
 	}
@@ -2092,13 +2071,12 @@ static void prepare_eb_write(struct extent_buffer *eb)
  * Unlike the work in write_one_eb(), we rely completely on extent locking.
  * Page locking is only utilized at minimum to keep the VMM code happy.
  */
-static int write_one_subpage_eb(struct extent_buffer *eb,
-				struct btrfs_bio_ctrl *bio_ctrl)
+static void write_one_subpage_eb(struct extent_buffer *eb,
+				 struct btrfs_bio_ctrl *bio_ctrl)
 {
 	struct btrfs_fs_info *fs_info = eb->fs_info;
 	struct page *page = eb->pages[0];
 	bool no_dirty_ebs = false;
-	int ret;
 
 	prepare_eb_write(eb);
 
@@ -2114,17 +2092,8 @@ static int write_one_subpage_eb(struct extent_buffer *eb,
 
 	bio_ctrl->end_io_func = end_bio_subpage_eb_writepage;
 
-	ret = submit_extent_page(bio_ctrl, eb->start, page, eb->len,
-				 eb->start - page_offset(page));
-	if (ret) {
-		btrfs_subpage_clear_writeback(fs_info, page, eb->start, eb->len);
-		set_btree_ioerr(page, eb);
-		unlock_page(page);
-
-		if (atomic_dec_and_test(&eb->io_pages))
-			end_extent_buffer_writeback(eb);
-		return -EIO;
-	}
+	submit_extent_page(bio_ctrl, eb->start, page, eb->len,
+			   eb->start - page_offset(page));
 	unlock_page(page);
 	/*
 	 * Submission finished without problem, if no range of the page is
@@ -2132,15 +2101,13 @@
 	 */
 	if (no_dirty_ebs)
 		bio_ctrl->wbc->nr_to_write--;
-	return ret;
 }
 
-static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
+static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
 			struct btrfs_bio_ctrl *bio_ctrl)
 {
 	u64 disk_bytenr = eb->start;
 	int i, num_pages;
-	int ret = 0;
 
 	prepare_eb_write(eb);
 
@@ -2152,31 +2119,11 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 
 		clear_page_dirty_for_io(p);
 		set_page_writeback(p);
-		ret = submit_extent_page(bio_ctrl, disk_bytenr, p,
-					 PAGE_SIZE, 0);
-		if (ret) {
-			set_btree_ioerr(p, eb);
-			if (PageWriteback(p))
-				end_page_writeback(p);
-			if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
-				end_extent_buffer_writeback(eb);
-			ret = -EIO;
-			break;
-		}
+		submit_extent_page(bio_ctrl, disk_bytenr, p, PAGE_SIZE, 0);
 		disk_bytenr += PAGE_SIZE;
 		bio_ctrl->wbc->nr_to_write--;
 		unlock_page(p);
 	}
-
-	if (unlikely(ret)) {
-		for (; i < num_pages; i++) {
-			struct page *p = eb->pages[i];
-			clear_page_dirty_for_io(p);
-			unlock_page(p);
-		}
-	}
-
-	return ret;
 }
 
 /*
@@ -2256,10 +2203,8 @@ static int submit_eb_subpage(struct page *page,
 			free_extent_buffer(eb);
 			goto cleanup;
 		}
-		ret = write_one_subpage_eb(eb, bio_ctrl);
+		write_one_subpage_eb(eb, bio_ctrl);
 		free_extent_buffer(eb);
-		if (ret < 0)
-			goto cleanup;
 		submitted++;
 	}
 	return submitted;
@@ -2362,10 +2307,8 @@ static int submit_eb_page(struct page *page,
 		btrfs_schedule_zone_finish_bg(cache, eb);
 		btrfs_put_block_group(cache);
 	}
-	ret = write_one_eb(eb, bio_ctrl);
+	write_one_eb(eb, bio_ctrl);
 	free_extent_buffer(eb);
-	if (ret < 0)
-		return ret;
 	return 1;
 }
 
@@ -4386,7 +4329,7 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
 		.mirror_num = mirror_num,
 		.parent_check = check,
 	};
-	int ret = 0;
+	int ret;
 
 	ASSERT(!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags));
 	ASSERT(PagePrivate(page));
@@ -4404,14 +4347,13 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
 		return ret;
 	}
 
-	ret = 0;
 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags) ||
 	    PageUptodate(page) ||
 	    btrfs_subpage_test_uptodate(fs_info, page, eb->start, eb->len)) {
 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 		unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
 			      &cached_state);
-		return ret;
+		return 0;
 	}
 
 	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
@@ -4423,27 +4365,19 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
 	btrfs_subpage_clear_error(fs_info, page, eb->start, eb->len);
 
 	btrfs_subpage_start_reader(fs_info, page, eb->start, eb->len);
-	ret = submit_extent_page(&bio_ctrl, eb->start, page, eb->len,
-				 eb->start - page_offset(page));
-	if (ret) {
-		/*
-		 * In the endio function, if we hit something wrong we will
-		 * increase the io_pages, so here we need to decrease it for
-		 * error path.
-		 */
-		atomic_dec(&eb->io_pages);
-	}
+	submit_extent_page(&bio_ctrl, eb->start, page, eb->len,
+			   eb->start - page_offset(page));
 	submit_one_bio(&bio_ctrl);
-	if (ret || wait != WAIT_COMPLETE) {
+	if (wait != WAIT_COMPLETE) {
 		free_extent_state(cached_state);
-		return ret;
+		return 0;
 	}
 
 	wait_extent_bit(io_tree, eb->start, eb->start + eb->len - 1,
 			EXTENT_LOCKED, &cached_state);
 	if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
-		ret = -EIO;
-	return ret;
+		return -EIO;
+	return 0;
 }
 
 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
@@ -4451,8 +4385,6 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
 {
 	int i;
 	struct page *page;
-	int err;
-	int ret = 0;
 	int locked_pages = 0;
 	int all_uptodate = 1;
 	int num_pages;
@@ -4526,27 +4458,9 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
 		page = eb->pages[i];
 
 		if (!PageUptodate(page)) {
-			if (ret) {
-				atomic_dec(&eb->io_pages);
-				unlock_page(page);
-				continue;
-			}
-
 			ClearPageError(page);
-			err = submit_extent_page(&bio_ctrl,
-						 page_offset(page), page,
-						 PAGE_SIZE, 0);
-			if (err) {
-				/*
-				 * We failed to submit the bio so it's the
-				 * caller's responsibility to perform cleanup
-				 * i.e unlock page/set error bit.
-				 */
-				ret = err;
-				SetPageError(page);
-				unlock_page(page);
-				atomic_dec(&eb->io_pages);
-			}
+			submit_extent_page(&bio_ctrl, page_offset(page), page,
+					   PAGE_SIZE, 0);
 		} else {
 			unlock_page(page);
 		}
@@ -4554,17 +4468,17 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
 
 	submit_one_bio(&bio_ctrl);
 
-	if (ret || wait != WAIT_COMPLETE)
-		return ret;
+	if (wait != WAIT_COMPLETE)
+		return 0;
 
 	for (i = 0; i < num_pages; i++) {
 		page = eb->pages[i];
 		wait_on_page_locked(page);
 		if (!PageUptodate(page))
-			ret = -EIO;
+			return -EIO;
 	}
 
-	return ret;
+	return 0;
 
 unlock_exit:
 	while (locked_pages > 0) {
@@ -4572,7 +4486,7 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
 		page = eb->pages[locked_pages];
 		unlock_page(page);
 	}
-	return ret;
+	return 0;
 }
 
 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
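
For readers following along outside the kernel tree, the cleanup pattern above
(dropping a return value that can only ever be zero, together with the now-dead
error handling in every caller) can be reduced to a small standalone sketch.
The snippet below is illustrative only, with hypothetical names and plain C in
place of the kernel types; it is not part of the patch.

/*
 * Illustrative sketch (hypothetical names, plain C): a function whose return
 * value can only be zero becomes void, and the unreachable error handling in
 * its caller goes away with it.
 */
#include <stdio.h>

/* Before: callers must check an error that can no longer happen. */
static int submit_chunk_old(const unsigned char *buf, size_t len)
{
	printf("submitting %zu bytes\n", len);
	(void)buf;
	return 0;		/* the only possible return value */
}

/* After: a void return makes the "cannot fail" contract explicit. */
static void submit_chunk_new(const unsigned char *buf, size_t len)
{
	printf("submitting %zu bytes\n", len);
	(void)buf;
}

int main(void)
{
	unsigned char buf[16] = { 0 };

	/* Old calling convention: the error branch is dead code. */
	if (submit_chunk_old(buf, sizeof(buf)))
		fprintf(stderr, "unreachable cleanup path\n");

	/* New calling convention: nothing to check, nothing to clean up. */
	submit_chunk_new(buf, sizeof(buf));
	return 0;
}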