@@ -30,11 +30,12 @@
*/
bool fscrypt_decrypt_bio(struct bio *bio)
{
- struct folio_iter fi;
+ struct bvec_iter_all iter;
+ struct folio_seg fs;
- bio_for_each_folio_all(fi, bio) {
- int err = fscrypt_decrypt_pagecache_blocks(fi.folio, fi.length,
- fi.offset);
+ bio_for_each_folio_all(fs, bio, iter) {
+ int err = fscrypt_decrypt_pagecache_blocks(fs.fs_folio, fs.fs_len,
+ fs.fs_offset);
if (err) {
bio->bi_status = errno_to_blk_status(err);
@@ -99,14 +99,15 @@ static void buffer_io_error(struct buffer_head *bh)
static void ext4_finish_bio(struct bio *bio)
{
- struct folio_iter fi;
+ struct bvec_iter_all iter;
+ struct folio_seg fs;
- bio_for_each_folio_all(fi, bio) {
- struct folio *folio = fi.folio;
+ bio_for_each_folio_all(fs, bio, iter) {
+ struct folio *folio = fs.fs_folio;
struct folio *io_folio = NULL;
struct buffer_head *bh, *head;
- size_t bio_start = fi.offset;
- size_t bio_end = bio_start + fi.length;
+ size_t bio_start = fs.fs_offset;
+ size_t bio_end = bio_start + fs.fs_len;
unsigned under_io = 0;
unsigned long flags;
@@ -68,10 +68,11 @@ struct bio_post_read_ctx {
static void __read_end_io(struct bio *bio)
{
- struct folio_iter fi;
+ struct bvec_iter_all iter;
+ struct folio_seg fs;
- bio_for_each_folio_all(fi, bio) {
- struct folio *folio = fi.folio;
+ bio_for_each_folio_all(fs, bio, iter) {
+ struct folio *folio = fs.fs_folio;
if (bio->bi_status)
folio_clear_uptodate(folio);
@@ -187,10 +187,11 @@ static void iomap_finish_folio_read(struct folio *folio, size_t offset,
static void iomap_read_end_io(struct bio *bio)
{
int error = blk_status_to_errno(bio->bi_status);
- struct folio_iter fi;
+ struct bvec_iter_all iter;
+ struct folio_seg fs;
- bio_for_each_folio_all(fi, bio)
- iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
+ bio_for_each_folio_all(fs, bio, iter)
+ iomap_finish_folio_read(fs.fs_folio, fs.fs_offset, fs.fs_len, error);
bio_put(bio);
}
@@ -1321,7 +1322,8 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
u32 folio_count = 0;
for (bio = &ioend->io_inline_bio; bio; bio = next) {
- struct folio_iter fi;
+ struct bvec_iter_all iter;
+ struct folio_seg fs;
/*
* For the last bio, bi_private points to the ioend, so we
@@ -1333,8 +1335,8 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
next = bio->bi_private;
/* walk all folios in bio, ending page IO on them */
- bio_for_each_folio_all(fi, bio) {
- iomap_finish_folio_write(inode, fi.folio, fi.length,
+ bio_for_each_folio_all(fs, bio, iter) {
+ iomap_finish_folio_write(inode, fs.fs_folio, fs.fs_len,
error);
folio_count++;
}
@@ -45,15 +45,16 @@
*/
static void mpage_read_end_io(struct bio *bio)
{
- struct folio_iter fi;
+ struct bvec_iter_all iter;
+ struct folio_seg fs;
int err = blk_status_to_errno(bio->bi_status);
- bio_for_each_folio_all(fi, bio) {
+ bio_for_each_folio_all(fs, bio, iter) {
if (err)
- folio_set_error(fi.folio);
+ folio_set_error(fs.fs_folio);
else
- folio_mark_uptodate(fi.folio);
- folio_unlock(fi.folio);
+ folio_mark_uptodate(fs.fs_folio);
+ folio_unlock(fs.fs_folio);
}
bio_put(bio);
@@ -61,15 +62,16 @@ static void mpage_read_end_io(struct bio *bio)
static void mpage_write_end_io(struct bio *bio)
{
- struct folio_iter fi;
+ struct bvec_iter_all iter;
+ struct folio_seg fs;
int err = blk_status_to_errno(bio->bi_status);
- bio_for_each_folio_all(fi, bio) {
+ bio_for_each_folio_all(fs, bio, iter) {
if (err) {
- folio_set_error(fi.folio);
- mapping_set_error(fi.folio->mapping, err);
+ folio_set_error(fs.fs_folio);
+ mapping_set_error(fs.fs_folio->mapping, err);
}
- folio_end_writeback(fi.folio);
+ folio_end_writeback(fs.fs_folio);
}
bio_put(bio);
@@ -340,7 +340,8 @@ void fsverity_verify_bio(struct bio *bio)
struct inode *inode = bio_first_page_all(bio)->mapping->host;
struct fsverity_info *vi = inode->i_verity_info;
struct ahash_request *req;
- struct folio_iter fi;
+ struct bvec_iter_all iter;
+ struct folio_seg fs;
unsigned long max_ra_pages = 0;
/* This allocation never fails, since it's mempool-backed. */
@@ -359,9 +360,9 @@ void fsverity_verify_bio(struct bio *bio)
max_ra_pages = bio->bi_iter.bi_size >> (PAGE_SHIFT + 2);
}
- bio_for_each_folio_all(fi, bio) {
- if (!verify_data_blocks(inode, vi, req, fi.folio, fi.length,
- fi.offset, max_ra_pages)) {
+ bio_for_each_folio_all(fs, bio, iter) {
+ if (!verify_data_blocks(inode, vi, req, fs.fs_folio, fs.fs_len,
+ fs.fs_offset, max_ra_pages)) {
bio->bi_status = BLK_STS_IOERR;
break;
}
@@ -163,6 +163,46 @@ static inline void bio_advance(struct bio *bio, unsigned int nbytes)
#define bio_for_each_segment(bvl, bio, iter) \
__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
+struct folio_seg {
+ struct folio *fs_folio;
+ size_t fs_offset;
+ size_t fs_len;
+};
+
+/*
+ * Returns the initial portion of @bv that points to a single folio (or all of
+ * @bv, if it pointsn to a single folio)
+ */
+static inline struct folio_seg biovec_to_folioseg(struct bio_vec bv)
+{
+
+ struct folio *folio = page_folio(bv.bv_page);
+ size_t offset = (folio_page_idx(folio, bv.bv_page) << PAGE_SHIFT) +
+ bv.bv_offset;
+ size_t len = min_t(size_t, folio_size(folio) - offset, bv.bv_len);
+
+ return (struct folio_seg) {
+ .fs_folio = folio,
+ .fs_offset = offset,
+ .fs_len = len,
+ };
+}
+
+static inline struct folio_seg bio_iter_iovec_folio(struct bio *bio,
+ struct bvec_iter iter)
+{
+ return biovec_to_folioseg(bio_iter_iovec(bio, iter));
+}
+
+#define __bio_for_each_folio(fs, bio, iter, start) \
+ for (iter = (start); \
+ (iter).bi_size && \
+ ((fs = bio_iter_iovec_folio((bio), (iter))), 1); \
+ bio_advance_iter_single((bio), &(iter), (fs).fs_len))
+
+#define bio_for_each_folio(fs, bio, iter) \
+ __bio_for_each_folio(fs, bio, iter, (bio)->bi_iter)
+
#define __bio_for_each_bvec(bvl, bio, iter, start) \
for (iter = (start); \
(iter).bi_size && \
@@ -271,59 +311,22 @@ static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
return &bio->bi_io_vec[bio->bi_vcnt - 1];
}
-/**
- * struct folio_iter - State for iterating all folios in a bio.
- * @folio: The current folio we're iterating. NULL after the last folio.
- * @offset: The byte offset within the current folio.
- * @length: The number of bytes in this iteration (will not cross folio
- * boundary).
- */
-struct folio_iter {
- struct folio *folio;
- size_t offset;
- size_t length;
- /* private: for use by the iterator */
- struct folio *_next;
- size_t _seg_count;
- int _i;
-};
-
-static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
- int i)
+static inline struct folio_seg bio_folio_iter_all_peek(const struct bio *bio,
+ const struct bvec_iter_all *iter)
{
- struct bio_vec *bvec = bio_first_bvec_all(bio) + i;
-
- fi->folio = page_folio(bvec->bv_page);
- fi->offset = bvec->bv_offset +
- PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
- fi->_seg_count = bvec->bv_len;
- fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
- fi->_next = folio_next(fi->folio);
- fi->_i = i;
-}
-
-static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
-{
- fi->_seg_count -= fi->length;
- if (fi->_seg_count) {
- fi->folio = fi->_next;
- fi->offset = 0;
- fi->length = min(folio_size(fi->folio), fi->_seg_count);
- fi->_next = folio_next(fi->folio);
- } else if (fi->_i + 1 < bio->bi_vcnt) {
- bio_first_folio(fi, bio, fi->_i + 1);
- } else {
- fi->folio = NULL;
- }
+ return biovec_to_folioseg(__bvec_iter_all_peek(bio->bi_io_vec, iter));
}
/**
* bio_for_each_folio_all - Iterate over each folio in a bio.
- * @fi: struct folio_iter which is updated for each folio.
+ * @fs: struct folio_seg which is updated for each folio.
* @bio: struct bio to iterate over.
*/
-#define bio_for_each_folio_all(fi, bio) \
- for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
+#define bio_for_each_folio_all(fs, bio, iter) \
+ for (bvec_iter_all_init(&iter); \
+ iter.idx < bio->bi_vcnt && \
+ ((fs = bio_folio_iter_all_peek(bio, &iter)), true); \
+ bio_iter_all_advance((bio), &iter, fs.fs_len))
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
@@ -205,8 +205,8 @@ static inline void bvec_iter_all_init(struct bvec_iter_all *iter_all)
iter_all->idx = 0;
}
-static inline struct bio_vec bvec_iter_all_peek(const struct bio_vec *bvec,
- struct bvec_iter_all *iter)
+static inline struct bio_vec __bvec_iter_all_peek(const struct bio_vec *bvec,
+ const struct bvec_iter_all *iter)
{
struct bio_vec bv = bvec[iter->idx];
@@ -215,8 +215,15 @@ static inline struct bio_vec bvec_iter_all_peek(const struct bio_vec *bvec,
bv.bv_page += bv.bv_offset >> PAGE_SHIFT;
bv.bv_offset &= ~PAGE_MASK;
- bv.bv_len = min_t(unsigned, PAGE_SIZE - bv.bv_offset, bv.bv_len);
+ return bv;
+}
+
+static inline struct bio_vec bvec_iter_all_peek(const struct bio_vec *bvec,
+ const struct bvec_iter_all *iter)
+{
+ struct bio_vec bv = __bvec_iter_all_peek(bvec, iter);
+ bv.bv_len = min_t(unsigned, PAGE_SIZE - bv.bv_offset, bv.bv_len);
return bv;
}