@@ -903,9 +903,8 @@ static inline bool bio_full(struct bio *bio, unsigned len)
return false;
}
-static inline bool page_is_mergeable(const struct bio_vec *bv,
- struct page *page, unsigned int len, unsigned int off,
- bool *same_page)
+static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page,
+ unsigned int len, unsigned int off, bool *same_page)
{
size_t bv_end = bv->bv_offset + bv->bv_len;
phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
@@ -919,38 +918,14 @@ static inline bool page_is_mergeable(const struct bio_vec *bv,
return false;
*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
- if (*same_page)
- return true;
- else if (IS_ENABLED(CONFIG_KMSAN))
- return false;
- return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
-}
-
-/**
- * __bio_try_merge_page - try appending data to an existing bvec.
- * @bio: destination bio
- * @page: start page to add
- * @len: length of the data to add
- * @off: offset of the data relative to @page
- * @same_page: return if the segment has been merged inside the same page
- *
- * Try to add the data at @page + @off to the last bvec of @bio. This is a
- * useful optimisation for file systems with a block size smaller than the
- * page size.
- *
- * Warn if (@len, @off) crosses pages in case that @same_page is true.
- *
- * Return %true on success or %false on failure.
- */
-static bool __bio_try_merge_page(struct bio *bio, struct page *page,
- unsigned int len, unsigned int off, bool *same_page)
-{
- struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
+ if (!*same_page) {
+ if (IS_ENABLED(CONFIG_KMSAN))
+ return false;
+ if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE)
+ return false;
+ }
- if (!page_is_mergeable(bv, page, len, off, same_page))
- return false;
bv->bv_len += len;
- bio->bi_iter.bi_size += len;
return true;
}
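
[Annotation, not part of the patch] With the two hunks above applied, the merged helper should read roughly as follows. This is a reader's reconstruction: the unchanged physical-contiguity and Xen mergeability checks that sit between the hunks are elided, and the page_addr declaration is reproduced from the surrounding (unshown) context.

static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page,
		unsigned int len, unsigned int off, bool *same_page)
{
	size_t bv_end = bv->bv_offset + bv->bv_len;
	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
	phys_addr_t page_addr = page_to_phys(page);

	/* unchanged contiguity and Xen mergeability checks elided here */

	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
	if (!*same_page) {
		if (IS_ENABLED(CONFIG_KMSAN))
			return false;
		if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE)
			return false;
	}

	/* only the bvec grows; bi_size accounting moves to the callers */
	bv->bv_len += len;
	return true;
}

Note that the helper now touches only the bvec; every call site updated below becomes responsible for bumping bio->bi_iter.bi_size on a successful merge.
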
@@ -972,7 +947,7 @@ static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
return false;
if (bv->bv_len + len > queue_max_segment_size(q))
return false;
- return __bio_try_merge_page(bio, page, len, offset, same_page);
+ return bvec_try_merge_page(bv, page, len, offset, same_page);
}
/**
@@ -1001,8 +976,11 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
return 0;
if (bio->bi_vcnt > 0) {
- if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
+ if (bio_try_merge_hw_seg(q, bio, page, len, offset,
+ same_page)) {
+ bio->bi_iter.bi_size += len;
return len;
+ }
if (bio->bi_vcnt >=
min(bio->bi_max_vecs, queue_max_segments(q)))
@@ -1123,8 +1101,11 @@ int bio_add_page(struct bio *bio, struct page *page,
return 0;
if (bio->bi_vcnt > 0 &&
- __bio_try_merge_page(bio, page, len, offset, &same_page))
+ bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
+ page, len, offset, &same_page)) {
+ bio->bi_iter.bi_size += len;
return len;
+ }
if (bio->bi_vcnt >= bio->bi_max_vecs)
return 0;
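
[Annotation, not part of the patch] For context, a sketch of how bio_add_page() reads as a whole after the hunk above, with the caller-side bi_size accounting in place. The initial sanity checks and the unchanged tail are reproduced from surrounding (unshown) context and lightly elided, so treat this as illustrative rather than authoritative.

int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	bool same_page = false;

	/* initial sanity checks (cloned bio, bi_size overflow) elided */

	if (bio->bi_vcnt > 0 &&
	    bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
				page, len, offset, &same_page)) {
		bio->bi_iter.bi_size += len;	/* caller-side accounting */
		return len;
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;
	__bio_add_page(bio, page, len, offset);	/* updates bi_size itself */
	return len;
}
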
@@ -1206,7 +1187,9 @@ static int bio_iov_add_page(struct bio *bio, struct page *page,
return -EIO;
if (bio->bi_vcnt > 0 &&
- __bio_try_merge_page(bio, page, len, offset, &same_page)) {
+ bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
+ page, len, offset, &same_page)) {
+ bio->bi_iter.bi_size += len;
if (same_page)
bio_release_page(bio, page);
return 0;