@@ -28,10 +28,10 @@ normal code doesn't have to deal with bi_bvec_done.
constructed from the raw biovecs but taking into account bi_bvec_done and
bi_size.
- bio_for_each_segment() has been updated to take a bvec_iter argument
+ bio_for_each_page() has been updated to take a bvec_iter argument
instead of an integer (that corresponded to bi_idx); for a lot of code the
conversion just required changing the types of the arguments to
- bio_for_each_segment().
+ bio_for_each_page().
* Advancing a bvec_iter is done with bio_advance_iter(); bio_advance() is a
wrapper around bio_advance_iter() that operates on bio->bi_iter, and also
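
For illustration, a minimal sketch of the iteration pattern described above,
modeled on the bio_flush_dcache_pages() conversion later in this patch
(flush_bio_pages() is a hypothetical name, not part of the patch):

        static void flush_bio_pages(struct bio *bio)
        {
                struct bio_vec bvec;
                struct bvec_iter iter;

                /* Yields one page-sized bio_vec per step; the bvec_iter
                 * tracks the position, so no manual bi_idx bookkeeping. */
                bio_for_each_page(bvec, bio, iter)
                        flush_dcache_page(bvec.bv_page);
        }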
@@ -69,7 +69,7 @@ static blk_qc_t nfhd_make_request(struct request_queue *queue, struct bio *bio)
dir = bio_data_dir(bio);
shift = dev->bshift;
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
len = bvec.bv_len;
len >>= 9;
nfhd_read_write(dev->id, 0, dir, sec >> shift, len >> shift,
@@ -108,7 +108,7 @@ static blk_qc_t simdisk_make_request(struct request_queue *q, struct bio *bio)
struct bvec_iter iter;
sector_t sector = bio->bi_iter.bi_sector;
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
char *buffer = kmap_atomic(bvec.bv_page) + bvec.bv_offset;
unsigned len = bvec.bv_len >> SECTOR_SHIFT;
@@ -204,7 +204,7 @@ static blk_status_t bio_integrity_process(struct bio *bio,
iter.seed = proc_iter->bi_sector;
iter.prot_buf = prot_buf;
- __bio_for_each_segment(bv, bio, bviter, *proc_iter) {
+ __bio_for_each_page(bv, bio, bviter, *proc_iter) {
void *kaddr = kmap_atomic(bv.bv_page);
iter.data_buf = kaddr + bv.bv_offset;
@@ -536,7 +536,7 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
struct bio_vec bv;
struct bvec_iter iter;
- __bio_for_each_segment(bv, bio, iter, start) {
+ __bio_for_each_page(bv, bio, iter, start) {
char *data = bvec_kmap_irq(&bv, &flags);
memset(data, 0, bv.bv_len);
flush_dcache_page(bv.bv_page);
@@ -700,7 +700,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
break;
default:
- bio_for_each_segment(bv, bio_src, iter)
+ bio_for_each_page(bv, bio_src, iter)
bio->bi_io_vec[bio->bi_vcnt++] = bv;
break;
}
@@ -1092,7 +1092,7 @@ static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
int i;
struct bio_vec *bvec;
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_page_all(bvec, bio, i) {
ssize_t ret;
ret = copy_page_from_iter(bvec->bv_page,
@@ -1123,7 +1123,7 @@ static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
int i;
struct bio_vec *bvec;
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_page_all(bvec, bio, i) {
ssize_t ret;
ret = copy_page_to_iter(bvec->bv_page,
@@ -1146,7 +1146,7 @@ void bio_free_pages(struct bio *bio)
struct bio_vec *bvec;
int i;
- bio_for_each_segment_all(bvec, bio, i)
+ bio_for_each_page_all(bvec, bio, i)
__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);
@@ -1385,7 +1385,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
return bio;
out_unmap:
- bio_for_each_segment_all(bvec, bio, j) {
+ bio_for_each_page_all(bvec, bio, j) {
put_page(bvec->bv_page);
}
bio_put(bio);
@@ -1400,7 +1400,7 @@ static void __bio_unmap_user(struct bio *bio)
/*
* make sure we dirty pages we wrote to
*/
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_page_all(bvec, bio, i) {
if (bio_data_dir(bio) == READ)
set_page_dirty_lock(bvec->bv_page);
@@ -1493,7 +1493,7 @@ static void bio_copy_kern_endio_read(struct bio *bio)
struct bio_vec *bvec;
int i;
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_page_all(bvec, bio, i) {
memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
p += bvec->bv_len;
}
@@ -1603,7 +1603,7 @@ void bio_set_pages_dirty(struct bio *bio)
struct bio_vec *bvec;
int i;
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_page_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
if (page && !PageCompound(page))
@@ -1617,7 +1617,7 @@ static void bio_release_pages(struct bio *bio)
struct bio_vec *bvec;
int i;
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_page_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
if (page)
@@ -1671,7 +1671,7 @@ void bio_check_pages_dirty(struct bio *bio)
int nr_clean_pages = 0;
int i;
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_page_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
if (PageDirty(page) || PageCompound(page)) {
@@ -1730,7 +1730,7 @@ void bio_flush_dcache_pages(struct bio *bi)
struct bio_vec bvec;
struct bvec_iter iter;
- bio_for_each_segment(bvec, bi, iter)
+ bio_for_each_page(bvec, bi, iter)
flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL(bio_flush_dcache_pages);
@@ -110,7 +110,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
struct bio *new = NULL;
const unsigned max_sectors = get_max_io_size(q, bio);
- bio_for_each_segment(bv, bio, iter) {
+ bio_for_each_page(bv, bio, iter) {
/*
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, disallow it.
@@ -245,7 +245,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
seg_size = 0;
nr_phys_segs = 0;
for_each_bio(bio) {
- bio_for_each_segment(bv, bio, iter) {
+ bio_for_each_page(bv, bio, iter) {
/*
* If SG merging is disabled, each bio vector is
* a segment
@@ -412,7 +412,7 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
int cluster = blk_queue_cluster(q), nsegs = 0;
for_each_bio(bio)
- bio_for_each_segment(bvec, bio, iter)
+ bio_for_each_page(bvec, bio, iter)
__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
&nsegs, &cluster);
@@ -190,7 +190,7 @@ int blkdev_report_zones(struct block_device *bdev,
n = 0;
nz = 0;
nr_rep = 0;
- bio_for_each_segment_all(bv, bio, i) {
+ bio_for_each_page_all(bv, bio, i) {
if (!bv->bv_page)
break;
@@ -223,7 +223,7 @@ int blkdev_report_zones(struct block_device *bdev,
*nr_zones = nz;
out:
- bio_for_each_segment_all(bv, bio, i)
+ bio_for_each_page_all(bv, bio, i)
__free_page(bv->bv_page);
bio_put(bio);
@@ -119,7 +119,7 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
*/
struct bvec_iter from_iter = BVEC_ITER_ALL_INIT;
- bio_for_each_segment(tovec, to, iter) {
+ bio_for_each_page(tovec, to, iter) {
fromvec = bio_iter_iovec(from, from_iter);
if (tovec.bv_page != fromvec.bv_page) {
/*
@@ -147,7 +147,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
/*
* free up bounce indirect pages used
*/
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_page_all(bvec, bio, i) {
orig_vec = bio_iter_iovec(bio_orig, orig_iter);
if (bvec->bv_page != orig_vec.bv_page) {
dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
@@ -204,7 +204,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
int sectors = 0;
bool passthrough = bio_is_passthrough(*bio_orig);
- bio_for_each_segment(from, *bio_orig, iter) {
+ bio_for_each_page(from, *bio_orig, iter) {
if (i++ < BIO_MAX_PAGES)
sectors += from.bv_len >> 9;
if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)
@@ -222,7 +222,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
bio = bio_clone_bioset(*bio_orig, GFP_NOIO, passthrough ? NULL :
bounce_bio_set);
- bio_for_each_segment_all(to, bio, i) {
+ bio_for_each_page_all(to, bio, i) {
struct page *page = to->bv_page;
if (page_to_pfn(page) <= q->limits.bounce_pfn)
@@ -299,7 +299,7 @@ skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
int frag = 0;
struct bio_vec bv;
- __bio_for_each_segment(bv, bio, iter, iter)
+ __bio_for_each_page(bv, bio, iter, iter)
skb_fill_page_desc(skb, frag++, bv.bv_page,
bv.bv_offset, bv.bv_len);
}
@@ -1031,7 +1031,7 @@ bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
iter.bi_size = cnt;
- __bio_for_each_segment(bv, bio, iter, iter) {
+ __bio_for_each_page(bv, bio, iter, iter) {
char *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
skb_copy_bits(skb, soff, p, bv.bv_len);
kunmap_atomic(p);
@@ -291,7 +291,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
goto io_error;
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
int err;
@@ -1601,7 +1601,7 @@ static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
struct bvec_iter iter;
/* hint all but last page with MSG_MORE */
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
int err;
err = _drbd_no_send_page(peer_device, bvec.bv_page,
@@ -1623,7 +1623,7 @@ static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *b
struct bvec_iter iter;
/* hint all but last page with MSG_MORE */
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
int err;
err = _drbd_send_page(peer_device, bvec.bv_page,
@@ -1919,7 +1919,7 @@ static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_req
bio = req->master_bio;
D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
expect = min_t(int, data_size, bvec.bv_len);
err = drbd_recv_all_warn(peer_device->connection, mapped, expect);
@@ -337,7 +337,7 @@ void drbd_csum_bio(struct crypto_ahash *tfm, struct bio *bio, void *digest)
sg_init_table(&sg, 1);
crypto_ahash_init(req);
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
ahash_request_set_crypt(req, &sg, NULL, sg.length);
crypto_ahash_update(req);
@@ -498,7 +498,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
struct bvec_iter iter;
struct bio_vec bvec;
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
bool is_last = !next && bio_iter_last(bvec, iter);
int flags = is_last ? 0 : MSG_MORE;
@@ -1171,7 +1171,7 @@ static int null_handle_bio(struct nullb_cmd *cmd)
}
spin_lock_irq(&nullb->lock);
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
len = bvec.bv_len;
err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
op_is_write(bio_op(bio)), sector,
@@ -557,7 +557,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
struct bvec_iter iter;
struct bio *next;
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
/* PS3 is ppc64, so we don't handle highmem */
char *ptr = page_address(bvec.bv_page) + bvec.bv_offset;
size_t len = bvec.bv_len, retlen;
@@ -723,7 +723,7 @@ blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
bv_len -= RSXX_HW_BLK_SIZE;
}
} else {
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
bv_len = bvec.bv_len;
bv_off = bvec.bv_offset;
@@ -1197,7 +1197,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
break;
}
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
struct bio_vec bv = bvec;
unsigned int unwritten = bvec.bv_len;
@@ -424,7 +424,7 @@ static void do_btree_node_write(struct btree *b)
struct bio_vec *bv;
void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
- bio_for_each_segment_all(bv, b->bio, j)
+ bio_for_each_page_all(bv, b->bio, j)
memcpy(page_address(bv->bv_page),
base + j * PAGE_SIZE, PAGE_SIZE);
@@ -121,7 +121,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
submit_bio_wait(check);
citer.bi_size = UINT_MAX;
- bio_for_each_segment(bv, bio, iter) {
+ bio_for_each_page(bv, bio, iter) {
void *p1 = kmap_atomic(bv.bv_page);
void *p2;
@@ -43,7 +43,7 @@ static void bio_csum(struct bio *bio, struct bkey *k)
struct bvec_iter iter;
uint64_t csum = 0;
- bio_for_each_segment(bv, bio, iter) {
+ bio_for_each_page(bv, bio, iter) {
void *d = kmap(bv.bv_page) + bv.bv_offset;
csum = bch_crc64_update(csum, d, bv.bv_len);
kunmap(bv.bv_page);
@@ -303,7 +303,7 @@ int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
int i;
struct bio_vec *bv;
- bio_for_each_segment_all(bv, bio, i) {
+ bio_for_each_page_all(bv, bio, i) {
bv->bv_page = alloc_page(gfp_mask);
if (!bv->bv_page) {
while (--bv >= bio->bi_io_vec)
@@ -1451,7 +1451,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
unsigned int i;
struct bio_vec *bv;
- bio_for_each_segment_all(bv, clone, i) {
+ bio_for_each_page_all(bv, clone, i) {
BUG_ON(!bv->bv_page);
mempool_free(bv->bv_page, cc->page_pool);
}
@@ -1256,7 +1256,7 @@ static void integrity_metadata(struct work_struct *w)
if (!checksums)
checksums = checksums_onstack;
- __bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
+ __bio_for_each_page(bv, bio, iter, dio->orig_bi_iter) {
unsigned pos;
char *mem, *checksums_ptr;
@@ -1376,7 +1376,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
if (ic->sectors_per_block > 1) {
struct bvec_iter iter;
struct bio_vec bv;
- bio_for_each_segment(bv, bio, iter) {
+ bio_for_each_page(bv, bio, iter) {
if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
bv.bv_offset, bv.bv_len, ic->sectors_per_block);
@@ -732,7 +732,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
* can't just hold onto the page until some later point, we have to
* manually copy the contents.
*/
- bio_for_each_segment(bv, bio, iter) {
+ bio_for_each_page(bv, bio, iter) {
struct page *page;
void *src, *dst;
@@ -1156,7 +1156,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
* Remap the start sector of the reported zones. For sequential zones,
* also remap the write pointer position.
*/
- bio_for_each_segment(bvec, report_bio, iter) {
+ bio_for_each_page(bvec, report_bio, iter) {
addr = kmap_atomic(bvec.bv_page);
/* Remember the report header in the first page */
@@ -2123,7 +2123,7 @@ static void process_checks(struct r1bio *r1_bio)
/* Now we can 'fixup' the error value */
sbio->bi_status = 0;
- bio_for_each_segment_all(bi, sbio, j)
+ bio_for_each_page_all(bi, sbio, j)
page_len[j] = bi->bv_len;
if (!status) {
@@ -1247,7 +1247,7 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
flags |= ASYNC_TX_FENCE;
init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
- bio_for_each_segment(bvl, bio, iter) {
+ bio_for_each_page(bvl, bio, iter) {
int len = bvl.bv_len;
int clen;
int b_offset = 0;
@@ -187,7 +187,7 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
nsblk = q->queuedata;
rw = bio_data_dir(bio);
do_acct = nd_iostat_start(bio, &start);
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
BUG_ON(len > PAGE_SIZE);
@@ -1452,7 +1452,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
do_acct = nd_iostat_start(bio, &start);
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
if (len > PAGE_SIZE || len < btt->sector_size ||
@@ -183,7 +183,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
nvdimm_flush(nd_region);
do_acct = nd_iostat_start(bio, &start);
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
bvec.bv_offset, op_is_write(bio_op(bio)),
iter.bi_sector);
@@ -884,7 +884,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
}
index = (bio->bi_iter.bi_sector >> 3);
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
page_addr = (unsigned long)
page_address(bvec.bv_page) + bvec.bv_offset;
source_addr = dev_info->start + (index<<12) + bytes_done;
@@ -203,7 +203,7 @@ static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio)
if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset)
goto fail;
index = (bio->bi_iter.bi_sector >> 3) + xdev->offset;
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
page_addr = (unsigned long)
kmap(bvec.bv_page) + bvec.bv_offset;
bytes = bvec.bv_len;
@@ -242,7 +242,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
}
__set_current_state(TASK_RUNNING);
- bio_for_each_segment_all(bvec, &bio, i) {
+ bio_for_each_page_all(bvec, &bio, i) {
if (should_dirty && !PageCompound(bvec->bv_page))
set_page_dirty_lock(bvec->bv_page);
put_page(bvec->bv_page);
@@ -310,7 +310,7 @@ static void blkdev_bio_end_io(struct bio *bio)
struct bio_vec *bvec;
int i;
- bio_for_each_segment_all(bvec, bio, i)
+ bio_for_each_page_all(bvec, bio, i)
put_page(bvec->bv_page);
bio_put(bio);
}
@@ -2817,7 +2817,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
goto leave;
cur_bytenr = dev_bytenr;
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
BUG_ON(bvec.bv_len != PAGE_SIZE);
mapped_datav[i] = kmap(bvec.bv_page);
i++;
@@ -2832,7 +2832,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
mapped_datav, segs,
bio, &bio_is_patched,
NULL, bio->bi_opf);
- bio_for_each_segment(bvec, bio, iter)
+ bio_for_each_page(bvec, bio, iter)
kunmap(bvec.bv_page);
kfree(mapped_datav);
} else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
@@ -172,7 +172,7 @@ static void end_compressed_bio_read(struct bio *bio)
* checked so the end_io handlers know about it
*/
ASSERT(!bio_flagged(bio, BIO_CLONED));
- bio_for_each_segment_all(bvec, cb->orig_bio, i)
+ bio_for_each_page_all(bvec, cb->orig_bio, i)
SetPageChecked(bvec->bv_page);
bio_endio(cb->orig_bio);
@@ -831,7 +831,7 @@ static blk_status_t btree_csum_one_bio(struct bio *bio)
int i, ret = 0;
ASSERT(!bio_flagged(bio, BIO_CLONED));
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_page_all(bvec, bio, i) {
root = BTRFS_I(bvec->bv_page->mapping->host)->root;
ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
if (ret)
@@ -2458,7 +2458,7 @@ static void end_bio_extent_writepage(struct bio *bio)
int i;
ASSERT(!bio_flagged(bio, BIO_CLONED));
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_page_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
struct inode *inode = page->mapping->host;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -2529,7 +2529,7 @@ static void end_bio_extent_readpage(struct bio *bio)
int i;
ASSERT(!bio_flagged(bio, BIO_CLONED));
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_page_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
struct inode *inode = page->mapping->host;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -3682,7 +3682,7 @@ static void end_bio_extent_buffer_writepage(struct bio *bio)
int i, done;
ASSERT(!bio_flagged(bio, BIO_CLONED));
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_page_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
eb = (struct extent_buffer *)page->private;
@@ -209,7 +209,7 @@ static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio
if (dio)
offset = logical_offset;
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
page_bytes_left = bvec.bv_len;
if (count)
goto next;
@@ -451,7 +451,7 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
index = 0;
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
if (!contig)
offset = page_offset(bvec.bv_page) + bvec.bv_offset;
@@ -7894,7 +7894,7 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
done->uptodate = 1;
ASSERT(!bio_flagged(bio, BIO_CLONED));
- bio_for_each_segment_all(bvec, bio, i)
+ bio_for_each_page_all(bvec, bio, i)
clean_io_failure(BTRFS_I(inode)->root->fs_info, failure_tree,
io_tree, done->start, bvec->bv_page,
btrfs_ino(BTRFS_I(inode)), 0);
@@ -7924,7 +7924,7 @@ static blk_status_t __btrfs_correct_data_nocsum(struct inode *inode,
done.inode = inode;
io_bio->bio.bi_iter = io_bio->iter;
- bio_for_each_segment(bvec, &io_bio->bio, iter) {
+ bio_for_each_page(bvec, &io_bio->bio, iter) {
nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
pgoff = bvec.bv_offset;
@@ -7986,7 +7986,7 @@ static void btrfs_retry_endio(struct bio *bio)
failure_tree = &BTRFS_I(inode)->io_failure_tree;
ASSERT(!bio_flagged(bio, BIO_CLONED));
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_page_all(bvec, bio, i) {
ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
bvec->bv_offset, done->start,
bvec->bv_len);
@@ -8031,7 +8031,7 @@ static blk_status_t __btrfs_subio_endio_read(struct inode *inode,
done.inode = inode;
io_bio->bio.bi_iter = io_bio->iter;
- bio_for_each_segment(bvec, &io_bio->bio, iter) {
+ bio_for_each_page(bvec, &io_bio->bio, iter) {
nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
pgoff = bvec.bv_offset;
@@ -1161,7 +1161,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
if (bio_flagged(bio, BIO_CLONED))
bio->bi_iter = btrfs_io_bio(bio)->iter;
- bio_for_each_segment(bvec, bio, iter) {
+ bio_for_each_page(bvec, bio, iter) {
rbio->bio_pages[page_index + i] = bvec.bv_page;
i++;
}
@@ -1448,7 +1448,7 @@ static void set_bio_pages_uptodate(struct bio *bio)
ASSERT(!bio_flagged(bio, BIO_CLONED));
- bio_for_each_segment_all(bvec, bio, i)
+ bio_for_each_page_all(bvec, bio, i)
SetPageUptodate(bvec->bv_page);
}
@@ -38,7 +38,7 @@ static void completion_pages(struct work_struct *work)
struct bio_vec *bv;
int i;
- bio_for_each_segment_all(bv, bio, i) {
+ bio_for_each_page_all(bv, bio, i) {
struct page *page = bv->bv_page;
int ret = fscrypt_decrypt_page(page->mapping->host, page,
PAGE_SIZE, 0, page->index);
@@ -551,7 +551,7 @@ static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) {
bio_check_pages_dirty(bio); /* transfers ownership */
} else {
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_page_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
if (dio->op == REQ_OP_READ && !PageCompound(page) &&
@@ -407,7 +407,7 @@ static void _clear_bio(struct bio *bio)
struct bio_vec *bv;
unsigned i;
- bio_for_each_segment_all(bv, bio, i) {
+ bio_for_each_page_all(bv, bio, i) {
unsigned this_count = bv->bv_len;
if (likely(PAGE_SIZE == this_count))
@@ -437,7 +437,7 @@ static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
if (!bio)
continue;
- bio_for_each_segment_all(bv, bio, i) {
+ bio_for_each_page_all(bv, bio, i) {
struct page *page = bv->bv_page;
SetPageUptodate(page);
@@ -64,7 +64,7 @@ static void ext4_finish_bio(struct bio *bio)
int i;
struct bio_vec *bvec;
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_page_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
struct page *data_page = NULL;
@@ -81,7 +81,7 @@ static void mpage_end_io(struct bio *bio)
return;
}
}
- bio_for_each_segment_all(bv, bio, i) {
+ bio_for_each_page_all(bv, bio, i) {
struct page *page = bv->bv_page;
if (!bio->bi_status) {
@@ -71,7 +71,7 @@ static void f2fs_read_end_io(struct bio *bio)
}
}
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_page_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
if (!bio->bi_status) {
@@ -92,7 +92,7 @@ static void f2fs_write_end_io(struct bio *bio)
struct bio_vec *bvec;
int i;
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_page_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
enum count_type type = WB_DATA_TYPE(page);
@@ -274,7 +274,7 @@ static bool __has_merged_page(struct f2fs_bio_info *io,
if (!inode && !ino)
return true;
- bio_for_each_segment_all(bvec, io->bio, i) {
+ bio_for_each_page_all(bvec, io->bio, i) {
if (bvec->bv_page->mapping)
target = bvec->bv_page;
@@ -214,7 +214,7 @@ static void gfs2_end_log_write(struct bio *bio)
wake_up(&sdp->sd_logd_waitq);
}
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_page_all(bvec, bio, i) {
page = bvec->bv_page;
if (page_has_buffers(page))
gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
@@ -191,7 +191,7 @@ static void gfs2_meta_read_endio(struct bio *bio)
struct bio_vec *bvec;
int i;
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_page_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
struct buffer_head *bh = page_buffers(page);
unsigned int len = bvec->bv_len;
@@ -818,7 +818,7 @@ static void iomap_dio_bio_end_io(struct bio *bio)
struct bio_vec *bvec;
int i;
- bio_for_each_segment_all(bvec, bio, i)
+ bio_for_each_page_all(bvec, bio, i)
put_page(bvec->bv_page);
bio_put(bio);
}
@@ -49,7 +49,7 @@ static void mpage_end_io(struct bio *bio)
struct bio_vec *bv;
int i;
- bio_for_each_segment_all(bv, bio, i) {
+ bio_for_each_page_all(bv, bio, i) {
struct page *page = bv->bv_page;
page_endio(page, op_is_write(bio_op(bio)),
blk_status_to_errno(bio->bi_status));
@@ -180,7 +180,7 @@ xfs_destroy_ioend(
next = bio->bi_private;
/* walk each page on bio, ending page IO on them */
- bio_for_each_segment_all(bvec, bio, i)
+ bio_for_each_page_all(bvec, bio, i)
xfs_finish_page_writeback(inode, bvec, error);
bio_put(bio);
@@ -157,7 +157,7 @@ static inline void *bio_data(struct bio *bio)
* drivers should _never_ use the all version - the bio may have been split
* before it got to the driver and the driver won't own all of it
*/
-#define bio_for_each_segment_all(bvl, bio, i) \
+#define bio_for_each_page_all(bvl, bio, i) \
for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)
static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
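
As the comment above warns, the _all variant walks bi_io_vec by index and is
only safe for code that owns the bio. A hedged sketch of the usual pattern,
modeled on the completion handlers converted elsewhere in this patch
(release_bio_pages() is a hypothetical name):

        static void release_bio_pages(struct bio *bio)
        {
                struct bio_vec *bvec;
                int i;

                /* Index-based walk over bi_io_vec; valid only for the
                 * bio's owner, never after the bio may have been split. */
                bio_for_each_page_all(bvec, bio, i)
                        put_page(bvec->bv_page);
        }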
@@ -188,14 +188,14 @@ static inline bool bio_rewind_iter(struct bio *bio, struct bvec_iter *iter,
return bvec_iter_rewind(bio->bi_io_vec, iter, bytes);
}
-#define __bio_for_each_segment(bvl, bio, iter, start) \
+#define __bio_for_each_page(bvl, bio, iter, start) \
for (iter = (start); \
(iter).bi_size && \
((bvl = bio_iter_iovec((bio), (iter))), 1); \
bio_advance_iter((bio), &(iter), (bvl).bv_len))
-#define bio_for_each_segment(bvl, bio, iter) \
- __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
+#define bio_for_each_page(bvl, bio, iter) \
+ __bio_for_each_page(bvl, bio, iter, (bio)->bi_iter)
#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
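
The two-underscore form takes an explicit start iterator instead of
(bio)->bi_iter, letting callers resume from a saved position. A hedged
sketch, modeled on the zero_fill_bio_iter() conversion earlier in this
patch (zero_from() is a hypothetical name):

        static void zero_from(struct bio *bio, struct bvec_iter start)
        {
                struct bio_vec bv;
                struct bvec_iter iter;

                /* Iterate from 'start' rather than the bio's own iterator. */
                __bio_for_each_page(bv, bio, iter, start) {
                        char *data = kmap_atomic(bv.bv_page);

                        memset(data + bv.bv_offset, 0, bv.bv_len);
                        kunmap_atomic(data);
                }
        }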
@@ -221,7 +221,7 @@ static inline unsigned bio_segments(struct bio *bio)
break;
}
- bio_for_each_segment(bv, bio, iter)
+ bio_for_each_page(bv, bio, iter)
segs++;
return segs;
@@ -951,7 +951,7 @@ struct req_iterator {
#define rq_for_each_segment(bvl, _rq, _iter) \
__rq_for_each_bio(_iter.bio, _rq) \
- bio_for_each_segment(bvl, _iter.bio, _iter.iter)
+ bio_for_each_page(bvl, _iter.bio, _iter.iter)
#define rq_iter_last(bvec, _iter) \
(_iter.bio->bi_next == NULL && \
@@ -135,7 +135,7 @@ struct ceph_bio_iter {
\
__cur_iter = (it)->iter; \
__cur_iter.bi_size = __cur_n; \
- __bio_for_each_segment(bv, (it)->bio, __cur_iter, __cur_iter) \
+ __bio_for_each_page(bv, (it)->bio, __cur_iter, __cur_iter) \
(void)(BVEC_STEP); \
}))
This is a tree-wide mechanical replacement: bio_for_each_segment() and
bio_for_each_segment_all() never return a real segment at all; both return
one page per bvec and have deceived us for a long time, so fix their names.

This is a pre-patch for supporting multipage bvecs. Once multipage bvecs
are in, each bvec will store a real multipage segment, so people won't be
confused by these wrong names.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 Documentation/block/biovecs.txt     |  4 ++--
 arch/m68k/emu/nfblock.c             |  2 +-
 arch/xtensa/platforms/iss/simdisk.c |  2 +-
 block/bio-integrity.c               |  2 +-
 block/bio.c                         | 24 ++++++++++++------------
 block/blk-merge.c                   |  6 +++---
 block/blk-zoned.c                   |  4 ++--
 block/bounce.c                      |  8 ++++----
 drivers/block/aoe/aoecmd.c          |  4 ++--
 drivers/block/brd.c                 |  2 +-
 drivers/block/drbd/drbd_main.c      |  4 ++--
 drivers/block/drbd/drbd_receiver.c  |  2 +-
 drivers/block/drbd/drbd_worker.c    |  2 +-
 drivers/block/nbd.c                 |  2 +-
 drivers/block/null_blk.c            |  2 +-
 drivers/block/ps3vram.c             |  2 +-
 drivers/block/rsxx/dma.c            |  2 +-
 drivers/block/zram/zram_drv.c       |  2 +-
 drivers/md/bcache/btree.c           |  2 +-
 drivers/md/bcache/debug.c           |  2 +-
 drivers/md/bcache/request.c         |  2 +-
 drivers/md/bcache/util.c            |  2 +-
 drivers/md/dm-crypt.c               |  2 +-
 drivers/md/dm-integrity.c           |  4 ++--
 drivers/md/dm-log-writes.c          |  2 +-
 drivers/md/dm.c                     |  2 +-
 drivers/md/raid1.c                  |  2 +-
 drivers/md/raid5.c                  |  2 +-
 drivers/nvdimm/blk.c                |  2 +-
 drivers/nvdimm/btt.c                |  2 +-
 drivers/nvdimm/pmem.c               |  2 +-
 drivers/s390/block/dcssblk.c        |  2 +-
 drivers/s390/block/xpram.c          |  2 +-
 fs/block_dev.c                      |  4 ++--
 fs/btrfs/check-integrity.c          |  4 ++--
 fs/btrfs/compression.c              |  2 +-
 fs/btrfs/disk-io.c                  |  2 +-
 fs/btrfs/extent_io.c                |  6 +++---
 fs/btrfs/file-item.c                |  4 ++--
 fs/btrfs/inode.c                    |  8 ++++----
 fs/btrfs/raid56.c                   |  4 ++--
 fs/crypto/bio.c                     |  2 +-
 fs/direct-io.c                      |  2 +-
 fs/exofs/ore.c                      |  2 +-
 fs/exofs/ore_raid.c                 |  2 +-
 fs/ext4/page-io.c                   |  2 +-
 fs/ext4/readpage.c                  |  2 +-
 fs/f2fs/data.c                      |  6 +++---
 fs/gfs2/lops.c                      |  2 +-
 fs/gfs2/meta_io.c                   |  2 +-
 fs/iomap.c                          |  2 +-
 fs/mpage.c                          |  2 +-
 fs/xfs/xfs_aops.c                   |  2 +-
 include/linux/bio.h                 | 10 +++++-----
 include/linux/blkdev.h              |  2 +-
 include/linux/ceph/messenger.h      |  2 +-
 56 files changed, 92 insertions(+), 92 deletions(-)