@@ -114,8 +114,7 @@ int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
sg = sg_next(sg);
}
- sg_set_page(sg, bvec_page(&iv), iv.bv_len,
- iv.bv_offset);
+ sg_set_pfn(sg, iv.bv_pfn, iv.bv_len, iv.bv_offset);
segments++;
}
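The sg_set_pfn() helper used in place of sg_set_page() is introduced elsewhere in the series and is not shown in these hunks. Assuming bv_pfn is a pfn_t that is still backed by a struct page, a minimal sketch of such a helper could simply forward to the existing scatterlist API:

    #include <linux/pfn_t.h>
    #include <linux/scatterlist.h>

    /*
     * Illustrative sketch only; the real helper is defined elsewhere in
     * the series.  Assumes a page-backed pfn_t, so pfn_t_to_page() is a
     * valid way to reach the underlying struct page.
     */
    static inline void sg_set_pfn(struct scatterlist *sg, pfn_t pfn,
                                  unsigned int len, unsigned int offset)
    {
        sg_set_page(sg, pfn_t_to_page(pfn), len, offset);
    }

A pfn with no struct page behind it (some device memory, for instance) could not be represented this way, which is presumably why callers are routed through a dedicated helper rather than open-coding pfn_t_to_page().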
@@ -412,7 +412,7 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
*sg = sg_next(*sg);
}
- sg_set_page(*sg, bvec_page(bvec), nbytes, bvec->bv_offset);
+ sg_set_pfn(*sg, bvec->bv_pfn, nbytes, bvec->bv_offset);
(*nsegs)++;
}
*bvprv = *bvec;
@@ -422,7 +422,7 @@ static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
struct scatterlist *sglist, struct scatterlist **sg)
{
*sg = sglist;
- sg_set_page(*sg, bvec_page(&bv), bv.bv_len, bv.bv_offset);
+ sg_set_pfn(*sg, bv.bv_pfn, bv.bv_len, bv.bv_offset);
return 1;
}
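In each of these conversions the removed code looked the page up with bvec_page() while the new code passes bv_pfn straight through, so both forms describe the same buffer. A sketch of what the bvec_page() accessor presumably reduces to once the bio_vec carries a pfn (the real definition belongs to the series, not to these hunks):

    /* Assumed shape of the accessor the callers above no longer need. */
    static inline struct page *bvec_page(const struct bio_vec *bvec)
    {
        return pfn_t_to_page(bvec->bv_pfn);
    }

Read that way, sg_set_pfn(sg, bv.bv_pfn, len, off) and sg_set_page(sg, bvec_page(&bv), len, off) address the same memory, and the conversion is mechanical.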
@@ -327,8 +327,7 @@ void drbd_csum_bio(struct crypto_ahash *tfm, struct bio *bio, void *digest)
crypto_ahash_init(req);
bio_for_each_segment(bvec, bio, iter) {
- sg_set_page(&sg, bvec_page(&bvec), bvec.bv_len,
- bvec.bv_offset);
+ sg_set_pfn(&sg, bvec.bv_pfn, bvec.bv_len, bvec.bv_offset);
ahash_request_set_crypt(req, &sg, NULL, sg.length);
crypto_ahash_update(req);
/* REQ_OP_WRITE_SAME has only one segment,
@@ -1089,15 +1089,15 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
sg_init_table(dmreq->sg_in, 4);
sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
- sg_set_page(&dmreq->sg_in[2], bvec_page(&bv_in), cc->sector_size,
- bv_in.bv_offset);
+ sg_set_pfn(&dmreq->sg_in[2], bv_in.bv_pfn, cc->sector_size,
+ bv_in.bv_offset);
sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);
sg_init_table(dmreq->sg_out, 4);
sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
- sg_set_page(&dmreq->sg_out[2], bvec_page(&bv_out), cc->sector_size,
- bv_out.bv_offset);
+ sg_set_pfn(&dmreq->sg_out[2], bv_out.bv_pfn, cc->sector_size,
+ bv_out.bv_offset);
sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);
if (cc->iv_gen_ops) {
@@ -1180,12 +1180,10 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
sg_out = &dmreq->sg_out[0];
sg_init_table(sg_in, 1);
- sg_set_page(sg_in, bvec_page(&bv_in), cc->sector_size,
- bv_in.bv_offset);
+ sg_set_pfn(sg_in, bv_in.bv_pfn, cc->sector_size, bv_in.bv_offset);
sg_init_table(sg_out, 1);
- sg_set_page(sg_out, bvec_page(&bv_out), cc->sector_size,
- bv_out.bv_offset);
+ sg_set_pfn(sg_out, bv_out.bv_pfn, cc->sector_size, bv_out.bv_offset);
if (cc->iv_gen_ops) {
/* For READs use IV stored in integrity metadata */
@@ -412,7 +412,7 @@ int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io,
* until you consider the typical block size is 4,096B.
* Going through this loop twice should be very rare.
*/
- sg_set_page(&sg, bvec_page(&bv), len, bv.bv_offset);
+ sg_set_pfn(&sg, bv.bv_pfn, len, bv.bv_offset);
ahash_request_set_crypt(req, &sg, NULL, len);
r = verity_complete_op(res, crypto_ahash_update(req));
@@ -724,8 +724,8 @@ kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx,
fragnob = min((int)(kiov->bv_len - offset), nob);
- sg_set_page(sg, bvec_page(kiov), fragnob,
- kiov->bv_offset + offset);
+ sg_set_pfn(sg, kiov->bv_pfn, fragnob,
+ kiov->bv_offset + offset);
sg = sg_next(sg);
if (!sg) {
CERROR("lacking enough sg entries to map tx\n");
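Callers that need a CPU mapping of the payload rather than a scatterlist entry still have to reach a struct page first. A minimal sketch of that pattern under the same page-backed-pfn assumption (the helper name is illustrative, not part of the series):

    #include <linux/bvec.h>
    #include <linux/highmem.h>
    #include <linux/pfn_t.h>
    #include <linux/string.h>

    /*
     * Illustrative helper: copy one segment's payload out of a bio_vec.
     * Assumes bv->bv_pfn is backed by a struct page, so kmap_atomic()
     * on pfn_t_to_page() is valid.
     */
    static void bvec_copy_to_buf(const struct bio_vec *bv, void *buf)
    {
        void *src = kmap_atomic(pfn_t_to_page(bv->bv_pfn));

        memcpy(buf, src + bv->bv_offset, bv->bv_len);
        kunmap_atomic(src);
    }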