@@ -174,7 +174,6 @@ void bio_integrity_unmap_free_user(struct bio *bio)
bio->bi_integrity = NULL;
bio->bi_opf &= ~REQ_INTEGRITY;
}
-EXPORT_SYMBOL(bio_integrity_unmap_free_user);

/**
* bio_integrity_add_page - Attach integrity metadata
@@ -757,6 +757,9 @@ int blk_rq_unmap_user(struct bio *bio)
bio_release_pages(bio, bio_data_dir(bio) == READ);
}

+ if (bio_integrity(bio))
+ bio_integrity_unmap_free_user(bio);
+
next_bio = bio;
bio = bio->bi_next;
blk_mq_map_bio_put(next_bio);
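
For orientation, the hunk above makes blk_rq_unmap_user() the single place where user mappings are torn down. Assembled with its usual surrounding context in block/blk-map.c, the loop reads roughly as below; only the bio_integrity() check and the lines visible in the hunk come from the diff itself, while the bi_private/bio_uncopy_user branch and the prologue are a reconstruction and may differ in detail.

int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *next_bio;
	int ret = 0, ret2;

	while (bio) {
		/* copied (bounce) mappings are written back, pinned pages released */
		if (bio->bi_private) {
			ret2 = bio_uncopy_user(bio);
			if (ret2 && !ret)
				ret = ret2;
		} else {
			bio_release_pages(bio, bio_data_dir(bio) == READ);
		}

		/* new: user-mapped integrity metadata is also torn down here */
		if (bio_integrity(bio))
			bio_integrity_unmap_free_user(bio);

		next_bio = bio;
		bio = bio->bi_next;
		blk_mq_map_bio_put(next_bio);
	}

	return ret;
}
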
@@ -112,13 +112,6 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
return req;
}

-static void nvme_unmap_bio(struct bio *bio)
-{
- if (bio_integrity(bio))
- bio_integrity_unmap_free_user(bio);
- blk_rq_unmap_user(bio);
-}
-
static int nvme_map_user_request(struct request *req, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags)
@@ -165,7 +158,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,

out_unmap:
if (bio)
- nvme_unmap_bio(bio);
+ blk_rq_unmap_user(bio);
out:
blk_mq_free_request(req);
return ret;
@@ -203,7 +196,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
if (result)
*result = le64_to_cpu(nvme_req(req)->result.u64);
if (bio)
- nvme_unmap_bio(bio);
+ blk_rq_unmap_user(bio);
blk_mq_free_request(req);

if (effects)
@@ -414,7 +407,7 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);

if (pdu->bio)
- nvme_unmap_bio(pdu->bio);
+ blk_rq_unmap_user(pdu->bio);

io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
}
@@ -440,7 +433,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
*/
if (blk_rq_is_poll(req)) {
if (pdu->bio)
- nvme_unmap_bio(pdu->bio);
+ blk_rq_unmap_user(pdu->bio);
io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
} else {
io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
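
Taken together, the driver-side changes reduce every NVMe passthrough teardown site to a single blk_rq_unmap_user() call, because the block layer now also frees user-mapped integrity metadata there. A minimal sketch of the resulting submitter pattern follows; the function name and error handling are illustrative and not part of the patch, only the block-layer calls (blk_rq_map_user, blk_execute_rq, blk_rq_unmap_user, blk_status_to_errno) are real APIs.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/*
 * Illustrative sketch only: a synchronous passthrough submission showing
 * the post-patch teardown contract.  Whatever blk_rq_map_user() set up,
 * including any user-mapped integrity payload attached to the same bio,
 * is released by the single blk_rq_unmap_user() call at the end.  The
 * caller still frees the request itself with blk_mq_free_request().
 */
static int demo_submit_user_buffer(struct request_queue *q, struct request *req,
				   void __user *ubuf, unsigned long len)
{
	blk_status_t status;
	struct bio *bio;
	int ret;

	ret = blk_rq_map_user(q, req, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		return ret;

	/* remember the bio; req->bio may be cleared once the request completes */
	bio = req->bio;

	/* (a driver could map user integrity metadata onto the same bio here) */

	status = blk_execute_rq(req, false);

	/* one call releases data pages and any user-mapped integrity metadata */
	ret = blk_rq_unmap_user(bio);

	return ret ? ret : blk_status_to_errno(status);
}
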