
[for-next,v5,3/4] block: add helper to map bvec iterator for passthrough

Message ID: 20220906062721.62630-4-joshi.k@samsung.com
State: New, archived
Series: [for-next,v5,1/4] io_uring: introduce io_uring_cmd_import_fixed

Commit Message

Kanchan Joshi Sept. 6, 2022, 6:27 a.m. UTC
Add blk_rq_map_user_bvec, which maps a bvec iterator into a bio and
places that bio into the request. This helper will be used in nvme for
uring passthrough with a fixed buffer.
While at it, add another helper, bio_map_get, to reduce code
duplication.

Signed-off-by: Kanchan Joshi <joshi.k@samsung.com>
Signed-off-by: Anuj Gupta <anuj20.g@samsung.com>
---
 block/blk-map.c        | 94 +++++++++++++++++++++++++++++++++++++-----
 include/linux/blk-mq.h |  1 +
 2 files changed, 85 insertions(+), 10 deletions(-)
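
For context, the intended caller (the nvme side, patch 4/4) would do
roughly the following. This is a reconstructed sketch, not code from the
series; ubuf, len, req and ioucmd are illustrative names, and error
handling is trimmed:

	struct iov_iter iter;
	int ret;

	/*
	 * Build a bvec iterator over the pre-registered (fixed) buffer;
	 * io_uring_cmd_import_fixed() comes from patch 1/4.
	 */
	ret = io_uring_cmd_import_fixed(ubuf, len, rq_data_dir(req),
					&iter, ioucmd);
	if (ret < 0)
		return ret;

	/* no page pinning, no iovec allocation: the bvecs already exist */
	ret = blk_rq_map_user_bvec(req, &iter);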

Comments

Chaitanya Kulkarni Sept. 7, 2022, 3:32 p.m. UTC | #1
On 9/5/22 23:27, Kanchan Joshi wrote:
> [...]
> diff --git a/block/blk-map.c b/block/blk-map.c
> index f3768876d618..e2f268167342 100644
> --- a/block/blk-map.c
> +++ b/block/blk-map.c
> @@ -241,17 +241,10 @@ static void bio_map_put(struct bio *bio)
>   	}
>   }
>   
> -static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
> +static struct bio *bio_map_get(struct request *rq, unsigned int nr_vecs,
>   		gfp_t gfp_mask)
>   {
> -	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
> -	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
>   	struct bio *bio;
> -	int ret;
> -	int j;
> -
> -	if (!iov_iter_count(iter))
> -		return -EINVAL;
>   
>   	if (rq->cmd_flags & REQ_POLLED) {
>   		blk_opf_t opf = rq->cmd_flags | REQ_ALLOC_CACHE;
> @@ -259,13 +252,31 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
>   		bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask,
>   					&fs_bio_set);
>   		if (!bio)
> -			return -ENOMEM;
> +			return NULL;
>   	} else {
>   		bio = bio_kmalloc(nr_vecs, gfp_mask);
>   		if (!bio)
> -			return -ENOMEM;
> +			return NULL;
>   		bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
>   	}
> +	return bio;
> +}
> +
> +static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
> +		gfp_t gfp_mask)
> +{
> +	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
> +	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
> +	struct bio *bio;
> +	int ret;
> +	int j;
> +
> +	if (!iov_iter_count(iter))
> +		return -EINVAL;
> +
> +	bio = bio_map_get(rq, nr_vecs, gfp_mask);
> +	if (bio == NULL)
> +		return -ENOMEM;
>   
>   	while (iov_iter_count(iter)) {
>   		struct page **pages, *stack_pages[UIO_FASTIOV];
> @@ -612,6 +623,69 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
>   }
>   EXPORT_SYMBOL(blk_rq_map_user);
>   
> +/* Prepare bio for passthrough IO given an existing bvec iter */
> +int blk_rq_map_user_bvec(struct request *rq, struct iov_iter *iter)
> +{
> +	struct request_queue *q = rq->q;
> +	size_t iter_count, nr_segs;
> +	struct bio *bio;
> +	struct bio_vec *bv, *bvec_arr, *bvprvp = NULL;
> +	struct queue_limits *lim = &q->limits;
> +	unsigned int nsegs = 0, bytes = 0;
> +	int ret, i;
> +

consider this (untested); it also gives the loop variable i the same
data type as the variable it is compared against in the loop, nr_segs,
i.e. size_t :-

+       struct bio_vec *bv, *bvec_arr, *bvprvp = NULL;
+       struct request_queue *q = rq->q;
+       struct queue_limits *lim = &q->limits;
+       unsigned int nsegs = 0, bytes = 0;
+       size_t iter_count, nr_segs, i;
+       struct bio *bio;
+       int ret;


> +	iter_count = iov_iter_count(iter);
> +	nr_segs = iter->nr_segs;
> +
> +	if (!iter_count || (iter_count >> 9) > queue_max_hw_sectors(q))
> +		return -EINVAL;

can we remove the parentheses around iter_count >> 9 without impacting
the intended functionality?

> +	if (nr_segs > queue_max_segments(q))
> +		return -EINVAL;
> +
> +	/* no iovecs to alloc, as we already have a BVEC iterator */
> +	bio = bio_map_get(rq, 0, GFP_KERNEL);
> +	if (bio == NULL)
> +		return -ENOMEM;
> +
> +	bio_iov_bvec_set(bio, iter);
> +	blk_rq_bio_prep(rq, bio, nr_segs);
> +
> +	/* loop to perform a bunch of sanity checks */
> +	bvec_arr = (struct bio_vec *)iter->bvec;
> +	for (i = 0; i < nr_segs; i++) {
> +		bv = &bvec_arr[i];

I'd call it bvecs instead of bvec_arr; just personal preference.

> +		/*
> +		 * If the queue doesn't support SG gaps and adding this
> +		 * offset would create a gap, disallow it.
> +		 */
> +		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
> +			ret = -EINVAL;
> +			goto out_free;
> +		}
> +
> +		/* check full condition */
> +		if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len) {
> +			ret = -EINVAL;
> +			goto out_free;
> +		}
> +
> +		if (bytes + bv->bv_len <= iter_count &&
> +				bv->bv_offset + bv->bv_len <= PAGE_SIZE) {
> +			nsegs++;
> +			bytes += bv->bv_len;
> +		} else {
> +			ret = -EINVAL;
> +			goto out_free;

You are only calling goto out_free; from the loop, always with
ret = -EINVAL. You can remove the redundant ret = -EINVAL assignments
in the loop and return -EINVAL from the out_free: label instead of
return ret. That will also allow us to remove the braces in the loop,
and to drop the ret variable altogether, which is worth doing since I
guess we are in the fast path.
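
A sketch of the loop restructured along those lines (untested; it also
uses the bvecs name suggested above):

	for (i = 0; i < nr_segs; i++) {
		bv = &bvecs[i];
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset))
			goto out_free;
		/* check full condition */
		if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
			goto out_free;
		/* inverted form of the original success condition */
		if (bytes + bv->bv_len > iter_count ||
				bv->bv_offset + bv->bv_len > PAGE_SIZE)
			goto out_free;
		nsegs++;
		bytes += bv->bv_len;
		bvprvp = bv;
	}
	return 0;
out_free:
	bio_map_put(bio);
	return -EINVAL;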

> +		}
> +		bvprvp = bv;
> +	}
> +	return 0;
> +out_free:
> +	bio_map_put(bio);
> +	return ret;
> +}
> +EXPORT_SYMBOL(blk_rq_map_user_bvec);

why not use EXPORT_SYMBOL_GPL() for the new addition?

I'm aware that blk-map.c only uses EXPORT_SYMBOL().
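
i.e.:

	EXPORT_SYMBOL_GPL(blk_rq_map_user_bvec);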

> +
>   /**
>    * blk_rq_unmap_user - unmap a request with user data
>    * @bio:	       start of bio list
> diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
> index b43c81d91892..83bef362f0f9 100644
> --- a/include/linux/blk-mq.h
> +++ b/include/linux/blk-mq.h
> @@ -970,6 +970,7 @@ struct rq_map_data {
>   	bool from_user;
>   };
>   
> +int blk_rq_map_user_bvec(struct request *rq, struct iov_iter *iter);
>   int blk_rq_map_user(struct request_queue *, struct request *,
>   		struct rq_map_data *, void __user *, unsigned long, gfp_t);
>   int blk_rq_map_user_iov(struct request_queue *, struct request *,
Kanchan Joshi Sept. 8, 2022, 10:52 a.m. UTC | #2
On Wed, Sep 07, 2022 at 03:32:26PM +0000, Chaitanya Kulkarni wrote:
>On 9/5/22 23:27, Kanchan Joshi wrote:
> [...]
>
>> +	iter_count = iov_iter_count(iter);
>> +	nr_segs = iter->nr_segs;
>> +
>> +	if (!iter_count || (iter_count >> 9) > queue_max_hw_sectors(q))
>> +		return -EINVAL;
>
>can we remove the parentheses around iter_count >> 9 without impacting
>the intended functionality?

I think removing them makes it harder to read.
I will fold all other changes you mentioned in v6.
Jens Axboe Sept. 8, 2022, 2:46 p.m. UTC | #3
On 9/8/22 4:52 AM, Kanchan Joshi wrote:
> On Wed, Sep 07, 2022 at 03:32:26PM +0000, Chaitanya Kulkarni wrote:
>> On 9/5/22 23:27, Kanchan Joshi wrote:
>> [...]
>>
>>> +    iter_count = iov_iter_count(iter);
>>> +    nr_segs = iter->nr_segs;
>>> +
>>> +    if (!iter_count || (iter_count >> 9) > queue_max_hw_sectors(q))
>>> +        return -EINVAL;
>>
>> can we remove the parentheses around iter_count >> 9 without impacting
>> the intended functionality?
> 
> I think removing them makes it harder to read.
> I will fold all other changes you mentioned in v6.

Agree - if you have to think about operator precedence, then that's a
sign that the code is less readable and more fragile.
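
(Side note: >> binds tighter than the relational operators in C, so
"(iter_count >> 9) > queue_max_hw_sectors(q)" and
"iter_count >> 9 > queue_max_hw_sectors(q)" parse identically; the
parentheses are purely a readability aid, which is the point being made
above.)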
Pankaj Raghav Sept. 8, 2022, 3:11 p.m. UTC | #4
>> [...]
>>
>>> +    iter_count = iov_iter_count(iter);
>>> +    nr_segs = iter->nr_segs;
>>> +
>>> +    if (!iter_count || (iter_count >> 9) > queue_max_hw_sectors(q))
>>> +        return -EINVAL;
>>
>> can we remove the parentheses around iter_count >> 9 without impacting
>> the intended functionality?
> 
You can also use the SECTOR_SHIFT macro instead of hard-coding 9.
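
i.e.:

	if (!iter_count || (iter_count >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
		return -EINVAL;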

> I think removing them makes it harder to read.
> I will fold all other changes you mentioned in v6.
> 
>

Patch

diff --git a/block/blk-map.c b/block/blk-map.c
index f3768876d618..e2f268167342 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -241,17 +241,10 @@  static void bio_map_put(struct bio *bio)
 	}
 }
 
-static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
+static struct bio *bio_map_get(struct request *rq, unsigned int nr_vecs,
 		gfp_t gfp_mask)
 {
-	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
-	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
 	struct bio *bio;
-	int ret;
-	int j;
-
-	if (!iov_iter_count(iter))
-		return -EINVAL;
 
 	if (rq->cmd_flags & REQ_POLLED) {
 		blk_opf_t opf = rq->cmd_flags | REQ_ALLOC_CACHE;
@@ -259,13 +252,31 @@  static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 		bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask,
 					&fs_bio_set);
 		if (!bio)
-			return -ENOMEM;
+			return NULL;
 	} else {
 		bio = bio_kmalloc(nr_vecs, gfp_mask);
 		if (!bio)
-			return -ENOMEM;
+			return NULL;
 		bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
 	}
+	return bio;
+}
+
+static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
+		gfp_t gfp_mask)
+{
+	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
+	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
+	struct bio *bio;
+	int ret;
+	int j;
+
+	if (!iov_iter_count(iter))
+		return -EINVAL;
+
+	bio = bio_map_get(rq, nr_vecs, gfp_mask);
+	if (bio == NULL)
+		return -ENOMEM;
 
 	while (iov_iter_count(iter)) {
 		struct page **pages, *stack_pages[UIO_FASTIOV];
@@ -612,6 +623,69 @@  int blk_rq_map_user(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(blk_rq_map_user);
 
+/* Prepare bio for passthrough IO given an existing bvec iter */
+int blk_rq_map_user_bvec(struct request *rq, struct iov_iter *iter)
+{
+	struct request_queue *q = rq->q;
+	size_t iter_count, nr_segs;
+	struct bio *bio;
+	struct bio_vec *bv, *bvec_arr, *bvprvp = NULL;
+	struct queue_limits *lim = &q->limits;
+	unsigned int nsegs = 0, bytes = 0;
+	int ret, i;
+
+	iter_count = iov_iter_count(iter);
+	nr_segs = iter->nr_segs;
+
+	if (!iter_count || (iter_count >> 9) > queue_max_hw_sectors(q))
+		return -EINVAL;
+	if (nr_segs > queue_max_segments(q))
+		return -EINVAL;
+
+	/* no iovecs to alloc, as we already have a BVEC iterator */
+	bio = bio_map_get(rq, 0, GFP_KERNEL);
+	if (bio == NULL)
+		return -ENOMEM;
+
+	bio_iov_bvec_set(bio, iter);
+	blk_rq_bio_prep(rq, bio, nr_segs);
+
+	/* loop to perform a bunch of sanity checks */
+	bvec_arr = (struct bio_vec *)iter->bvec;
+	for (i = 0; i < nr_segs; i++) {
+		bv = &bvec_arr[i];
+		/*
+		 * If the queue doesn't support SG gaps and adding this
+		 * offset would create a gap, disallow it.
+		 */
+		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
+			ret = -EINVAL;
+			goto out_free;
+		}
+
+		/* check full condition */
+		if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len) {
+			ret = -EINVAL;
+			goto out_free;
+		}
+
+		if (bytes + bv->bv_len <= iter_count &&
+				bv->bv_offset + bv->bv_len <= PAGE_SIZE) {
+			nsegs++;
+			bytes += bv->bv_len;
+		} else {
+			ret = -EINVAL;
+			goto out_free;
+		}
+		bvprvp = bv;
+	}
+	return 0;
+out_free:
+	bio_map_put(bio);
+	return ret;
+}
+EXPORT_SYMBOL(blk_rq_map_user_bvec);
+
 /**
  * blk_rq_unmap_user - unmap a request with user data
  * @bio:	       start of bio list
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b43c81d91892..83bef362f0f9 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -970,6 +970,7 @@  struct rq_map_data {
 	bool from_user;
 };
 
+int blk_rq_map_user_bvec(struct request *rq, struct iov_iter *iter);
 int blk_rq_map_user(struct request_queue *, struct request *,
 		struct rq_map_data *, void __user *, unsigned long, gfp_t);
 int blk_rq_map_user_iov(struct request_queue *, struct request *,