[6/6] xen-blkfront: prepare request locally, only then put it on the shared ring

Message ID 951a221b0e655b3077d1f96ac365194320bc8809.1525122026.git-series.marmarek@invisiblethingslab.com
State New, archived

Commit Message

Marek Marczykowski-Górecki April 30, 2018, 9:01 p.m. UTC
Do not reuse data which theoretically might have already been modified by
the backend. This is mostly about the private copy of the request
(info->shadow[id].req): make sure the request saved there is really the
one just filled.

This is complementary to XSA155.

CC: stable@vger.kernel.org
Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
---
 drivers/block/xen-blkfront.c | 76 +++++++++++++++++++++----------------
 1 file changed, 44 insertions(+), 32 deletions(-)
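
In other words, the request is now built in a frontend-local struct and only
copied into the shared ring once complete, and the shadow copy used for
recovery is taken from that local struct as well. A simplified sketch of the
resulting pattern (not the literal driver code, details elided):

	struct blkif_request ring_req = { 0 };
	unsigned long id;

	/* Build the request entirely in frontend-local memory first. */
	id = blkif_ring_get_request(rinfo, req, &ring_req);
	ring_req.operation = BLKIF_OP_READ;
	/* ... fill the remaining fields ... */

	/* Copy the finished request into the shared ring in one go. */
	*RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt) = ring_req;
	wmb();
	rinfo->ring.req_prod_pvt++;

	/* The private copy comes from the local struct, which the
	 * backend cannot have modified. */
	rinfo->shadow[id].req = ring_req;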

Comments

Roger Pau Monne May 1, 2018, 8:22 a.m. UTC | #1
On Mon, Apr 30, 2018 at 11:01:50PM +0200, Marek Marczykowski-Górecki wrote:
> Do not reuse data which theoretically might have already been modified by
> the backend. This is mostly about the private copy of the request
> (info->shadow[id].req): make sure the request saved there is really the
> one just filled.
>
> This is complementary to XSA155.
> 
> CC: stable@vger.kernel.org
> Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
> ---
>  drivers/block/xen-blkfront.c | 76 +++++++++++++++++++++----------------
>  1 file changed, 44 insertions(+), 32 deletions(-)
> 
> diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
> index 3926811..b100b55 100644
> --- a/drivers/block/xen-blkfront.c
> +++ b/drivers/block/xen-blkfront.c
> @@ -525,19 +525,16 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
>  
>  static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,

The name of this function should be changed IMO, since you are no
longer getting a request from the ring, but just initializing a
request struct.
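
E.g. something like the following (the name blkif_ring_init_request is only
a suggestion; the body is unchanged from the patch):

static unsigned long blkif_ring_init_request(struct blkfront_ring_info *rinfo,
					     struct request *req,
					     struct blkif_request *ring_req)
{
	unsigned long id = get_id_from_freelist(rinfo);

	/* Track the block layer request in the shadow array. */
	rinfo->shadow[id].request = req;
	rinfo->shadow[id].status = REQ_WAITING;
	rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;

	/* Only the caller's local copy of the ring request is touched. */
	ring_req->u.rw.id = id;

	return id;
}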

>  					    struct request *req,
> -					    struct blkif_request **ring_req)
> +					    struct blkif_request *ring_req)
>  {
>  	unsigned long id;
>  
> -	*ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
> -	rinfo->ring.req_prod_pvt++;
> -
>  	id = get_id_from_freelist(rinfo);
>  	rinfo->shadow[id].request = req;
>  	rinfo->shadow[id].status = REQ_WAITING;
>  	rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
>  
> -	(*ring_req)->u.rw.id = id;
> +	ring_req->u.rw.id = id;
>  
>  	return id;
>  }
> @@ -545,23 +542,28 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
>  static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
>  {
>  	struct blkfront_info *info = rinfo->dev_info;
> -	struct blkif_request *ring_req;
> +	struct blkif_request ring_req = { 0 };
>  	unsigned long id;
>  
>  	/* Fill out a communications ring structure. */
>  	id = blkif_ring_get_request(rinfo, req, &ring_req);

Maybe I'm missing something obvious here, but you are adding a struct
allocated on the stack to the shadow ring copy. Isn't this dangerous?

The pointer stored in the shadow ring copy is going to be invalid
after returning from this function.

The same comment applies to the other calls to blkif_ring_get_request
below that pass a ring_req allocated on the stack.

Thanks, Roger.
Roger Pau Monne May 1, 2018, 9:15 a.m. UTC | #2
On Tue, May 01, 2018 at 09:22:31AM +0100, Roger Pau Monné wrote:
> On Mon, Apr 30, 2018 at 11:01:50PM +0200, Marek Marczykowski-Górecki wrote:
> >  					    struct request *req,
> > -					    struct blkif_request **ring_req)
> > +					    struct blkif_request *ring_req)
> >  {
> >  	unsigned long id;
> >  
> > -	*ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
> > -	rinfo->ring.req_prod_pvt++;
> > -
> >  	id = get_id_from_freelist(rinfo);
> >  	rinfo->shadow[id].request = req;
> >  	rinfo->shadow[id].status = REQ_WAITING;
> >  	rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
> >  
> > -	(*ring_req)->u.rw.id = id;
> > +	ring_req->u.rw.id = id;
> >  
> >  	return id;
> >  }
> > @@ -545,23 +542,28 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
> >  static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
> >  {
> >  	struct blkfront_info *info = rinfo->dev_info;
> > -	struct blkif_request *ring_req;
> > +	struct blkif_request ring_req = { 0 };
> >  	unsigned long id;
> >  
> >  	/* Fill out a communications ring structure. */
> >  	id = blkif_ring_get_request(rinfo, req, &ring_req);
> 
> Maybe I'm missing something obvious here, but you are adding a struct
> allocated on the stack to the shadow ring copy. Isn't this dangerous?

The above comment is wrong: you are storing a pointer to 'req' in the
shadow ring copy, which is fine, since that is not the ring request.
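
I.e. the shadow entry stores the blkif_request by value, so the stack
lifetime of ring_req does not matter:

	/* Struct assignment copies the whole request into the shadow
	 * array; no pointer to the on-stack variable is retained. */
	rinfo->shadow[id].req = ring_req;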

Roger.

Patch

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 3926811..b100b55 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -525,19 +525,16 @@  static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
 
 static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
 					    struct request *req,
-					    struct blkif_request **ring_req)
+					    struct blkif_request *ring_req)
 {
 	unsigned long id;
 
-	*ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
-	rinfo->ring.req_prod_pvt++;
-
 	id = get_id_from_freelist(rinfo);
 	rinfo->shadow[id].request = req;
 	rinfo->shadow[id].status = REQ_WAITING;
 	rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
 
-	(*ring_req)->u.rw.id = id;
+	ring_req->u.rw.id = id;
 
 	return id;
 }
@@ -545,23 +542,28 @@  static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
 static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
 {
 	struct blkfront_info *info = rinfo->dev_info;
-	struct blkif_request *ring_req;
+	struct blkif_request ring_req = { 0 };
 	unsigned long id;
 
 	/* Fill out a communications ring structure. */
 	id = blkif_ring_get_request(rinfo, req, &ring_req);
 
-	ring_req->operation = BLKIF_OP_DISCARD;
-	ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
-	ring_req->u.discard.id = id;
-	ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
+	ring_req.operation = BLKIF_OP_DISCARD;
+	ring_req.u.discard.nr_sectors = blk_rq_sectors(req);
+	ring_req.u.discard.id = id;
+	ring_req.u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
 	if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
-		ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
+		ring_req.u.discard.flag = BLKIF_DISCARD_SECURE;
 	else
-		ring_req->u.discard.flag = 0;
+		ring_req.u.discard.flag = 0;
+
+	/* make the request available to the backend */
+	*RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt) = ring_req;
+	wmb();
+	rinfo->ring.req_prod_pvt++;
 
 	/* Keep a private copy so we can reissue requests when recovering. */
-	rinfo->shadow[id].req = *ring_req;
+	rinfo->shadow[id].req = ring_req;
 
 	return 0;
 }
@@ -693,7 +695,7 @@  static void blkif_setup_extra_req(struct blkif_request *first,
 static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
 {
 	struct blkfront_info *info = rinfo->dev_info;
-	struct blkif_request *ring_req, *extra_ring_req = NULL;
+	struct blkif_request ring_req = { 0 }, extra_ring_req = { 0 };
 	unsigned long id, extra_id = NO_ASSOCIATED_ID;
 	bool require_extra_req = false;
 	int i;
@@ -758,16 +760,16 @@  static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 		 * BLKIF_OP_WRITE
 		 */
 		BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
-		ring_req->operation = BLKIF_OP_INDIRECT;
-		ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
+		ring_req.operation = BLKIF_OP_INDIRECT;
+		ring_req.u.indirect.indirect_op = rq_data_dir(req) ?
 			BLKIF_OP_WRITE : BLKIF_OP_READ;
-		ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
-		ring_req->u.indirect.handle = info->handle;
-		ring_req->u.indirect.nr_segments = num_grant;
+		ring_req.u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
+		ring_req.u.indirect.handle = info->handle;
+		ring_req.u.indirect.nr_segments = num_grant;
 	} else {
-		ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
-		ring_req->u.rw.handle = info->handle;
-		ring_req->operation = rq_data_dir(req) ?
+		ring_req.u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
+		ring_req.u.rw.handle = info->handle;
+		ring_req.operation = rq_data_dir(req) ?
 			BLKIF_OP_WRITE : BLKIF_OP_READ;
 		if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
 			/*
@@ -778,15 +780,15 @@  static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 			 * since it is guaranteed ordered WRT previous writes.)
 			 */
 			if (info->feature_flush && info->feature_fua)
-				ring_req->operation =
+				ring_req.operation =
 					BLKIF_OP_WRITE_BARRIER;
 			else if (info->feature_flush)
-				ring_req->operation =
+				ring_req.operation =
 					BLKIF_OP_FLUSH_DISKCACHE;
 			else
-				ring_req->operation = 0;
+				ring_req.operation = 0;
 		}
-		ring_req->u.rw.nr_segments = num_grant;
+		ring_req.u.rw.nr_segments = num_grant;
 		if (unlikely(require_extra_req)) {
 			extra_id = blkif_ring_get_request(rinfo, req,
 							  &extra_ring_req);
@@ -796,7 +798,7 @@  static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 			 */
 			rinfo->shadow[extra_id].num_sg = 0;
 
-			blkif_setup_extra_req(ring_req, extra_ring_req);
+			blkif_setup_extra_req(&ring_req, &extra_ring_req);
 
 			/* Link the 2 requests together */
 			rinfo->shadow[extra_id].associated_id = id;
@@ -804,12 +806,12 @@  static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 		}
 	}
 
-	setup.ring_req = ring_req;
+	setup.ring_req = &ring_req;
 	setup.id = id;
 
 	setup.require_extra_req = require_extra_req;
 	if (unlikely(require_extra_req))
-		setup.extra_ring_req = extra_ring_req;
+		setup.extra_ring_req = &extra_ring_req;
 
 	for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) {
 		BUG_ON(sg->offset + sg->length > PAGE_SIZE);
@@ -831,10 +833,20 @@  static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 	if (setup.segments)
 		kunmap_atomic(setup.segments);
 
+	/* make the request available to the backend */
+	*RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt) = ring_req;
+	wmb();
+	rinfo->ring.req_prod_pvt++;
 	/* Keep a private copy so we can reissue requests when recovering. */
-	rinfo->shadow[id].req = *ring_req;
-	if (unlikely(require_extra_req))
-		rinfo->shadow[extra_id].req = *extra_ring_req;
+	rinfo->shadow[id].req = ring_req;
+
+	if (unlikely(require_extra_req)) {
+		*RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt) = extra_ring_req;
+		wmb();
+		rinfo->ring.req_prod_pvt++;
+		/* Keep a private copy so we can reissue requests when recovering. */
+		rinfo->shadow[extra_id].req = extra_ring_req;
+	}
 
 	if (new_persistent_gnts)
 		gnttab_free_grant_references(setup.gref_head);
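
For reference, the requests staged above only become visible to the backend
once the producer index is pushed; in the driver this happens in
flush_requests(), which looks roughly like this:

static inline void flush_requests(struct blkfront_ring_info *rinfo)
{
	int notify;

	/* Publish req_prod_pvt to the shared ring (with the appropriate
	 * barrier) and check whether the backend needs an event. */
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);

	if (notify)
		notify_remote_via_irq(rinfo->irq);
}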