From patchwork Mon Sep 20 00:01:52 2010
X-Patchwork-Submitter: Michał Mirosław
X-Patchwork-Id: 194102
X-Patchwork-Delegate: snitzer@redhat.com
From: Michał Mirosław
To: dm-devel@redhat.com
Date: Mon, 20 Sep 2010 02:01:52 +0200
Message-ID: <20100920000152.GA1855@rere.qmqm.pl>
Subject: [dm-devel] [PATCH] dm: Fix possible NULL dereferences

If bio_alloc_bioset() returns NULL, or bio_integrity_clone() fails, the
bio and request cloning paths in dm.c currently use the result unchecked.
Check the return values in split_bvec(), clone_bio(),
__issue_target_request() and dm_rq_barrier(), release the partially
constructed clone where needed, and propagate -EIO to the callers by
giving __issue_target_request(), __issue_target_requests() and
__clone_and_map_simple() an int return, so that a failed allocation
aborts the mapping instead of dereferencing NULL.

---
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ac384b2..60cfacd 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1129,6 +1129,9 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 	struct bio_vec *bv = bio->bi_io_vec + idx;
 
 	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
+	if (unlikely(!clone))
+		return NULL;
+
 	clone->bi_destructor = dm_bio_destructor;
 	*clone->bi_io_vec = *bv;
 
@@ -1142,7 +1145,10 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 	clone->bi_flags |= 1 << BIO_CLONED;
 
 	if (bio_integrity(bio)) {
-		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
+		if (bio_integrity_clone(clone, bio, GFP_NOIO, bs) < 0) {
+			bio_put(clone);
+			return NULL;
+		}
 		bio_integrity_trim(clone,
 				   bio_sector_offset(bio, idx, offset), len);
 	}
@@ -1160,6 +1166,9 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 	struct bio *clone;
 
 	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
+	if (unlikely(!clone))
+		return NULL;
+
 	__bio_clone(clone, bio);
 	clone->bi_rw &= ~REQ_HARDBARRIER;
 	clone->bi_destructor = dm_bio_destructor;
@@ -1170,7 +1179,10 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 	clone->bi_flags &= ~(1 << BIO_SEG_VALID);
 
 	if (bio_integrity(bio)) {
-		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
+		if (bio_integrity_clone(clone, bio, GFP_NOIO, bs) < 0) {
+			bio_put(clone);
+			return NULL;
+		}
 
 		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
 			bio_integrity_trim(clone,
@@ -1192,7 +1204,7 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
 	return tio;
 }
 
-static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
+static int __issue_target_request(struct clone_info *ci, struct dm_target *ti,
 				   unsigned request_nr, sector_t len)
 {
 	struct dm_target_io *tio = alloc_tio(ci, ti);
@@ -1206,6 +1218,9 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
 	 * and discard, so no need for concern about wasted bvec allocations.
 	 */
 	clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
+	if (unlikely(!clone))
+		return -EIO;
+
 	__bio_clone(clone, ci->bio);
 	clone->bi_destructor = dm_bio_destructor;
 	if (len) {
@@ -1214,15 +1229,18 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
 	}
 
 	__map_bio(ti, clone, tio);
+	return 0;
 }
 
-static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
+static int __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
 				    unsigned num_requests, sector_t len)
 {
 	unsigned request_nr;
 
 	for (request_nr = 0; request_nr < num_requests; request_nr++)
-		__issue_target_request(ci, ti, request_nr, len);
+		if (__issue_target_request(ci, ti, request_nr, len) < 0)
+			return -EIO;
+	return 0;
 }
 
 static int __clone_and_map_empty_barrier(struct clone_info *ci)
@@ -1231,7 +1249,8 @@ static int __clone_and_map_empty_barrier(struct clone_info *ci)
 	struct dm_target *ti;
 
 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
-		__issue_target_requests(ci, ti, ti->num_flush_requests, 0);
+		if (__issue_target_requests(ci, ti, ti->num_flush_requests, 0) < 0)
+			return -EIO;
 
 	ci->sector_count = 0;
 
@@ -1241,7 +1260,7 @@ static int __clone_and_map_empty_barrier(struct clone_info *ci)
 /*
  * Perform all io with a single clone.
  */
-static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
+static int __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
 {
 	struct bio *clone, *bio = ci->bio;
 	struct dm_target_io *tio;
@@ -1250,8 +1269,11 @@ static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
 	clone = clone_bio(bio, ci->sector, ci->idx,
 			  bio->bi_vcnt - ci->idx, ci->sector_count,
 			  ci->md->bs);
+	if (unlikely(!clone))
+		return -EIO;
 	__map_bio(ti, clone, tio);
 	ci->sector_count = 0;
+	return 0;
 }
 
 static int __clone_and_map_discard(struct clone_info *ci)
@@ -1274,7 +1296,8 @@ static int __clone_and_map_discard(struct clone_info *ci)
 		len = min(ci->sector_count,
 			  max_io_len_target_boundary(ci->sector, ti));
 
-		__issue_target_requests(ci, ti, ti->num_discard_requests, len);
+		if (__issue_target_requests(ci, ti, ti->num_discard_requests, len) < 0)
+			return -EIO;
 
 		ci->sector += len;
 	} while (ci->sector_count -= len);
@@ -1306,7 +1329,7 @@ static int __clone_and_map(struct clone_info *ci)
 		 * Optimise for the simple case where we can do all of
 		 * the remaining io with a single clone.
 		 */
-		__clone_and_map_simple(ci, ti);
+		return __clone_and_map_simple(ci, ti);
 
 	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
 		/*
@@ -1330,6 +1353,8 @@ static int __clone_and_map(struct clone_info *ci)
 			tio = alloc_tio(ci, ti);
 			clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx,
 					  len, ci->md->bs);
+			if (unlikely(!clone))
+				return -EIO;
 			__map_bio(ti, clone, tio);
 
 			ci->sector += len;
@@ -1359,7 +1384,8 @@ static int __clone_and_map(struct clone_info *ci)
 			clone = split_bvec(bio, ci->sector, ci->idx,
 					   bv->bv_offset + offset, len,
 					   ci->md->bs);
-
+			if (unlikely(!clone))
+				return -EIO;
 			__map_bio(ti, clone, tio);
 
 			ci->sector += len;
@@ -2510,6 +2536,8 @@ static int dm_rq_barrier(struct mapped_device *md)
 		ti = dm_table_get_target(map, i);
 		for (j = 0; j < ti->num_flush_requests; j++) {
 			clone = clone_rq(md->flush_request, md, GFP_NOIO);
+			if (unlikely(!clone))
+				return -EIO;
 			dm_rq_set_target_request_nr(clone, j);
 			atomic_inc(&md->pending[rq_data_dir(clone)]);
 			map_request(ti, clone, md);
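
For readers who want the shape of the change without the dm context: the
pattern applied throughout is that a helper which can fail to allocate
returns NULL (or a negative errno), and every caller checks the result and
propagates -EIO instead of using it. Below is a minimal, self-contained
user-space sketch of that pattern. clone_alloc(), issue_request() and
issue_requests() are hypothetical stand-ins for split_bvec()/clone_bio(),
__issue_target_request() and __issue_target_requests(); this is an
illustration, not kernel code.

/*
 * Illustration only -- not kernel code. Hypothetical names standing in
 * for the dm helpers touched by the patch.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct clone {				/* stand-in for struct bio */
	char data[64];
};

/* Like split_bvec()/clone_bio() after the patch: NULL on failure. */
static struct clone *clone_alloc(const char *src)
{
	struct clone *c = malloc(sizeof(*c));

	if (!c)
		return NULL;		/* caller must check */
	snprintf(c->data, sizeof(c->data), "%s", src);
	return c;
}

/* Like __issue_target_request(): turn a NULL clone into -EIO. */
static int issue_request(const char *src)
{
	struct clone *c = clone_alloc(src);

	if (!c)
		return -EIO;
	printf("issued: %s\n", c->data);
	free(c);
	return 0;
}

/* Like __issue_target_requests(): stop at the first failure. */
static int issue_requests(unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (issue_request("payload") < 0)
			return -EIO;
	return 0;
}

int main(void)
{
	int ret = issue_requests(3);

	if (ret < 0)
		fprintf(stderr, "failed: %s\n", strerror(-ret));
	return ret < 0;
}

The knock-on effect visible in the diff follows from this choice: once
__issue_target_request() can fail, each void function above it on the call
chain has to grow an int return so the error is not silently dropped.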