dm: Fix possible NULL dereferences

Message ID: 20100920000152.GA1855@rere.qmqm.pl (mailing list archive)
State: Not Applicable, archived
Delegated to: Mike Snitzer

Commit Message

Michał Mirosław Sept. 20, 2010, 12:01 a.m. UTC
Check for allocation failures of bio_alloc_bioset() in split_bvec(), clone_bio() and __issue_target_request(), and for errors from bio_integrity_clone(), and propagate -EIO to the callers instead of dereferencing a NULL clone. Do the same for clone_rq() in dm_rq_barrier().
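
The patch applies one pattern throughout: check every bio_alloc_bioset() result before touching it, and if bio_integrity_clone() fails, drop the half-built clone with bio_put() before returning NULL, letting callers turn the failure into -EIO. A minimal sketch of the pattern in isolation (example_clone() is a hypothetical helper for illustration only, not part of the patch; the kernel calls, including the four-argument bio_integrity_clone(), are the 2010-era APIs used in the diff below):

#include <linux/bio.h>

/*
 * Hypothetical helper mirroring the check-and-unwind pattern the
 * patch adds to split_bvec() and clone_bio().
 */
static struct bio *example_clone(struct bio *bio, struct bio_set *bs)
{
	struct bio *clone;

	/* bio_alloc_bioset() may return NULL; never use it unchecked. */
	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	if (unlikely(!clone))
		return NULL;

	__bio_clone(clone, bio);

	if (bio_integrity(bio)) {
		/*
		 * The integrity clone allocates too; on failure, free
		 * the partly built clone instead of leaking it.
		 */
		if (bio_integrity_clone(clone, bio, GFP_NOIO, bs) < 0) {
			bio_put(clone);
			return NULL;
		}
	}

	return clone;	/* a NULL return is mapped to -EIO by callers */
}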

Patch

diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ac384b2..60cfacd 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1129,6 +1129,9 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 	struct bio_vec *bv = bio->bi_io_vec + idx;
 
 	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
+	if (unlikely(!clone))
+		return NULL;
+
 	clone->bi_destructor = dm_bio_destructor;
 	*clone->bi_io_vec = *bv;
 
@@ -1142,7 +1145,10 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 	clone->bi_flags |= 1 << BIO_CLONED;
 
 	if (bio_integrity(bio)) {
-		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
+		if (bio_integrity_clone(clone, bio, GFP_NOIO, bs) < 0) {
+			bio_put(clone);
+			return NULL;
+		}
 		bio_integrity_trim(clone,
 				   bio_sector_offset(bio, idx, offset), len);
 	}
@@ -1160,6 +1166,9 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 	struct bio *clone;
 
 	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
+	if (unlikely(!clone))
+		return NULL;
+
 	__bio_clone(clone, bio);
 	clone->bi_rw &= ~REQ_HARDBARRIER;
 	clone->bi_destructor = dm_bio_destructor;
@@ -1170,7 +1179,10 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 	clone->bi_flags &= ~(1 << BIO_SEG_VALID);
 
 	if (bio_integrity(bio)) {
-		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
+		if (bio_integrity_clone(clone, bio, GFP_NOIO, bs) < 0) {
+			bio_put(clone);
+			return NULL;
+		}
 
 		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
 			bio_integrity_trim(clone,
@@ -1192,7 +1204,7 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
 	return tio;
 }
 
-static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
+static int __issue_target_request(struct clone_info *ci, struct dm_target *ti,
 				   unsigned request_nr, sector_t len)
 {
 	struct dm_target_io *tio = alloc_tio(ci, ti);
@@ -1206,6 +1218,9 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
 	 * and discard, so no need for concern about wasted bvec allocations.
 	 */
 	clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
+	if (unlikely(!clone))
+		return -EIO;
+
 	__bio_clone(clone, ci->bio);
 	clone->bi_destructor = dm_bio_destructor;
 	if (len) {
@@ -1214,15 +1229,18 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
 	}
 
 	__map_bio(ti, clone, tio);
+	return 0;
 }
 
-static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
+static int __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
 				    unsigned num_requests, sector_t len)
 {
 	unsigned request_nr;
 
 	for (request_nr = 0; request_nr < num_requests; request_nr++)
-		__issue_target_request(ci, ti, request_nr, len);
+		if (__issue_target_request(ci, ti, request_nr, len) < 0)
+			return -EIO;
+	return 0;
 }
 
 static int __clone_and_map_empty_barrier(struct clone_info *ci)
@@ -1231,7 +1249,8 @@ static int __clone_and_map_empty_barrier(struct clone_info *ci)
 	struct dm_target *ti;
 
 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
-		__issue_target_requests(ci, ti, ti->num_flush_requests, 0);
+		if (__issue_target_requests(ci, ti, ti->num_flush_requests, 0) < 0)
+			return -EIO;
 
 	ci->sector_count = 0;
 
@@ -1241,7 +1260,7 @@ static int __clone_and_map_empty_barrier(struct clone_info *ci)
 /*
  * Perform all io with a single clone.
  */
-static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
+static int __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
 {
 	struct bio *clone, *bio = ci->bio;
 	struct dm_target_io *tio;
@@ -1250,8 +1269,11 @@ static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
 	clone = clone_bio(bio, ci->sector, ci->idx,
 			  bio->bi_vcnt - ci->idx, ci->sector_count,
 			  ci->md->bs);
+	if (unlikely(!clone))
+		return -EIO;
 	__map_bio(ti, clone, tio);
 	ci->sector_count = 0;
+	return 0;
 }
 
 static int __clone_and_map_discard(struct clone_info *ci)
@@ -1274,7 +1296,8 @@ static int __clone_and_map_discard(struct clone_info *ci)
 
 		len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
 
-		__issue_target_requests(ci, ti, ti->num_discard_requests, len);
+		if (__issue_target_requests(ci, ti, ti->num_discard_requests, len) < 0)
+			return -EIO;
 
 		ci->sector += len;
 	} while (ci->sector_count -= len);
@@ -1306,7 +1329,7 @@ static int __clone_and_map(struct clone_info *ci)
 		 * Optimise for the simple case where we can do all of
 		 * the remaining io with a single clone.
 		 */
-		__clone_and_map_simple(ci, ti);
+		return __clone_and_map_simple(ci, ti);
 
 	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
 		/*
@@ -1330,6 +1353,8 @@ static int __clone_and_map(struct clone_info *ci)
 		tio = alloc_tio(ci, ti);
 		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
 				  ci->md->bs);
+		if (unlikely(!clone))
+			return -EIO;
 		__map_bio(ti, clone, tio);
 
 		ci->sector += len;
@@ -1359,7 +1384,8 @@ static int __clone_and_map(struct clone_info *ci)
 			clone = split_bvec(bio, ci->sector, ci->idx,
 					   bv->bv_offset + offset, len,
 					   ci->md->bs);
-
+			if (unlikely(!clone))
+				return -EIO;
 			__map_bio(ti, clone, tio);
 
 			ci->sector += len;
@@ -2510,6 +2536,8 @@ static int dm_rq_barrier(struct mapped_device *md)
 		ti = dm_table_get_target(map, i);
 		for (j = 0; j < ti->num_flush_requests; j++) {
 			clone = clone_rq(md->flush_request, md, GFP_NOIO);
+			if (unlikely(!clone))
+				return -EIO;
 			dm_rq_set_target_request_nr(clone, j);
 			atomic_inc(&md->pending[rq_data_dir(clone)]);
 			map_request(ti, clone, md);
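
With these checks in place, split_bvec(), clone_bio() and __issue_target_request() report failure upward, and __clone_and_map() and dm_rq_barrier() return -EIO when a clone cannot be allocated, so the failure can be reported as an I/O error on the original request rather than causing a NULL pointer dereference.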