@@ -1077,14 +1077,31 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
}
}
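+/*
+ * Debug helper: walk every bio attached to the request and print
+ * each segment's page, offset and length, so a segment-count
+ * mismatch can be matched against the actual bio layout.
+ */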
+static void scsi_dump_req(struct request *req)
+{
+ struct bio_vec bvec;
+ struct bio *bio;
+ struct bvec_iter iter;
+ int i = 0;
+
+ __rq_for_each_bio(bio, req) {
+ printk(KERN_ERR "%d-%p\n", i++, bio);
+ bio_for_each_segment(bvec, bio, iter) {
+ printk(KERN_ERR "\t %p %u %u\n", bvec.bv_page,
+ bvec.bv_offset, bvec.bv_len);
+ }
+ }
+}
+
static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
{
- int count;
+ int count, pre_count = req->nr_phys_segments;
+ int alloc_cnt = queue_max_segments(req->q);
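+ /*
+ * Debug change: size the sg table for the queue's maximum segment
+ * count instead of nr_phys_segments, so that an over-count from
+ * blk_rq_map_sg() still fits in the table and can be reported
+ * below instead of overrunning it.
+ */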
/*
* If sg table allocation fails, requeue request later.
*/
- if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
+ if (unlikely(scsi_alloc_sgtable(sdb, alloc_cnt,
req->mq_ctx != NULL)))
return BLKPREP_DEFER;
@@ -1093,6 +1110,11 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
* each segment.
*/
count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
+
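+ /*
+ * blk_rq_map_sg() produced more entries than were allocated: log
+ * the mismatch and dump the request before the BUG_ON() fires.
+ */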
+ if (count > sdb->table.nents) {
+ printk(KERN_ERR "%s: nr_phys_segments %d vs. mapped count %d\n",
+ __func__, pre_count, count);
+ scsi_dump_req(req);
+ }
BUG_ON(count > sdb->table.nents);
sdb->table.nents = count;
sdb->length = blk_rq_bytes(req);