===================================================================
@@ -1492,6 +1492,30 @@ static void map_request(struct dm_target
dm_get(md);
tio->ti = ti;
+
+ /*
+ * Requests submitted to md->queue are checked against the table/queue
+ * limits at submission time, but those limits may be changed by a
+ * table swap while already-checked requests are still in md->queue.
+ * If the limits have shrunk in the meantime, we could be dispatching
+ * requests here that violate the current limits.
+ * The block layer and device drivers trust struct request to stay
+ * within the queue limits, so dispatching such requests is dangerous
+ * (e.g. it can easily cause a kernel panic).
+ * Recheck the limits here and refuse to dispatch violating requests
+ * in request-based dm.
+ *
+ * dm_kill_unmapped_request() expects tio->ti to be set, so this check
+ * must be done after the assignment above.
+ */
+ r = blk_rq_check_limits(rq->q, rq);
+ if (unlikely(r)) {
+ DMWARN("violating the queue limitation. the limitation may be"
+ " shrunk while there are some requests in the queue.");
+ dm_kill_unmapped_request(clone, r);
+ return;
+ }
+
r = ti->type->map_rq(ti, clone, &tio->info);
switch (r) {
case DM_MAPIO_SUBMITTED: