--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -370,6 +370,19 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
 	blk_queue_max_segments(mq->queue, host->max_segs);
 	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+	/*
+	 * If the host requires kmapping for PIO, we need to ensure
+	 * that no segment spans a page boundary.
+	 */
+	if (host->need_kmap) {
+		unsigned int dma_boundary = host->max_seg_size - 1;
+
+		if (dma_boundary >= PAGE_SIZE)
+			dma_boundary = PAGE_SIZE - 1;
+		blk_queue_segment_boundary(mq->queue, dma_boundary);
+		dma_set_seg_boundary(mmc_dev(host), dma_boundary);
+	}
+
 	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
 	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
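
Side note, not part of the patch: the PAGE_SIZE - 1 boundary applied above
guarantees that every segment fits within a single page, which is exactly
what lets PIO code map a segment with one kmap. A minimal sketch of the
resulting driver pattern, assuming a hypothetical example_host with a FIFO
write helper (kmap_local_page() is the interface in recent kernels):

	#include <linux/highmem.h>
	#include <linux/scatterlist.h>

	struct example_host;			/* hypothetical driver state */
	void example_write_fifo(struct example_host *host,
				const void *buf, unsigned int len);

	static void example_pio_write_segment(struct example_host *host,
					      struct scatterlist *sg)
	{
		/* Safe: the segment can never cross a page boundary. */
		void *buf = kmap_local_page(sg_page(sg)) + sg->offset;

		example_write_fifo(host, buf, sg->length);
		kunmap_local(buf);
	}
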
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -397,6 +397,7 @@ struct mmc_host {
 	unsigned int		doing_retune:1;	/* re-tuning in progress */
 	unsigned int		retune_now:1;	/* do re-tuning at next req */
 	unsigned int		retune_paused:1; /* re-tuning is temporarily disabled */
+	unsigned int		need_kmap:1;	/* only allow single page segments */
 	int			rescan_disable;	/* disable card detection */
 	int			rescan_entered;	/* used with nonremovable devices */
If we want to get rid of the block layer bounce buffering for highmem,
we need to ensure that no segment spans multiple pages so that we can
kmap it. Add a flag to struct mmc_host so that we can handle the block
and DMA layer interactions in common code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/mmc/core/queue.c | 13 +++++++++++++
 include/linux/mmc/host.h |  1 +
 2 files changed, 14 insertions(+)
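
For illustration only, a minimal sketch of how a PIO-only host driver
might opt in to the new flag; the example_* names and the probe skeleton
are hypothetical, not part of this patch:

	#include <linux/mmc/host.h>
	#include <linux/platform_device.h>

	struct example_host {
		struct mmc_host	*mmc;
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct mmc_host *mmc;
		int ret;

		mmc = mmc_alloc_host(sizeof(struct example_host), &pdev->dev);
		if (!mmc)
			return -ENOMEM;

		/*
		 * This host moves data by PIO through a kmap, so ask the
		 * core to keep every segment within a single page.
		 */
		mmc->need_kmap = 1;

		ret = mmc_add_host(mmc);
		if (ret)
			mmc_free_host(mmc);
		return ret;
	}

With the flag set, the core applies both blk_queue_segment_boundary()
and dma_set_seg_boundary() on the driver's behalf, so individual hosts
no longer need to duplicate that logic.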