@@ -399,6 +399,11 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
init_waitqueue_head(&mq->wait);
}
+static inline bool mmc_merge_capable(struct mmc_host *host)
+{
+ return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
+}
+
/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64
@@ -441,7 +446,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
* the host->can_dma_map_merge should be set before to get max_segs
* from mmc_get_max_segments().
*/
- if (host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
+ if (mmc_merge_capable(host) &&
+ host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
dma_get_merge_boundary(mmc_dev(host)))
host->can_dma_map_merge = 1;
else
@@ -367,6 +367,7 @@ struct mmc_host {
#define MMC_CAP2_CQE (1 << 23) /* Has eMMC command queue engine */
#define MMC_CAP2_CQE_DCMD (1 << 24) /* CQE can issue a direct command */
#define MMC_CAP2_AVOID_3_3V (1 << 25) /* Host must negotiate down from 3.3V */
+#define MMC_CAP2_MERGE_CAPABLE (1 << 26) /* Host can merge a segment over the segment size */
int fixed_drv_type; /* fixed driver type for non-removable media */
Commit 38c38cb73223 ("mmc: queue: use bigger segments if DMA MAP layer can merge the segments") unconditionally enables the bigger segments whenever the DMA MAP layer can merge the segments, but some controllers (e.g. SDHCI) have a strict limit on the segment size, so the commit breaks on those controllers. To fix the issue, this patch adds a new flag MMC_CAP2_MERGE_CAPABLE to struct mmc_host, and the bigger-segment usage is now disabled by default so that host drivers must opt in.

Reported-by: Thierry Reding <treding@nvidia.com>
Fixes: 38c38cb73223 ("mmc: queue: use bigger segments if DMA MAP layer can merge the segments")
Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
---
 drivers/mmc/core/queue.c | 8 +++++++-
 include/linux/mmc/host.h | 1 +
 2 files changed, 8 insertions(+), 1 deletion(-)
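
For illustration, a minimal sketch of how a host controller driver could opt in to the new capability from its probe function. The probe function name (example_mmc_probe) and the surrounding driver code are hypothetical; only MMC_CAP2_MERGE_CAPABLE comes from this patch.

#include <linux/mmc/host.h>
#include <linux/platform_device.h>

/*
 * Hypothetical probe of a host driver whose controller can really
 * handle a DMA-merged segment larger than its nominal max_seg_size.
 */
static int example_mmc_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	int ret;

	mmc = mmc_alloc_host(0, &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	/*
	 * Opt in to the bigger-segment path. Controllers with a hard
	 * per-descriptor size limit (e.g. SDHCI) must not set this,
	 * so host->can_dma_map_merge stays 0 for them.
	 */
	mmc->caps2 |= MMC_CAP2_MERGE_CAPABLE;

	ret = mmc_add_host(mmc);
	if (ret)
		mmc_free_host(mmc);

	return ret;
}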