--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -202,17 +202,23 @@ void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
irqreturn_t tmio_mmc_irq(int irq, void *devid);
+/* Note: this function may return an ERR_PTR and must be checked! */
static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
unsigned long *flags)
{
+ void *ret;
+
local_irq_save(*flags);
- return kmap_atomic(sg_page(sg)) + sg->offset;
+ ret = sg_map(sg, 0, SG_KMAP_ATOMIC | SG_MAP_MUST_NOT_FAIL);
+ if (IS_ERR(ret))
+         local_irq_restore(*flags);
+ return ret;
}
static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg,
unsigned long *flags, void *virt)
{
- kunmap_atomic(virt - sg->offset);
+ sg_unmap(sg, virt, 0, SG_KMAP_ATOMIC);
local_irq_restore(*flags);
}
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -506,6 +506,18 @@ static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
if (host->sg_ptr == &host->bounce_sg) {
unsigned long flags;
void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
+ if (IS_ERR(sg_vaddr)) {
+ /*
+ * This should really never happen unless
+ * the code is changed to use memory that is
+ * not mappable in the sg. Seeing there doesn't
+ * seem to be any error path out of here,
+ * we can only WARN.
+ */
+ WARN(1, "Non-mappable memory used in sg!");
+ return;
+ }
+
memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
}
Straightforward conversion to the sg_map helper. Seeing there is no
clear error path, SG_MAP_MUST_NOT_FAIL is used, which may BUG_ON in
certain cases in the future.

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Cc: Wolfram Sang <wsa+renesas@sang-engineering.com>
Cc: Ulf Hansson <ulf.hansson@linaro.org>
---
 drivers/mmc/host/tmio_mmc.h     | 10 ++++++++--
 drivers/mmc/host/tmio_mmc_pio.c | 12 ++++++++++++
 2 files changed, 20 insertions(+), 2 deletions(-)
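For reviewers who don't have the rest of the series handy, below is a rough
sketch of the sg_map()/sg_unmap() semantics this conversion assumes. The
helper names and the SG_KMAP_ATOMIC / SG_MAP_MUST_NOT_FAIL flags come from
the proposed sg_map series and are not in mainline; the flag values and
function bodies here are assumptions for illustration only, showing roughly
the kmap_atomic() path they replace, not the real implementation.

#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/err.h>

/* Assumed flag values, for illustration only. */
#define SG_KMAP_ATOMIC          (1 << 0)
#define SG_MAP_MUST_NOT_FAIL    (1 << 1)

/* Sketch: map one sg entry, roughly kmap_atomic(sg_page(sg)) + sg->offset. */
static void *sketch_sg_map(struct scatterlist *sg, size_t offset, int flags)
{
        struct page *page = sg_page(sg);

        if (!page) {
                /*
                 * With SG_MAP_MUST_NOT_FAIL the series reserves the right
                 * to BUG_ON() here in the future; for now an ERR_PTR is
                 * returned and callers are expected to check it.
                 */
                return ERR_PTR(-EFAULT);
        }

        if (flags & SG_KMAP_ATOMIC)
                return kmap_atomic(page) + sg->offset + offset;

        return kmap(page) + sg->offset + offset;
}

/* Sketch: undo sketch_sg_map() for the same sg entry and flags. */
static void sketch_sg_unmap(struct scatterlist *sg, void *addr, size_t offset,
                            int flags)
{
        if (flags & SG_KMAP_ATOMIC)
                kunmap_atomic(addr - sg->offset - offset);
        else
                kunmap(sg_page(sg));
}

In the tmio case only the SG_KMAP_ATOMIC path is used, and
SG_MAP_MUST_NOT_FAIL documents that a mapping failure is treated as a
programming error, which is why the IS_ERR() check in
tmio_mmc_check_bounce_buffer() can only WARN and bail out.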