@@ -952,9 +952,7 @@ static void atmci_pdc_cleanup(struct atmel_mci *host)
struct mmc_data *data = host->data;
if (data)
- dma_unmap_sg(&host->pdev->dev,
- data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ mmc_dma_unmap_sg(&host->pdev->dev, data);
}
/*
@@ -990,9 +988,7 @@ static void atmci_dma_cleanup(struct atmel_mci *host)
struct mmc_data *data = host->data;
if (data)
- dma_unmap_sg(host->dma.chan->device->dev,
- data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ mmc_dma_unmap_sg(host->dma.chan->device->dev, data);
}
/*
@@ -1117,8 +1113,7 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
/* Configure PDC */
host->data_size = data->blocks * data->blksz;
- sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ sg_len = mmc_dma_map_sg(&host->pdev->dev, data);
if ((!host->caps.has_rwproof)
&& (host->data->flags & MMC_DATA_WRITE)) {
@@ -1192,8 +1187,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) |
ATMCI_DMAEN);
- sglen = dma_map_sg(chan->device->dev, data->sg,
- data->sg_len, mmc_get_dma_dir(data));
+ sglen = mmc_dma_map_sg(chan->device->dev, data);
dmaengine_slave_config(chan, &host->dma_conf);
desc = dmaengine_prep_slave_sg(chan,
@@ -1208,8 +1202,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
return iflags;
unmap_exit:
- dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ mmc_dma_unmap_sg(chan->device->dev, data);
return -ENOMEM;
}
@@ -477,15 +477,12 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
int mask = rw_threshold - 1;
int ret = 0;
- host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ host->sg_len = mmc_dma_map_sg(mmc_dev(host->mmc), data);
/* no individual DMA segment should need a partial FIFO */
for (i = 0; i < host->sg_len; i++) {
if (sg_dma_len(data->sg + i) & mask) {
- dma_unmap_sg(mmc_dev(host->mmc),
- data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ mmc_dma_unmap_sg(mmc_dev(host->mmc), data);
return -1;
}
}
@@ -797,8 +794,7 @@ mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
if (host->do_dma) {
davinci_abort_dma(host);
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ mmc_dma_unmap_sg(mmc_dev(host->mmc), data);
host->do_dma = false;
}
host->data_dir = DAVINCI_MMC_DATADIR_NONE;
@@ -436,10 +436,7 @@ static void dw_mci_dma_cleanup(struct dw_mci *host)
struct mmc_data *data = host->data;
if (data && data->host_cookie == COOKIE_MAPPED) {
- dma_unmap_sg(host->dev,
- data->sg,
- data->sg_len,
- mmc_get_dma_dir(data));
+ mmc_dma_unmap_sg(host->dev, data);
data->host_cookie = COOKIE_UNMAPPED;
}
}
@@ -892,10 +889,7 @@ static int dw_mci_pre_dma_transfer(struct dw_mci *host,
return -EINVAL;
}
- sg_len = dma_map_sg(host->dev,
- data->sg,
- data->sg_len,
- mmc_get_dma_dir(data));
+ sg_len = mmc_dma_map_sg(host->dev, data);
if (sg_len == 0)
return -EINVAL;
@@ -932,10 +926,7 @@ static void dw_mci_post_req(struct mmc_host *mmc,
return;
if (data->host_cookie != COOKIE_UNMAPPED)
- dma_unmap_sg(slot->host->dev,
- data->sg,
- data->sg_len,
- mmc_get_dma_dir(data));
+ mmc_dma_unmap_sg(slot->host->dev, data);
data->host_cookie = COOKIE_UNMAPPED;
}
@@ -210,9 +210,8 @@ static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
struct mmc_data *data)
{
struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
- enum dma_data_direction dir = mmc_get_dma_dir(data);
- dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+ mmc_dma_unmap_sg(chan->device->dev, data);
}
/* Prepares DMA data for current/next transfer, returns non-zero on failure */
@@ -222,7 +221,6 @@ static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
struct dma_chan *chan)
{
struct jz4740_mmc_host_next *next_data = &host->next_data;
- enum dma_data_direction dir = mmc_get_dma_dir(data);
int sg_len;
if (!next && data->host_cookie &&
@@ -237,11 +235,7 @@ static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
/* Check if next job is already prepared */
if (next || data->host_cookie != host->next_data.cookie) {
- sg_len = dma_map_sg(chan->device->dev,
- data->sg,
- data->sg_len,
- dir);
-
+ sg_len = mmc_dma_map_sg(chan->device->dev, data);
} else {
sg_len = next_data->sg_len;
next_data->sg_len = 0;
@@ -522,8 +522,7 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
else
chan = host->dma_tx_channel;
- dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ mmc_dma_unmap_sg(chan->device->dev, data);
}
static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
@@ -606,8 +605,7 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
return -EINVAL;
device = chan->device;
- nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ nr_sg = mmc_dma_map_sg(device->dev, data);
if (nr_sg == 0)
return -EINVAL;
@@ -626,8 +624,7 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
return 0;
unmap_exit:
- dma_unmap_sg(device->dev, data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ mmc_dma_unmap_sg(device->dev, data);
return -ENOMEM;
}
@@ -272,9 +272,7 @@ static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
dir_slave = DMA_DEV_TO_MEM;
}
- len = dma_map_sg(dma_chan->device->dev, data->sg,
- data->sg_len, mmc_get_dma_dir(data));
-
+ len = mmc_dma_map_sg(dma_chan->device->dev, data);
if (len > 0) {
desc = dmaengine_prep_slave_sg(dma_chan, data->sg,
len, dir_slave,
@@ -297,9 +295,7 @@ static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
dma_time = wait_for_completion_interruptible_timeout(
&host->dma_complete, host->timeout);
- dma_unmap_sg(dma_chan->device->dev,
- data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ mmc_dma_unmap_sg(dma_chan->device->dev, data);
}
@@ -475,8 +475,7 @@ static void msdc_prepare_data(struct msdc_host *host, struct mmc_request *mrq)
if (!(data->host_cookie & MSDC_PREPARE_FLAG)) {
data->host_cookie |= MSDC_PREPARE_FLAG;
- data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ data->sg_count = mmc_dma_map_sg(host->dev, data);
}
}
@@ -488,8 +487,7 @@ static void msdc_unprepare_data(struct msdc_host *host, struct mmc_request *mrq)
return;
if (data->host_cookie & MSDC_PREPARE_FLAG) {
- dma_unmap_sg(host->dev, data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ mmc_dma_unmap_sg(host->dev, data);
data->host_cookie &= ~MSDC_PREPARE_FLAG;
}
}
@@ -126,9 +126,7 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
} else {
dma_addr_t phys_addr;
- host->sg_frags = dma_map_sg(mmc_dev(host->mmc),
- data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ host->sg_frags = mmc_dma_map_sg(mmc_dev(host->mmc), data);
phys_addr = sg_dma_address(data->sg);
mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff);
mvsd_write(MVSD_SYS_ADDR_HI, (u32)phys_addr >> 16);
@@ -293,8 +291,7 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
host->pio_ptr = NULL;
host->pio_size = 0;
} else {
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
- mmc_get_dma_dir(data));
+ mmc_dma_unmap_sg(mmc_dev(host->mmc), data);
}
if (err_status & MVSD_ERR_DATA_TIMEOUT)
@@ -404,17 +404,14 @@ static void
mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
int abort)
{
- enum dma_data_direction dma_data_dir;
struct device *dev = mmc_dev(host->mmc);
struct dma_chan *c;
- if (data->flags & MMC_DATA_WRITE) {
- dma_data_dir = DMA_TO_DEVICE;
+ if (data->flags & MMC_DATA_WRITE)
c = host->dma_tx;
- } else {
- dma_data_dir = DMA_FROM_DEVICE;
+ else
c = host->dma_rx;
- }
+
if (c) {
if (data->error) {
dmaengine_terminate_all(c);
@@ -423,7 +420,7 @@ mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
}
dev = c->device->dev;
}
- dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir);
+ mmc_dma_unmap_sg(dev, data);
}
static void mmc_omap_send_stop_work(struct work_struct *work)
@@ -981,7 +978,6 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
host->sg_idx = 0;
if (use_dma) {
- enum dma_data_direction dma_data_dir;
struct dma_async_tx_descriptor *tx;
struct dma_chan *c;
u32 burst, *bp;
@@ -1003,12 +999,10 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
c = host->dma_tx;
bp = &host->dma_tx_burst;
buf = 0x0f80 | (burst - 1) << 0;
- dma_data_dir = DMA_TO_DEVICE;
} else {
c = host->dma_rx;
bp = &host->dma_rx_burst;
buf = 0x800f | (burst - 1) << 8;
- dma_data_dir = DMA_FROM_DEVICE;
}
if (!c)
@@ -1033,8 +1027,7 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
*bp = burst;
}
- host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len,
- dma_data_dir);
+ host->sg_len = mmc_dma_map_sg(c->device->dev, data);
if (host->sg_len == 0)
goto use_pio;
@@ -1044,10 +1044,7 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);
dmaengine_terminate_all(chan);
- dma_unmap_sg(chan->device->dev,
- host->data->sg, host->data->sg_len,
- mmc_get_dma_dir(host->data));
-
+ mmc_dma_unmap_sg(chan->device->dev, host->data);
host->data->host_cookie = 0;
}
host->data = NULL;
@@ -1339,9 +1336,7 @@ static void omap_hsmmc_dma_callback(void *param)
data = host->mrq->data;
chan = omap_hsmmc_get_dma_chan(host, data);
if (!data->host_cookie)
- dma_unmap_sg(chan->device->dev,
- data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ mmc_dma_unmap_sg(chan->device->dev, data);
req_in_progress = host->req_in_progress;
host->dma_ch = -1;
@@ -1373,9 +1368,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
/* Check if next job is already prepared */
if (next || data->host_cookie != host->next_data.cookie) {
- dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
- mmc_get_dma_dir(data));
-
+ dma_len = mmc_dma_map_sg(chan->device->dev, data);
} else {
dma_len = host->next_data.dma_len;
host->next_data.dma_len = 0;
@@ -1559,8 +1552,7 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
if (host->use_dma && data->host_cookie) {
struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);
- dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ mmc_dma_unmap_sg(c->device->dev, data);
data->host_cookie = 0;
}
}
@@ -1092,8 +1092,7 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
else
conf.direction = DMA_MEM_TO_DEV;
- dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ mmc_dma_map_sg(mmc_dev(host->mmc), data);
dmaengine_slave_config(host->dma, &conf);
desc = dmaengine_prep_slave_sg(host->dma, data->sg, data->sg_len,
@@ -1109,8 +1108,7 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
return 0;
unmap_exit:
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ mmc_dma_unmap_sg(mmc_dev(host->mmc), data);
return -ENOMEM;
}
@@ -501,9 +501,7 @@ static int sdhci_pre_dma_transfer(struct sdhci_host *host,
if (data->host_cookie == COOKIE_PRE_MAPPED)
return data->sg_count;
- sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- mmc_get_dma_dir(data));
-
+ sg_count = mmc_dma_map_sg(mmc_dev(host->mmc), data);
if (sg_count == 0)
return -ENOSPC;
@@ -2211,8 +2209,7 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
struct mmc_data *data = mrq->data;
if (data->host_cookie != COOKIE_UNMAPPED)
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ mmc_dma_unmap_sg(mmc_dev(host->mmc), data);
data->host_cookie = COOKIE_UNMAPPED;
}
@@ -2327,8 +2324,7 @@ static bool sdhci_request_done(struct sdhci_host *host)
struct mmc_data *data = mrq->data;
if (data && data->host_cookie == COOKIE_MAPPED) {
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ mmc_dma_unmap_sg(mmc_dev(host->mmc), data);
data->host_cookie = COOKIE_UNMAPPED;
}
}
@@ -391,8 +391,7 @@ static int sunxi_mmc_map_dma(struct sunxi_mmc_host *host,
u32 i, dma_len;
struct scatterlist *sg;
- dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ dma_len = mmc_dma_map_sg(mmc_dev(host->mmc), data);
if (dma_len == 0) {
dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
return -ENOMEM;
@@ -542,8 +541,7 @@ static irqreturn_t sunxi_mmc_finalize_request(struct sunxi_mmc_host *host)
mmc_writel(host, REG_GCTRL, rval);
rval |= SDXC_FIFO_RESET;
mmc_writel(host, REG_GCTRL, rval);
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ mmc_dma_unmap_sg(mmc_dev(host->mmc), data);
}
mmc_writel(host, REG_RINTR, 0xffff);
@@ -1013,8 +1011,7 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_unlock_irqrestore(&host->lock, iflags);
if (data)
- dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
- mmc_get_dma_dir(data));
+ mmc_dma_unmap_sg(mmc_dev(mmc), data);
dev_err(mmc_dev(mmc), "request already pending\n");
mrq->cmd->error = -EBUSY;
Use new core functions mmc_dma_(un)map_sg in host drivers where applicable.

Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
---
 drivers/mmc/host/atmel-mci.c   | 17 +++++------------
 drivers/mmc/host/davinci_mmc.c | 10 +++-------
 drivers/mmc/host/dw_mmc.c      | 15 +++------------
 drivers/mmc/host/jz4740_mmc.c  | 10 ++--------
 drivers/mmc/host/mmci.c        |  9 +++------
 drivers/mmc/host/moxart-mmc.c  |  8 ++------
 drivers/mmc/host/mtk-sd.c      |  6 ++----
 drivers/mmc/host/mvsdio.c      |  7 ++-----
 drivers/mmc/host/omap.c        | 17 +++++------------
 drivers/mmc/host/omap_hsmmc.c  | 16 ++++------------
 drivers/mmc/host/s3cmci.c      |  6 ++----
 drivers/mmc/host/sdhci.c       | 10 +++-------
 drivers/mmc/host/sunxi-mmc.c   |  9 +++------
 13 files changed, 39 insertions(+), 101 deletions(-)
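
For reference, a minimal sketch of what the new core helpers are assumed to
look like, inferred from the call sites above: thin static inline wrappers
(their exact location and final form in mmc core are an assumption of this
note, not confirmed by the diff) that bundle the scatterlist, its length, and
the transfer direction derived from the request flags:

	/* Sketch only: assumed shape of the helpers this series relies on. */
	static inline int mmc_dma_map_sg(struct device *dev,
					 struct mmc_data *data)
	{
		/* Returns the number of mapped segments, 0 on failure. */
		return dma_map_sg(dev, data->sg, data->sg_len,
				  mmc_get_dma_dir(data));
	}

	static inline void mmc_dma_unmap_sg(struct device *dev,
					    struct mmc_data *data)
	{
		dma_unmap_sg(dev, data->sg, data->sg_len,
			     mmc_get_dma_dir(data));
	}

Here mmc_get_dma_dir() is the existing core helper that returns DMA_TO_DEVICE
for MMC_DATA_WRITE requests and DMA_FROM_DEVICE otherwise. One side effect
worth noting: the DMA API requires dma_unmap_sg() to be passed the same nents
as dma_map_sg(), not dma_map_sg()'s return value, so conversions such as
mvsdio's, which previously unmapped with host->sg_frags (the mapped count),
also become more obviously correct with the helper.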