@@ -8,6 +8,8 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>
@@ -29,6 +31,7 @@
#define LPI2C_MCR 0x10 /* i2c contrl register */
#define LPI2C_MSR 0x14 /* i2c status register */
#define LPI2C_MIER 0x18 /* i2c interrupt enable */
+#define LPI2C_MDER 0x1C /* i2c DMA enable */
#define LPI2C_MCFGR0 0x20 /* i2c master configuration */
#define LPI2C_MCFGR1 0x24 /* i2c master configuration */
#define LPI2C_MCFGR2 0x28 /* i2c master configuration */
@@ -70,11 +73,14 @@
#define MCFGR1_AUTOSTOP BIT(8)
#define MCFGR1_IGNACK BIT(9)
#define MRDR_RXEMPTY BIT(14)
+#define MDER_TDDE BIT(0)
+#define MDER_RDDE BIT(1)
#define I2C_CLK_RATIO 2
#define CHUNK_DATA 256
#define I2C_PM_TIMEOUT 10 /* ms */
+#define I2C_DMA_THRESHOLD 8 /* bytes */
enum lpi2c_imx_mode {
STANDARD, /* 100+Kbps */
@@ -107,6 +113,22 @@ struct lpi2c_imx_struct {
unsigned int rxfifosize;
enum lpi2c_imx_mode mode;
struct i2c_bus_recovery_info rinfo;
+
+ bool can_use_dma;
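+	/* True until a DMA transfer actually starts; allows fallback to PIO */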
+ bool using_pio_mode;
+	u16 rx_cmd_buf_len;
+ u16 *rx_cmd_buf;
+ u8 *dma_buf;
+ unsigned int dma_len;
+ unsigned int tx_burst_num;
+ unsigned int rx_burst_num;
+ resource_size_t phy_addr;
+ dma_addr_t dma_tx_addr;
+ dma_addr_t dma_addr;
+ enum dma_data_direction dma_direction;
+ struct dma_chan *chan_tx;
+ struct dma_chan *chan_rx;
+ struct i2c_msg *msg;
};
static void lpi2c_imx_intctrl(struct lpi2c_imx_struct *lpi2c_imx,
@@ -306,7 +328,7 @@ static int lpi2c_imx_master_disable(struct lpi2c_imx_struct *lpi2c_imx)
return 0;
}
-static int lpi2c_imx_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
+static int lpi2c_imx_pio_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
{
unsigned long timeout;
@@ -452,6 +474,439 @@ static void lpi2c_imx_read(struct lpi2c_imx_struct *lpi2c_imx,
lpi2c_imx_intctrl(lpi2c_imx, MIER_RDIE | MIER_NDIE);
}
+static bool is_use_dma(struct lpi2c_imx_struct *lpi2c_imx, struct i2c_msg *msg)
+{
+ if (!lpi2c_imx->can_use_dma)
+ return false;
+
+	/*
+	 * For messages shorter than I2C_DMA_THRESHOLD bytes, the DMA setup
+	 * overhead outweighs any benefit, so use PIO mode directly.
+	 */
+ if (msg->len < I2C_DMA_THRESHOLD)
+ return false;
+
+ return true;
+}
+
+static int lpi2c_imx_pio_xfer(struct lpi2c_imx_struct *lpi2c_imx,
+ struct i2c_msg *msg)
+{
+ reinit_completion(&lpi2c_imx->complete);
+
+ if (msg->flags & I2C_M_RD)
+ lpi2c_imx_read(lpi2c_imx, msg);
+ else
+ lpi2c_imx_write(lpi2c_imx, msg);
+
+	return lpi2c_imx_pio_msg_complete(lpi2c_imx);
+}
+
+static unsigned long lpi2c_imx_dma_calculate_timeout(struct lpi2c_imx_struct *lpi2c_imx)
+{
+	unsigned long timeout;
+
+	/* Estimated transfer time in ms: 8 bits per byte at the bus bitrate */
+	timeout = 8 * lpi2c_imx->msglen * 1000 / lpi2c_imx->bitrate;
+
+	/* Adding 1 yields one extra second of slack after the scaling below */
+	timeout += 1;
+
+	/*
+	 * Treat the millisecond estimate as seconds, which gives a deliberately
+	 * generous margin for scheduler related activities.
+	 */
+	return msecs_to_jiffies(timeout * MSEC_PER_SEC);
+}
+
+static int lpi2c_imx_alloc_rx_cmd_buf(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ u16 rx_remain = lpi2c_imx->msg->len;
+ u16 temp;
+ int cmd_num;
+
+	/*
+	 * Calculate how many RX command words the DMA TX channel must write
+	 * into the command register for this message length, then build the
+	 * RX command word buffer.
+	 */
+ cmd_num = DIV_ROUND_UP(rx_remain, CHUNK_DATA);
+	lpi2c_imx->rx_cmd_buf = kcalloc(cmd_num, sizeof(u16), GFP_KERNEL);
+	if (!lpi2c_imx->rx_cmd_buf)
+		return -ENOMEM;
+
+	lpi2c_imx->rx_cmd_buf_len = cmd_num * sizeof(u16);
+
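+	/*
+	 * Each command word requests up to CHUNK_DATA bytes: RECV_DATA goes in
+	 * the command field (bits 10:8) and (chunk length - 1) in bits 7:0.
+	 */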
+	for (int i = 0; i < cmd_num; i++) {
+ temp = rx_remain > CHUNK_DATA ? CHUNK_DATA - 1 : rx_remain - 1;
+ temp |= (RECV_DATA << 8);
+ rx_remain -= CHUNK_DATA;
+ lpi2c_imx->rx_cmd_buf[i] = temp;
+ }
+
+ return 0;
+}
+
+static int lpi2c_imx_dma_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned long time_left, timeout;
+
+ timeout = lpi2c_imx_dma_calculate_timeout(lpi2c_imx);
+ time_left = wait_for_completion_timeout(&lpi2c_imx->complete, timeout);
+ if (time_left == 0) {
+		dev_err(&lpi2c_imx->adapter.dev, "DMA transfer timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void lpi2c_dma_unmap(struct lpi2c_imx_struct *lpi2c_imx)
+{
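+	/* Unmap with the channel that mapped the buffer, per the recorded direction */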
+ struct dma_chan *chan = lpi2c_imx->dma_direction == DMA_FROM_DEVICE
+ ? lpi2c_imx->chan_rx : lpi2c_imx->chan_tx;
+
+ dma_unmap_single(chan->device->dev, lpi2c_imx->dma_addr,
+ lpi2c_imx->dma_len, lpi2c_imx->dma_direction);
+
+ lpi2c_imx->dma_direction = DMA_NONE;
+}
+
+static void lpi2c_cleanup_rx_cmd_dma(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ dmaengine_terminate_sync(lpi2c_imx->chan_tx);
+ dma_unmap_single(lpi2c_imx->chan_tx->device->dev, lpi2c_imx->dma_tx_addr,
+ lpi2c_imx->rx_cmd_buf_len, DMA_TO_DEVICE);
+}
+
+static void lpi2c_cleanup_dma(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ if (lpi2c_imx->dma_direction == DMA_NONE)
+ return;
+ else if (lpi2c_imx->dma_direction == DMA_FROM_DEVICE)
+ dmaengine_terminate_sync(lpi2c_imx->chan_rx);
+ else if (lpi2c_imx->dma_direction == DMA_TO_DEVICE)
+ dmaengine_terminate_sync(lpi2c_imx->chan_tx);
+
+ lpi2c_dma_unmap(lpi2c_imx);
+}
+
+static void lpi2c_dma_callback(void *data)
+{
+	struct lpi2c_imx_struct *lpi2c_imx = data;
+
+ complete(&lpi2c_imx->complete);
+}
+
+static int lpi2c_dma_rx_cmd_submit(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ struct dma_chan *txchan = lpi2c_imx->chan_tx;
+ struct dma_async_tx_descriptor *rx_cmd_desc;
+ dma_cookie_t cookie;
+
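+	/* Map the RX command words; the TX channel streams them into MTDR */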
+ lpi2c_imx->dma_tx_addr = dma_map_single(txchan->device->dev,
+ lpi2c_imx->rx_cmd_buf,
+ lpi2c_imx->rx_cmd_buf_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(txchan->device->dev, lpi2c_imx->dma_tx_addr)) {
+ dev_err(&lpi2c_imx->adapter.dev, "dma map failed, use pio\n");
+ return -EINVAL;
+ }
+
+ rx_cmd_desc = dmaengine_prep_slave_single(txchan, lpi2c_imx->dma_tx_addr,
+ lpi2c_imx->rx_cmd_buf_len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rx_cmd_desc) {
+ dma_unmap_single(txchan->device->dev, lpi2c_imx->dma_tx_addr,
+ lpi2c_imx->rx_cmd_buf_len, DMA_TO_DEVICE);
+		dev_err(&lpi2c_imx->adapter.dev, "dma prep slave single failed, use pio\n");
+ return -EINVAL;
+ }
+
+ cookie = dmaengine_submit(rx_cmd_desc);
+ if (dma_submit_error(cookie)) {
+ dma_unmap_single(txchan->device->dev, lpi2c_imx->dma_tx_addr,
+ lpi2c_imx->rx_cmd_buf_len, DMA_TO_DEVICE);
+ dmaengine_desc_free(rx_cmd_desc);
+ dev_err(&lpi2c_imx->adapter.dev, "submitting dma failed, use pio\n");
+ return -EINVAL;
+ }
+
+ dma_async_issue_pending(txchan);
+
+ return 0;
+}
+
+static int lpi2c_dma_submit(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ bool read = lpi2c_imx->msg->flags & I2C_M_RD;
+ enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ struct dma_chan *chan = read ? lpi2c_imx->chan_rx : lpi2c_imx->chan_tx;
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+
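+	/* Record the mapping direction so cleanup paths can find the live mapping */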
+ lpi2c_imx->dma_direction = dir;
+
+ lpi2c_imx->dma_addr = dma_map_single(chan->device->dev,
+ lpi2c_imx->dma_buf,
+ lpi2c_imx->dma_len, dir);
+ if (dma_mapping_error(chan->device->dev, lpi2c_imx->dma_addr)) {
+ dev_err(&lpi2c_imx->adapter.dev, "dma map failed, use pio\n");
+ return -EINVAL;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, lpi2c_imx->dma_addr,
+ lpi2c_imx->dma_len, read ?
+ DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+		dev_err(&lpi2c_imx->adapter.dev, "dma prep slave single failed, use pio\n");
+ lpi2c_dma_unmap(lpi2c_imx);
+ return -EINVAL;
+ }
+
+ reinit_completion(&lpi2c_imx->complete);
+ desc->callback = lpi2c_dma_callback;
+	desc->callback_param = lpi2c_imx;
+
+ cookie = dmaengine_submit(desc);
+ if (dma_submit_error(cookie)) {
+ dev_err(&lpi2c_imx->adapter.dev, "submitting dma failed, use pio\n");
+ lpi2c_dma_unmap(lpi2c_imx);
+ dmaengine_desc_free(desc);
+ return -EINVAL;
+ }
+
+	/* Once the DMA transfer has started, it is no longer possible to fall back to PIO */
+	lpi2c_imx->using_pio_mode = false;
+
+ dma_async_issue_pending(chan);
+
+ return 0;
+}
+
+static int lpi2c_imx_find_max_burst_num(unsigned int fifosize, unsigned int len)
+{
+ unsigned int i;
+
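+	/*
+	 * Pick the largest burst, at most half the FIFO depth, that divides
+	 * len evenly so the transfer does not end on a partial burst.
+	 */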
+ for (i = fifosize / 2; i > 0; i--) {
+ if (!(len % i))
+ break;
+ }
+
+ return i;
+}
+
+/*
+ * For the highest DMA efficiency, the TX/RX burst numbers should be
+ * derived from the FIFO depth.
+ */
+static void lpi2c_imx_dma_burst_num_calculate(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned int cmd_num;
+
+ if (lpi2c_imx->msg->flags & I2C_M_RD) {
+ /*
+ * One RX cmd word can trigger DMA receive no more than 256 bytes.
+ * The number of RX cmd words should be calculated based on the data
+ * length.
+ */
+ cmd_num = DIV_ROUND_UP(lpi2c_imx->dma_len, CHUNK_DATA);
+ lpi2c_imx->tx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->txfifosize,
+ cmd_num);
+ lpi2c_imx->rx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->rxfifosize,
+ lpi2c_imx->dma_len);
+	} else {
+		lpi2c_imx->tx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->txfifosize,
+								       lpi2c_imx->dma_len);
+	}
+}
+
+static int lpi2c_dma_config(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ int ret;
+ struct dma_slave_config rx = {}, tx = {};
+
+ lpi2c_imx_dma_burst_num_calculate(lpi2c_imx);
+
+ if (lpi2c_imx->msg->flags & I2C_M_RD) {
+ if (IS_ERR(lpi2c_imx->chan_tx)) {
+ ret = PTR_ERR(lpi2c_imx->chan_tx);
+			dev_err(&lpi2c_imx->adapter.dev, "can't get TX channel %d\n", ret);
+ return ret;
+ }
+
+ if (IS_ERR(lpi2c_imx->chan_rx)) {
+ ret = PTR_ERR(lpi2c_imx->chan_rx);
+			dev_err(&lpi2c_imx->adapter.dev, "can't get RX channel %d\n", ret);
+ return ret;
+ }
+
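+		/* The TX channel carries 16-bit RX command words, hence the 2-byte width */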
+ tx.dst_addr = lpi2c_imx->phy_addr + LPI2C_MTDR;
+ tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ tx.dst_maxburst = lpi2c_imx->tx_burst_num;
+ tx.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(lpi2c_imx->chan_tx, &tx);
+ if (ret < 0) {
+ dev_err(&lpi2c_imx->adapter.dev, "can't configure TX channel %d\n", ret);
+ return ret;
+ }
+
+ rx.src_addr = lpi2c_imx->phy_addr + LPI2C_MRDR;
+ rx.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ rx.src_maxburst = lpi2c_imx->rx_burst_num;
+ rx.direction = DMA_DEV_TO_MEM;
+ ret = dmaengine_slave_config(lpi2c_imx->chan_rx, &rx);
+ if (ret < 0) {
+ dev_err(&lpi2c_imx->adapter.dev, "can't configure RX channel %d\n", ret);
+ return ret;
+ }
+ } else {
+ if (IS_ERR(lpi2c_imx->chan_tx)) {
+ ret = PTR_ERR(lpi2c_imx->chan_tx);
+ dev_err(&lpi2c_imx->adapter.dev, "can't get TX channel %d\n", ret);
+ return ret;
+ }
+
+ tx.dst_addr = lpi2c_imx->phy_addr + LPI2C_MTDR;
+ tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ tx.dst_maxburst = lpi2c_imx->tx_burst_num;
+ tx.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(lpi2c_imx->chan_tx, &tx);
+ if (ret < 0) {
+ dev_err(&lpi2c_imx->adapter.dev, "can't configure TX channel %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void lpi2c_dma_enable(struct lpi2c_imx_struct *lpi2c_imx)
+{
+	/*
+	 * A TX DMA request is triggered when the number of words in the
+	 * transmit FIFO is equal to or less than the TX watermark. An RX DMA
+	 * request is triggered when the number of words in the receive FIFO is
+	 * greater than the RX watermark. So the TX watermark is set equal to
+	 * the DMA TX burst number, while the RX watermark is set to one less
+	 * than the DMA RX burst number.
+	 */
+ if (lpi2c_imx->msg->flags & I2C_M_RD) {
+ /* Set I2C TX/RX watermark */
+ writel(lpi2c_imx->tx_burst_num | (lpi2c_imx->rx_burst_num - 1) << 16,
+ lpi2c_imx->base + LPI2C_MFCR);
+ /* Enable I2C DMA TX/RX function */
+ writel(MDER_TDDE | MDER_RDDE, lpi2c_imx->base + LPI2C_MDER);
+ } else {
+ /* Set I2C TX watermark */
+ writel(lpi2c_imx->tx_burst_num, lpi2c_imx->base + LPI2C_MFCR);
+ /* Enable I2C DMA TX function */
+ writel(MDER_TDDE, lpi2c_imx->base + LPI2C_MDER);
+ }
+
+ /* Enable NACK detected */
+ lpi2c_imx_intctrl(lpi2c_imx, MIER_NDIE);
+}
+
+/*
+ * In TX DMA mode the LPI2C needs only one DMA TX channel to write data
+ * words into the TXFIFO, but RX DMA mode is different.
+ *
+ * The LPI2C MTDR register combines a command field and a transmit data
+ * field: bits 8-10 hold the command and bits 0-7 hold the transmit data.
+ * When the LPI2C master needs to read data, the number of bytes to read
+ * is placed in the transmit data field and RECV_DATA is placed in the
+ * command field, which makes the master receive (DATA[7:0] + 1) bytes.
+ * A receive command word is therefore RECV_DATA in the command field plus
+ * the byte count in the transmit data field. When more than 256 bytes
+ * must be read, several receive command words have to be written to the
+ * TXFIFO.
+ *
+ * So in RX DMA mode the TX channel must additionally be configured to
+ * send the RX command words, and those command words must be prepared
+ * before the transfer starts.
+ */
+static int lpi2c_imx_dma_xfer(struct lpi2c_imx_struct *lpi2c_imx,
+ struct i2c_msg *msg)
+{
+ int result;
+
+ lpi2c_imx->msg = msg;
+ lpi2c_imx->dma_len = msg->len;
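+	/*
+	 * i2c_get_dma_safe_msg_buf() returns the message buffer itself when it
+	 * is DMA capable, or a bounce buffer otherwise.
+	 */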
+	lpi2c_imx->dma_buf = i2c_get_dma_safe_msg_buf(msg, I2C_DMA_THRESHOLD);
+	if (!lpi2c_imx->dma_buf)
+		return -ENOMEM;
+
+ result = lpi2c_dma_config(lpi2c_imx);
+ if (result) {
+		dev_err(&lpi2c_imx->adapter.dev, "DMA config failed, error %d\n", result);
+ goto disable_dma;
+ }
+
+ lpi2c_dma_enable(lpi2c_imx);
+
+ result = lpi2c_dma_submit(lpi2c_imx);
+ if (result) {
+		dev_err(&lpi2c_imx->adapter.dev, "DMA submit failed, error %d\n", result);
+ goto disable_dma;
+ }
+
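+	/*
+	 * The RX data descriptor was queued above so that the data DMA is
+	 * ready before the RX command words below start the reception.
+	 */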
+ if (msg->flags & I2C_M_RD) {
+ result = lpi2c_imx_alloc_rx_cmd_buf(lpi2c_imx);
+ if (result) {
+ lpi2c_cleanup_dma(lpi2c_imx);
+ goto disable_dma;
+ }
+
+ result = lpi2c_dma_rx_cmd_submit(lpi2c_imx);
+ if (result) {
+ lpi2c_cleanup_dma(lpi2c_imx);
+ goto disable_dma;
+ }
+ }
+
+ result = lpi2c_imx_dma_msg_complete(lpi2c_imx);
+ if (result) {
+ if (msg->flags & I2C_M_RD)
+ lpi2c_cleanup_rx_cmd_dma(lpi2c_imx);
+ lpi2c_cleanup_dma(lpi2c_imx);
+ goto disable_dma;
+ }
+
+	/* If a NACK was detected during the transfer, clean up all DMA transfers */
+	if (readl(lpi2c_imx->base + LPI2C_MSR) & MSR_NDF) {
+		if (msg->flags & I2C_M_RD)
+			lpi2c_cleanup_rx_cmd_dma(lpi2c_imx);
+		lpi2c_cleanup_dma(lpi2c_imx);
+		result = -EIO;
+		goto disable_dma;
+	}
+
+ if (msg->flags & I2C_M_RD)
+ dma_unmap_single(lpi2c_imx->chan_tx->device->dev, lpi2c_imx->dma_tx_addr,
+ lpi2c_imx->rx_cmd_buf_len, DMA_TO_DEVICE);
+ lpi2c_dma_unmap(lpi2c_imx);
+
+disable_dma:
+ /* Disable I2C DMA function */
+ writel(0, lpi2c_imx->base + LPI2C_MDER);
+
+	if (lpi2c_imx->msg->flags & I2C_M_RD) {
+		kfree(lpi2c_imx->rx_cmd_buf);
+		lpi2c_imx->rx_cmd_buf = NULL;
+	}
+
+	i2c_put_dma_safe_msg_buf(lpi2c_imx->dma_buf, lpi2c_imx->msg, !result);
+
+ return result;
+}
+
static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
struct i2c_msg *msgs, int num)
{
@@ -476,14 +931,17 @@ static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
lpi2c_imx->tx_buf = NULL;
lpi2c_imx->delivered = 0;
lpi2c_imx->msglen = msgs[i].len;
- init_completion(&lpi2c_imx->complete);
- if (msgs[i].flags & I2C_M_RD)
- lpi2c_imx_read(lpi2c_imx, &msgs[i]);
- else
- lpi2c_imx_write(lpi2c_imx, &msgs[i]);
+		/* If DMA mode fails before the transfer has started, fall back to PIO mode. */
+		lpi2c_imx->using_pio_mode = true;
+
+		if (is_use_dma(lpi2c_imx, &msgs[i])) {
+			result = lpi2c_imx_dma_xfer(lpi2c_imx, &msgs[i]);
+			if (result && lpi2c_imx->using_pio_mode)
+				result = lpi2c_imx_pio_xfer(lpi2c_imx, &msgs[i]);
+		} else {
+			result = lpi2c_imx_pio_xfer(lpi2c_imx, &msgs[i]);
+		}
- result = lpi2c_imx_msg_complete(lpi2c_imx);
if (result)
goto stop;
@@ -547,6 +1005,49 @@ static int lpi2c_imx_init_recovery_info(struct lpi2c_imx_struct *lpi2c_imx,
return 0;
}
+static void lpi2c_dma_exit(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ if (lpi2c_imx->chan_rx) {
+ dma_release_channel(lpi2c_imx->chan_rx);
+ lpi2c_imx->chan_rx = NULL;
+ }
+
+ if (lpi2c_imx->chan_tx) {
+ dma_release_channel(lpi2c_imx->chan_tx);
+ lpi2c_imx->chan_tx = NULL;
+ }
+}
+
+static int lpi2c_dma_init(struct device *dev,
+ struct lpi2c_imx_struct *lpi2c_imx)
+{
+ int ret;
+
+ lpi2c_imx->chan_rx = lpi2c_imx->chan_tx = NULL;
+ lpi2c_imx->can_use_dma = false;
+
+ /* Prepare for TX DMA: */
+ lpi2c_imx->chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(lpi2c_imx->chan_tx)) {
+ ret = PTR_ERR(lpi2c_imx->chan_tx);
+ lpi2c_imx->chan_tx = NULL;
+ dev_dbg(dev, "can't get TX channel %d\n", ret);
+ return ret;
+ }
+
+ /* Prepare for RX DMA: */
+ lpi2c_imx->chan_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(lpi2c_imx->chan_rx)) {
+ ret = PTR_ERR(lpi2c_imx->chan_rx);
+ lpi2c_imx->chan_rx = NULL;
+ dev_dbg(dev, "can't get RX channel %d\n", ret);
+ return ret;
+ }
+
+ lpi2c_imx->can_use_dma = true;
+ return 0;
+}
+
static u32 lpi2c_imx_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
@@ -569,12 +1070,13 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
struct lpi2c_imx_struct *lpi2c_imx;
unsigned int temp;
int irq, ret;
+ struct resource *res;
lpi2c_imx = devm_kzalloc(&pdev->dev, sizeof(*lpi2c_imx), GFP_KERNEL);
if (!lpi2c_imx)
return -ENOMEM;
- lpi2c_imx->base = devm_platform_ioremap_resource(pdev, 0);
+ lpi2c_imx->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(lpi2c_imx->base))
return PTR_ERR(lpi2c_imx->base);
@@ -588,6 +1090,7 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
lpi2c_imx->adapter.dev.of_node = pdev->dev.of_node;
strscpy(lpi2c_imx->adapter.name, pdev->name,
sizeof(lpi2c_imx->adapter.name));
+	lpi2c_imx->phy_addr = res->start;
ret = devm_clk_bulk_get_all(&pdev->dev, &lpi2c_imx->clks);
if (ret < 0)
@@ -627,6 +1130,18 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
if (ret == -EPROBE_DEFER)
goto rpm_disable;
+	/* Init DMA; failure is not fatal, the driver falls back to PIO mode */
+	ret = lpi2c_dma_init(&pdev->dev, lpi2c_imx);
+	if (ret) {
+		lpi2c_dma_exit(lpi2c_imx);
+		if (ret == -EPROBE_DEFER)
+			goto rpm_disable;
+		dev_info(&pdev->dev, "LPI2C using PIO mode\n");
+	} else {
+		dev_info(&pdev->dev, "LPI2C eDMA enabled\n");
+	}
+
+ init_completion(&lpi2c_imx->complete);
+
ret = i2c_add_adapter(&lpi2c_imx->adapter);
if (ret)
goto rpm_disable;