[v2,02/11] spi: dw-dma: Fail DMA-based transfer if no Tx-buffer specified

Message ID: 20200920112322.24585-3-Sergey.Semin@baikalelectronics.ru (mailing list archive)
State: Accepted
Commit: 7ef30385b05fa8bc13f473c9b0b3ecc7dfb2b208
Series: spi: dw-dma: Add max SG entries burst capability support

Commit Message

Serge Semin Sept. 20, 2020, 11:23 a.m. UTC
Since commit 46164fde6b78 ("spi: dw: Fix Rx-only DMA transfers"), if the
DMA interface is enabled, a Tx buffer must be available in each SPI
transfer. This is required because, in order to activate the reception
of incoming data, either DMA or the CPU must be pushing data out to the
SPI bus. But the DW APB SSI DMA driver code is still written as if the
Tx buffer were optional, which is no longer true. Let's fix that so an
error is returned if no Tx buffer is detected, and DMA Tx is always
enabled (a client-side sketch of the requirement follows the diffstat
below).

Signed-off-by: Serge Semin <Sergey.Semin@baikalelectronics.ru>
---
 drivers/spi/spi-dw-dma.c | 27 +++++++++++++--------------
 1 file changed, 13 insertions(+), 14 deletions(-)
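
For context on what the new -EINVAL means for SPI peripheral drivers,
here is a minimal, hypothetical client-side sketch. It is not part of
this series: the helper name dummy_tx_read is made up, while
spi_sync_transfer() and struct spi_transfer are the regular SPI core
API. It shows one way a pure read can satisfy the must-have-Tx rule by
feeding a zero-filled dummy Tx buffer; alternatively, a controller
driver can set the SPI core's SPI_MASTER_MUST_TX flag and let the core
supply dummy Tx data itself.

 #include <linux/slab.h>
 #include <linux/spi/spi.h>

 /* Hypothetical peripheral-driver helper, not part of this series. */
 static int dummy_tx_read(struct spi_device *spi, void *buf, size_t len)
 {
 	struct spi_transfer xfer = {
 		.rx_buf = buf,
 		.len = len,
 	};
 	void *dummy;
 	int ret;

 	/*
 	 * Rx-only transfers still need Tx data on the wire: feed a
 	 * zero-filled dummy buffer so the DMA Tx path has something
 	 * to push while the Rx DMA drains the incoming words.
 	 */
 	dummy = kzalloc(len, GFP_KERNEL);
 	if (!dummy)
 		return -ENOMEM;
 	xfer.tx_buf = dummy;

 	ret = spi_sync_transfer(spi, &xfer, 1);

 	kfree(dummy);
 	return ret;
 }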

Patch

diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c
index a7ff1e357f8b..1b013ac94a3f 100644
--- a/drivers/spi/spi-dw-dma.c
+++ b/drivers/spi/spi-dw-dma.c
@@ -262,9 +262,6 @@ dw_spi_dma_prepare_tx(struct dw_spi *dws, struct spi_transfer *xfer)
 	struct dma_slave_config txconf;
 	struct dma_async_tx_descriptor *txdesc;
 
-	if (!xfer->tx_buf)
-		return NULL;
-
 	memset(&txconf, 0, sizeof(txconf));
 	txconf.direction = DMA_MEM_TO_DEV;
 	txconf.dst_addr = dws->dma_addr;
@@ -383,17 +380,19 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
 
 static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
 {
-	u16 imr = 0, dma_ctrl = 0;
+	u16 imr, dma_ctrl;
 
-	if (xfer->tx_buf)
-		dma_ctrl |= SPI_DMA_TDMAE;
+	if (!xfer->tx_buf)
+		return -EINVAL;
+
+	/* Set the DMA handshaking interface */
+	dma_ctrl = SPI_DMA_TDMAE;
 	if (xfer->rx_buf)
 		dma_ctrl |= SPI_DMA_RDMAE;
 	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);
 
 	/* Set the interrupt mask */
-	if (xfer->tx_buf)
-		imr |= SPI_INT_TXOI;
+	imr = SPI_INT_TXOI;
 	if (xfer->rx_buf)
 		imr |= SPI_INT_RXUI | SPI_INT_RXOI;
 	spi_umask_intr(dws, imr);
@@ -412,6 +411,8 @@ static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
 
 	/* Prepare the TX dma transfer */
 	txdesc = dw_spi_dma_prepare_tx(dws, xfer);
+	if (!txdesc)
+		return -EINVAL;
 
 	/* Prepare the RX dma transfer */
 	rxdesc = dw_spi_dma_prepare_rx(dws, xfer);
@@ -423,17 +424,15 @@ static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
 		dma_async_issue_pending(dws->rxchan);
 	}
 
-	if (txdesc) {
-		set_bit(TX_BUSY, &dws->dma_chan_busy);
-		dmaengine_submit(txdesc);
-		dma_async_issue_pending(dws->txchan);
-	}
+	set_bit(TX_BUSY, &dws->dma_chan_busy);
+	dmaengine_submit(txdesc);
+	dma_async_issue_pending(dws->txchan);
 
 	ret = dw_spi_dma_wait(dws, xfer);
 	if (ret)
 		return ret;
 
-	if (txdesc && dws->master->cur_msg->status == -EINPROGRESS) {
+	if (dws->master->cur_msg->status == -EINPROGRESS) {
 		ret = dw_spi_dma_wait_tx_done(dws, xfer);
 		if (ret)
 			return ret;