From patchwork Sun Apr 22 18:35:20 2018
X-Patchwork-Submitter: Boris Brezillon
X-Patchwork-Id: 10355805
From: Boris Brezillon <boris.brezillon@bootlin.com>
To: David Woodhouse, Brian Norris, Boris Brezillon, Marek Vasut,
    Richard Weinberger, Cyrille Pitchen, linux-mtd@lists.infradead.org,
    Miquel Raynal, Mark Brown, linux-spi@vger.kernel.org
Cc: Peter Pan, Frieder Schrempf, Vignesh R, Yogesh Gaur, Rafał Miłecki,
    Kamal Dasu, Maxime Chevallier
Subject: [PATCH v3 7/9] spi: ti-qspi: Implement the spi_mem interface
Date: Sun, 22 Apr 2018 20:35:20 +0200
Message-Id: <20180422183522.11118-8-boris.brezillon@bootlin.com>
In-Reply-To: <20180422183522.11118-1-boris.brezillon@bootlin.com>
References: <20180422183522.11118-1-boris.brezillon@bootlin.com>

The spi_mem interface is meant to replace the spi_flash_read() one.
Implement the ->exec_op() method so that we can smoothly get rid of the
old interface.
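
[Editor's note, not part of the patch: for readers unfamiliar with spi-mem, the
sketch below shows the general shape of an ->exec_op() hookup. The foo_* names
are placeholders; the real ti-qspi implementation is in the diff that follows.]

    #include <linux/spi/spi-mem.h>

    /*
     * Illustrative sketch only (foo_* names are placeholders). A controller
     * accelerates the operations it can handle and returns -ENOTSUPP for the
     * rest, letting the spi-mem core fall back to regular SPI transfers
     * built from the op description.
     */
    static int foo_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
    {
    	/* Only accelerate reads in this sketch. */
    	if (op->data.dir != SPI_MEM_DATA_IN || !op->data.nbytes)
    		return -ENOTSUPP;

    	/* ... program the controller from op->cmd/addr/dummy/data ... */
    	return 0;
    }

    static const struct spi_controller_mem_ops foo_mem_ops = {
    	.exec_op = foo_exec_mem_op,
    };

    /* In probe: master->mem_ops = &foo_mem_ops; */
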
Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
---
Changes in v3:
- none

Changes in v2:
- include spi-mem.h
- treat op->addr.val differently since it's now an u64
- Fix 'buf is DMA-able' check
- Extract max mmap size from resource_size()
---
 drivers/spi/spi-ti-qspi.c | 84 +++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 71 insertions(+), 13 deletions(-)

diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index c24d9b45a27c..a37db01447a7 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -36,6 +36,7 @@
 #include <linux/sizes.h>
 
 #include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
 
 struct ti_qspi_regs {
 	u32 clkctrl;
@@ -50,6 +51,7 @@ struct ti_qspi {
 	struct spi_master	*master;
 	void __iomem		*base;
 	void __iomem		*mmap_base;
+	size_t			mmap_size;
 	struct regmap		*ctrl_base;
 	unsigned int		ctrl_reg;
 	struct clk		*fclk;
@@ -434,12 +436,10 @@ static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
 	return 0;
 }
 
-static int ti_qspi_dma_bounce_buffer(struct ti_qspi *qspi,
-				     struct spi_flash_read_message *msg)
+static int ti_qspi_dma_bounce_buffer(struct ti_qspi *qspi, loff_t offs,
+				     void *to, size_t readsize)
 {
-	size_t readsize = msg->len;
-	void *to = msg->buf;
-	dma_addr_t dma_src = qspi->mmap_phys_base + msg->from;
+	dma_addr_t dma_src = qspi->mmap_phys_base + offs;
 	int ret = 0;
 
 	/*
@@ -507,13 +507,14 @@ static void ti_qspi_disable_memory_map(struct spi_device *spi)
 	qspi->mmap_enabled = false;
 }
 
-static void ti_qspi_setup_mmap_read(struct spi_device *spi,
-				    struct spi_flash_read_message *msg)
+static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
+				    u8 data_nbits, u8 addr_width,
+				    u8 dummy_bytes)
 {
 	struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
-	u32 memval = msg->read_opcode;
+	u32 memval = opcode;
 
-	switch (msg->data_nbits) {
+	switch (data_nbits) {
 	case SPI_NBITS_QUAD:
 		memval |= QSPI_SETUP_RD_QUAD;
 		break;
@@ -524,8 +525,8 @@ static void ti_qspi_setup_mmap_read(struct spi_device *spi,
 		memval |= QSPI_SETUP_RD_NORMAL;
 		break;
 	}
-	memval |= ((msg->addr_width - 1) << QSPI_SETUP_ADDR_SHIFT |
-		   msg->dummy_bytes << QSPI_SETUP_DUMMY_SHIFT);
+	memval |= ((addr_width - 1) << QSPI_SETUP_ADDR_SHIFT |
+		   dummy_bytes << QSPI_SETUP_DUMMY_SHIFT);
 	ti_qspi_write(qspi, memval,
 		      QSPI_SPI_SETUP_REG(spi->chip_select));
 }
@@ -546,13 +547,15 @@ static int ti_qspi_spi_flash_read(struct spi_device *spi,
 
 	if (!qspi->mmap_enabled)
 		ti_qspi_enable_memory_map(spi);
-	ti_qspi_setup_mmap_read(spi, msg);
+	ti_qspi_setup_mmap_read(spi, msg->read_opcode, msg->data_nbits,
+				msg->addr_width, msg->dummy_bytes);
 
 	if (qspi->rx_chan) {
 		if (msg->cur_msg_mapped)
 			ret = ti_qspi_dma_xfer_sg(qspi, msg->rx_sg, msg->from);
 		else
-			ret = ti_qspi_dma_bounce_buffer(qspi, msg);
+			ret = ti_qspi_dma_bounce_buffer(qspi, msg->from,
+							msg->buf, msg->len);
 		if (ret)
 			goto err_unlock;
 	} else {
@@ -566,6 +569,58 @@ static int ti_qspi_spi_flash_read(struct spi_device *spi,
 	return ret;
 }
 
+static int ti_qspi_exec_mem_op(struct spi_mem *mem,
+			       const struct spi_mem_op *op)
+{
+	struct ti_qspi *qspi = spi_master_get_devdata(mem->spi->master);
+	u32 from = 0;
+	int ret = 0;
+
+	/* Only optimize read path. */
+	if (!op->data.nbytes || op->data.dir != SPI_MEM_DATA_IN ||
+	    !op->addr.nbytes || op->addr.nbytes > 4)
+		return -ENOTSUPP;
+
+	/* Address exceeds MMIO window size, fall back to regular mode. */
+	from = op->addr.val;
+	if (from + op->data.nbytes > qspi->mmap_size)
+		return -ENOTSUPP;
+
+	mutex_lock(&qspi->list_lock);
+
+	if (!qspi->mmap_enabled)
+		ti_qspi_enable_memory_map(mem->spi);
+	ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth,
+				op->addr.nbytes, op->dummy.nbytes);
+
+	if (qspi->rx_chan) {
+		struct sg_table sgt;
+
+		if (virt_addr_valid(op->data.buf.in) &&
+		    !spi_controller_dma_map_mem_op_data(mem->spi->master, op,
+							&sgt)) {
+			ret = ti_qspi_dma_xfer_sg(qspi, sgt, from);
+			spi_controller_dma_unmap_mem_op_data(mem->spi->master,
+							     op, &sgt);
+		} else {
+			ret = ti_qspi_dma_bounce_buffer(qspi, from,
+							op->data.buf.in,
+							op->data.nbytes);
+		}
+	} else {
+		memcpy_fromio(op->data.buf.in, qspi->mmap_base + from,
+			      op->data.nbytes);
+	}
+
+	mutex_unlock(&qspi->list_lock);
+
+	return ret;
+}
+
+static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
+	.exec_op = ti_qspi_exec_mem_op,
+};
+
 static int ti_qspi_start_transfer_one(struct spi_master *master,
 		struct spi_message *m)
 {
@@ -673,6 +728,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
 	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
 				     SPI_BPW_MASK(8);
 	master->spi_flash_read = ti_qspi_spi_flash_read;
+	master->mem_ops = &ti_qspi_mem_ops;
 
 	if (!of_property_read_u32(np, "num-cs", &num_cs))
 		master->num_chipselect = num_cs;
@@ -778,6 +834,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
 
 no_dma:
 	if (!qspi->rx_chan && res_mmap) {
+		qspi->mmap_size = resource_size(res_mmap);
 		qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap);
 		if (IS_ERR(qspi->mmap_base)) {
 			dev_info(&pdev->dev,
@@ -785,6 +842,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
 				 PTR_ERR(qspi->mmap_base));
 			qspi->mmap_base = NULL;
 			master->spi_flash_read = NULL;
+			master->mem_ops = NULL;
 		}
 	}
 	qspi->mmap_enabled = false;