--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -19,21 +19,6 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
-static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
- const struct nand_page_io_req *req,
- u16 *column)
-{
- struct nand_device *nand = spinand_to_nand(spinand);
- unsigned int shift;
-
- if (nand->memorg.planes_per_lun < 2)
- return;
-
- /* The plane number is passed in MSB just above the column address */
- shift = fls(nand->memorg.pagesize);
- *column |= req->pos.plane << shift;
-}
-
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
@@ -227,27 +212,21 @@ static int spinand_load_page_op(struct spinand_device *spinand,
static int spinand_read_from_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
- struct spi_mem_op op = *spinand->op_templates.read_cache;
struct nand_device *nand = spinand_to_nand(spinand);
struct mtd_info *mtd = nanddev_to_mtd(nand);
- struct nand_page_io_req adjreq = *req;
+ struct spi_mem_dirmap_desc *rdesc;
unsigned int nbytes = 0;
void *buf = NULL;
u16 column = 0;
- int ret;
+ ssize_t ret;
if (req->datalen) {
- adjreq.datalen = nanddev_page_size(nand);
- adjreq.dataoffs = 0;
- adjreq.databuf.in = spinand->databuf;
buf = spinand->databuf;
- nbytes = adjreq.datalen;
+ nbytes = nanddev_page_size(nand);
+ column = 0;
}
if (req->ooblen) {
- adjreq.ooblen = nanddev_per_page_oobsize(nand);
- adjreq.ooboffs = 0;
- adjreq.oobbuf.in = spinand->oobbuf;
nbytes += nanddev_per_page_oobsize(nand);
if (!buf) {
buf = spinand->oobbuf;
@@ -255,28 +234,18 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
}
}
- spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
- op.addr.val = column;
+ rdesc = spinand->dirmaps[req->pos.plane].rdesc;
- /*
- * Some controllers are limited in term of max RX data size. In this
- * case, just repeat the READ_CACHE operation after updating the
- * column.
- */
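+ /*
+ * spi_mem_dirmap_read() may return less than the requested length when
+ * the controller limits the transfer size, so keep reading until the
+ * whole page + OOB area has been transferred.
+ */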
while (nbytes) {
- op.data.buf.in = buf;
- op.data.nbytes = nbytes;
- ret = spi_mem_adjust_op_size(spinand->spimem, &op);
- if (ret)
- return ret;
+ ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
+ if (!ret || ret > nbytes)
+ ret = -EIO;
- ret = spi_mem_exec_op(spinand->spimem, &op);
- if (ret)
+ if (ret < 0)
return ret;
- buf += op.data.nbytes;
- nbytes -= op.data.nbytes;
- op.addr.val += op.data.nbytes;
+ nbytes -= ret;
+ column += ret;
}
if (req->datalen)
@@ -300,28 +269,18 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
static int spinand_write_to_cache_op(struct spinand_device *spinand,
const struct nand_page_io_req *req)
{
- struct spi_mem_op op = *spinand->op_templates.write_cache;
struct nand_device *nand = spinand_to_nand(spinand);
struct mtd_info *mtd = nanddev_to_mtd(nand);
- struct nand_page_io_req adjreq = *req;
- unsigned int nbytes = 0;
- void *buf = NULL;
- u16 column = 0;
- int ret;
+ struct spi_mem_dirmap_desc *wdesc;
+ unsigned int nbytes, column = 0;
+ ssize_t ret;
- memset(spinand->databuf, 0xff,
- nanddev_page_size(nand) +
- nanddev_per_page_oobsize(nand));
+ nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
+ memset(spinand->databuf, 0xff, nbytes);
- if (req->datalen) {
+ if (req->datalen)
memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
req->datalen);
- adjreq.dataoffs = 0;
- adjreq.datalen = nanddev_page_size(nand);
- adjreq.databuf.out = spinand->databuf;
- nbytes = adjreq.datalen;
- buf = spinand->databuf;
- }
if (req->ooblen) {
if (req->mode == MTD_OPS_AUTO_OOB)
@@ -332,52 +291,21 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
else
memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
req->ooblen);
-
- adjreq.ooblen = nanddev_per_page_oobsize(nand);
- adjreq.ooboffs = 0;
- nbytes += nanddev_per_page_oobsize(nand);
- if (!buf) {
- buf = spinand->oobbuf;
- column = nanddev_page_size(nand);
- }
}
- spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
-
- op = *spinand->op_templates.write_cache;
- op.addr.val = column;
+ wdesc = spinand->dirmaps[req->pos.plane].wdesc;
- /*
- * Some controllers are limited in term of max TX data size. In this
- * case, split the operation into one LOAD CACHE and one or more
- * LOAD RANDOM CACHE.
- */
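+ /*
+ * Likewise, spi_mem_dirmap_write() may perform a partial transfer, so
+ * keep pushing data until the whole page + OOB area has been written
+ * to the cache.
+ */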
while (nbytes) {
- op.data.buf.out = buf;
- op.data.nbytes = nbytes;
+ ret = spi_mem_dirmap_write(wdesc, column, nbytes,
+ spinand->databuf + column);
+ if (!ret || ret > nbytes)
+ ret = -EIO;
- ret = spi_mem_adjust_op_size(spinand->spimem, &op);
- if (ret)
- return ret;
-
- ret = spi_mem_exec_op(spinand->spimem, &op);
- if (ret)
+ if (ret < 0)
return ret;
- buf += op.data.nbytes;
- nbytes -= op.data.nbytes;
- op.addr.val += op.data.nbytes;
-
- /*
- * We need to use the RANDOM LOAD CACHE operation if there's
- * more than one iteration, because the LOAD operation resets
- * the cache to 0xff.
- */
- if (nbytes) {
- column = op.addr.val;
- op = *spinand->op_templates.update_cache;
- op.addr.val = column;
- }
+ nbytes -= ret;
+ column += ret;
}
return 0;
@@ -757,6 +685,97 @@ static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
return ret;
}
+static int spinand_create_dirmap(struct spinand_device *spinand,
+ unsigned int plane)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct spi_mem_dirmap_info info = {
+ .length = nanddev_page_size(nand) +
+ nanddev_per_page_oobsize(nand),
+ };
+ struct spi_mem_dirmap_desc *desc;
+
+ /* The plane number is passed in MSB just above the column address */
+ info.offset = plane << fls(nand->memorg.pagesize);
+
+ info.op_tmpl = *spinand->op_templates.update_cache;
+ desc = spi_mem_dirmap_create(spinand->spimem, &info);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ spinand->dirmaps[plane].wdesc = desc;
+
+ info.op_tmpl = *spinand->op_templates.read_cache;
+ desc = spi_mem_dirmap_create(spinand->spimem, &info);
+ if (IS_ERR(desc)) {
+ spi_mem_dirmap_destroy(spinand->dirmaps[plane].wdesc);
+ spinand->dirmaps[plane].wdesc = NULL;
+ return PTR_ERR(desc);
+ }
+
+ spinand->dirmaps[plane].rdesc = desc;
+
+ return 0;
+}
+
+static void spinand_destroy_dirmap(struct spinand_device *spinand,
+ unsigned int plane)
+{
+ spi_mem_dirmap_destroy(spinand->dirmaps[plane].rdesc);
+ spi_mem_dirmap_destroy(spinand->dirmaps[plane].wdesc);
+}
+
+static int spinand_create_dirmaps(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int i, ret;
+
+ spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
+ sizeof(*spinand->dirmaps) *
+ nand->memorg.planes_per_lun,
+ GFP_KERNEL);
+ if (!spinand->dirmaps)
+ return -ENOMEM;
+
+ for (i = 0; i < nand->memorg.planes_per_lun; i++) {
+ ret = spinand_create_dirmap(spinand, i);
+ if (ret)
+ goto err_destroy_dirmaps;
+ }
+
+ return 0;
+
+err_destroy_dirmaps:
+ for (i--; i >= 0; i--)
+ spinand_destroy_dirmap(spinand, i);
+
+ return ret;
+}
+
+static void spinand_destroy_dirmaps(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int i;
+
+ for (i = 0; i < nand->memorg.planes_per_lun; i++)
+ spinand_destroy_dirmap(spinand, i);
+}
+
+const struct spi_mem_op *
+spinand_find_supported_op(struct spinand_device *spinand,
+ const struct spi_mem_op *ops,
+ unsigned int nops)
+{
+ unsigned int i;
+
+ for (i = 0; i < nops; i++) {
+ if (spi_mem_supports_op(spinand->spimem, &ops[i]))
+ return &ops[i];
+ }
+
+ return NULL;
+}
+
static const struct nand_ops spinand_ops = {
.erase = spinand_erase,
.markbad = spinand_markbad,
@@ -1012,20 +1031,28 @@ static int spinand_init(struct spinand_device *spinand)
goto err_free_bufs;
}
+ ret = spinand_create_dirmaps(spinand);
+ if (ret) {
+ dev_err(dev,
+ "Failed to create direct mappings for read/write operations (err = %d)\n",
+ ret);
+ goto err_manuf_cleanup;
+ }
+
/* After power up, all blocks are locked, so unlock them here. */
for (i = 0; i < nand->memorg.ntargets; i++) {
ret = spinand_select_target(spinand, i);
if (ret)
- goto err_free_bufs;
+ goto err_destroy_dirmaps;
ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
if (ret)
- goto err_free_bufs;
+ goto err_destroy_dirmaps;
}
ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
if (ret)
- goto err_manuf_cleanup;
+ goto err_destroy_dirmaps;
/*
* Right now, we don't support ECC, so let the whole oob
@@ -1054,6 +1081,9 @@ static int spinand_init(struct spinand_device *spinand)
err_cleanup_nanddev:
nanddev_cleanup(nand);
+err_destroy_dirmaps:
+ spinand_destroy_dirmaps(spinand);
+
err_manuf_cleanup:
spinand_manufacturer_cleanup(spinand);
@@ -1068,6 +1098,7 @@ static void spinand_cleanup(struct spinand_device *spinand)
struct nand_device *nand = spinand_to_nand(spinand);
nanddev_cleanup(nand);
+ spinand_destroy_dirmaps(spinand);
spinand_manufacturer_cleanup(spinand);
kfree(spinand->databuf);
kfree(spinand->scratchbuf);
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -300,6 +300,11 @@ struct spinand_info {
__VA_ARGS__ \
}
+struct spinand_dirmap {
+ struct spi_mem_dirmap_desc *wdesc;
+ struct spi_mem_dirmap_desc *rdesc;
+};
+
/**
* struct spinand_device - SPI NAND device instance
* @base: NAND device instance
@@ -339,6 +344,8 @@ struct spinand_device {
const struct spi_mem_op *update_cache;
} op_templates;
+ struct spinand_dirmap *dirmaps;
+
int (*select_target)(struct spinand_device *spinand,
unsigned int target);
unsigned int cur_target;

Make use of the spi-mem direct mapping API to let advanced controllers
optimize read/write operations when they support direct mapping.

Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
---
Changes in v2:
- New patch
---
 drivers/mtd/nand/spi/core.c | 231 ++++++++++++++++++++----------------
 include/linux/mtd/spinand.h |   7 ++
 2 files changed, 138 insertions(+), 100 deletions(-)
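
Note for reviewers (not part of the patch): below is a minimal sketch of the
dirmap access pattern the new code relies on, assuming the per-plane
descriptors created by spinand_create_dirmap() above. The helper name is made
up for illustration; the loop mirrors spinand_read_from_cache_op() and handles
the short transfers spi_mem_dirmap_read() is allowed to return.

#include <linux/mtd/spinand.h>
#include <linux/spi/spi-mem.h>

/* Illustrative helper, not part of this patch. */
static int example_dirmap_page_read(struct spinand_device *spinand,
				    unsigned int plane)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct spi_mem_dirmap_desc *rdesc = spinand->dirmaps[plane].rdesc;
	/* One descriptor maps the whole page cache: in-band data + OOB. */
	unsigned int nbytes = nanddev_page_size(nand) +
			      nanddev_per_page_oobsize(nand);
	u8 *buf = spinand->databuf;
	unsigned int column = 0;
	ssize_t ret;

	while (nbytes) {
		/* A dirmap read may be shorter than requested: retry the rest. */
		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf + column);
		if (!ret || ret > nbytes)
			ret = -EIO;
		if (ret < 0)
			return ret;

		nbytes -= ret;
		column += ret;
	}

	return 0;
}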