Message ID | 20211130083202.14228-3-xiangsheng.hou@mediatek.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | Add Mediatek SPI Nand controller and convert ECC driver | expand |
Hi Xiangsheng, xiangsheng.hou@mediatek.com wrote on Tue, 30 Nov 2021 16:31:59 +0800: > Convert the Mediatek HW ECC engine to the ECC infrastructure with > pipelined case. > > Signed-off-by: Xiangsheng Hou <xiangsheng.hou@mediatek.com> > --- > drivers/mtd/nand/ecc-mtk.c | 614 +++++++++++++++++++++++++++++++ > include/linux/mtd/nand-ecc-mtk.h | 68 ++++ > 2 files changed, 682 insertions(+) > > diff --git a/drivers/mtd/nand/ecc-mtk.c b/drivers/mtd/nand/ecc-mtk.c > index 31d7c77d5c59..c44499b3d0a5 100644 > --- a/drivers/mtd/nand/ecc-mtk.c > +++ b/drivers/mtd/nand/ecc-mtk.c > @@ -16,6 +16,7 @@ > #include <linux/of_platform.h> > #include <linux/mutex.h> > > +#include <linux/mtd/nand.h> > #include <linux/mtd/nand-ecc-mtk.h> > > #define ECC_IDLE_MASK BIT(0) > @@ -41,11 +42,17 @@ > #define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE) > #define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON) > > +#define OOB_FREE_MAX_SIZE 8 > +#define OOB_FREE_MIN_SIZE 1 > + > struct mtk_ecc_caps { > u32 err_mask; > const u8 *ecc_strength; > const u32 *ecc_regs; > u8 num_ecc_strength; > + const u8 *spare_size; > + u8 num_spare_size; > + u32 max_section_size; > u8 ecc_mode_shift; > u32 parity_bits; > int pg_irq_sel; > @@ -79,6 +86,12 @@ static const u8 ecc_strength_mt7622[] = { > 4, 6, 8, 10, 12, 14, 16 > }; > > +/* spare size for each section that each IP supports */ > +static const u8 spare_size_mt7622[] = { > + 16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, > + 52, 62, 61, 63, 64, 67, 74 > +}; > + > enum mtk_ecc_regs { > ECC_ENCPAR00, > ECC_ENCIRQ_EN, > @@ -447,6 +460,604 @@ unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc) > } > EXPORT_SYMBOL(mtk_ecc_get_parity_bits); > > +static inline int mtk_ecc_data_off(struct nand_device *nand, int i) > +{ > + int eccsize = nand->ecc.ctx.conf.step_size; > + > + return i * eccsize; > +} > + > +static inline int mtk_ecc_oob_free_position(struct nand_device *nand, int i) > +{ > + struct mtk_ecc_engine *eng = 
nand_to_ecc_ctx(nand); > + int position; > + > + if (i < eng->bbm_ctl.section) > + position = (i + 1) * eng->oob_free; > + else if (i == eng->bbm_ctl.section) > + position = 0; > + else > + position = i * eng->oob_free; > + > + return position; > +} > + > +static inline int mtk_ecc_data_len(struct nand_device *nand) > +{ > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > + int eccsize = nand->ecc.ctx.conf.step_size; > + int eccbytes = eng->oob_ecc; > + > + return eccsize + eng->oob_free + eccbytes; > +} > + > +static inline u8 *mtk_ecc_section_ptr(struct nand_device *nand, int i) > +{ > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > + > + return eng->bounce_page_buf + i * mtk_ecc_data_len(nand); > +} > + > +static inline u8 *mtk_ecc_oob_free_ptr(struct nand_device *nand, int i) > +{ > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > + int eccsize = nand->ecc.ctx.conf.step_size; > + > + return eng->bounce_page_buf + i * mtk_ecc_data_len(nand) + eccsize; > +} > + > +static void mtk_ecc_no_bbm_swap(struct nand_device *a, u8 *b, u8 *c) > +{ > + /* nop */ Is this really useful? 
> +} > + > +static void mtk_ecc_bbm_swap(struct nand_device *nand, u8 *databuf, u8 *oobbuf) > +{ > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > + int step_size = nand->ecc.ctx.conf.step_size; > + u32 bbm_pos = eng->bbm_ctl.position; > + > + bbm_pos += eng->bbm_ctl.section * step_size; > + > + swap(oobbuf[0], databuf[bbm_pos]); > +} > + > +static void mtk_ecc_set_bbm_ctl(struct mtk_ecc_bbm_ctl *bbm_ctl, > + struct nand_device *nand) > +{ > + if (nanddev_page_size(nand) == 512) { > + bbm_ctl->bbm_swap = mtk_ecc_no_bbm_swap; > + } else { > + bbm_ctl->bbm_swap = mtk_ecc_bbm_swap; > + bbm_ctl->section = nanddev_page_size(nand) / > + mtk_ecc_data_len(nand); > + bbm_ctl->position = nanddev_page_size(nand) % > + mtk_ecc_data_len(nand); > + } > +} > + > +static int mtk_ecc_ooblayout_free(struct mtd_info *mtd, int section, > + struct mtd_oob_region *oob_region) > +{ > + struct nand_device *nand = mtd_to_nanddev(mtd); > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > + struct nand_ecc_props *conf = &nand->ecc.ctx.conf; > + u32 eccsteps, bbm_bytes = 0; > + > + eccsteps = mtd->writesize / conf->step_size; > + > + if (section >= eccsteps) > + return -ERANGE; > + > + /* Reserve 1 byte for BBM only for section 0 */ > + if (section == 0) > + bbm_bytes = 1; > + > + oob_region->length = eng->oob_free - bbm_bytes; > + oob_region->offset = section * eng->oob_free + bbm_bytes; > + > + return 0; > +} > + > +static int mtk_ecc_ooblayout_ecc(struct mtd_info *mtd, int section, > + struct mtd_oob_region *oob_region) > +{ > + struct nand_device *nand = mtd_to_nanddev(mtd); > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > + > + if (section) > + return -ERANGE; > + > + oob_region->offset = eng->oob_free * eng->nsteps; > + oob_region->length = mtd->oobsize - oob_region->offset; > + > + return 0; > +} > + > +static const struct mtd_ooblayout_ops mtk_ecc_ooblayout_ops = { > + .free = mtk_ecc_ooblayout_free, > + .ecc = mtk_ecc_ooblayout_ecc, > +}; > + > +const struct 
mtd_ooblayout_ops *mtk_ecc_get_ooblayout(void) > +{ > + return &mtk_ecc_ooblayout_ops; > +} > + > +static struct device *mtk_ecc_get_engine_dev(struct device *dev) > +{ > + struct platform_device *eccpdev; > + struct device_node *np; > + > + /* > + * The device node is only the host controller, > + * not the actual ECC engine when pipelined case. > + */ > + np = of_parse_phandle(dev->of_node, "nand-ecc-engine", 0); > + if (!np) > + return NULL; > + > + eccpdev = of_find_device_by_node(np); > + if (!eccpdev) { > + of_node_put(np); > + return NULL; > + } > + > + platform_device_put(eccpdev); > + of_node_put(np); > + > + return &eccpdev->dev; > +} As this will be the exact same function for all the pipelined engines, I am tempted to put this in the core. I'll soon send an iteration, stay tuned. > +/* > + * mtk_ecc_data_format() - Convert to/from MTK ECC on-flash data format > + * > + * MTK ECC engine organizes page data by section, the on-flash format as below: > + * || section 0 || section 1 || ... > + * || data | OOB free | OOB ECC || data || OOB free | OOB ECC || ... > + * > + * Therefore, it's necessary to convert data when reading/writing in raw mode. > + */ > +static void mtk_ecc_data_format(struct nand_device *nand, mtk_ecc_reorganize_data_layout()? > + struct nand_page_io_req *req) > +{ > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > + int step_size = nand->ecc.ctx.conf.step_size; > + void *databuf, *oobbuf; > + int i; > + > + if (req->type == NAND_PAGE_WRITE) { > + databuf = (void *)req->databuf.out; > + oobbuf = (void *)req->oobbuf.out; > + > + /* > + * Convert the source databuf and oobbuf to MTK ECC > + * on-flash data format. > + */ > + for (i = 0; i < eng->nsteps; i++) { > + if (i == eng->bbm_ctl.section) > + eng->bbm_ctl.bbm_swap(nand, > + databuf, oobbuf); Do you really need this swap? Isn't the overall move enough to put the BBM at the right place? 
> + memcpy(mtk_ecc_section_ptr(nand, i), > + databuf + mtk_ecc_data_off(nand, i), > + step_size); > + > + memcpy(mtk_ecc_oob_free_ptr(nand, i), > + oobbuf + mtk_ecc_oob_free_position(nand, i), > + eng->oob_free); > + > + memcpy(mtk_ecc_oob_free_ptr(nand, i) + eng->oob_free, > + oobbuf + eng->oob_free * eng->nsteps + > + i * eng->oob_ecc, > + eng->oob_ecc); > + } > + > + req->databuf.out = eng->bounce_page_buf; > + req->oobbuf.out = eng->bounce_oob_buf; > + } else { > + databuf = req->databuf.in; > + oobbuf = req->oobbuf.in; > + > + /* > + * Convert the on-flash MTK ECC data format to > + * destination databuf and oobbuf. > + */ > + memcpy(eng->bounce_page_buf, databuf, > + nanddev_page_size(nand)); > + memcpy(eng->bounce_oob_buf, oobbuf, > + nanddev_per_page_oobsize(nand)); > + > + for (i = 0; i < eng->nsteps; i++) { > + memcpy(databuf + mtk_ecc_data_off(nand, i), > + mtk_ecc_section_ptr(nand, i), step_size); > + > + memcpy(oobbuf + mtk_ecc_oob_free_position(nand, i), > + mtk_ecc_section_ptr(nand, i) + step_size, > + eng->oob_free); > + > + memcpy(oobbuf + eng->oob_free * eng->nsteps + > + i * eng->oob_ecc, > + mtk_ecc_section_ptr(nand, i) + step_size > + + eng->oob_free, > + eng->oob_ecc); > + > + if (i == eng->bbm_ctl.section) > + eng->bbm_ctl.bbm_swap(nand, > + databuf, oobbuf); > + } > + } > +} > + > +static void mtk_ecc_oob_free_shift(struct nand_device *nand, > + u8 *dst_buf, u8 *src_buf, bool write) > +{ > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > + u32 position; > + int i; > + > + for (i = 0; i < eng->nsteps; i++) { > + if (i < eng->bbm_ctl.section) > + position = (i + 1) * eng->oob_free; > + else if (i == eng->bbm_ctl.section) > + position = 0; > + else > + position = i * eng->oob_free; > + > + if (write) > + memcpy(dst_buf + i * eng->oob_free, src_buf + position, > + eng->oob_free); > + else > + memcpy(dst_buf + position, src_buf + i * eng->oob_free, > + eng->oob_free); > + } > +} > + > +static void 
mtk_ecc_set_section_size_and_strength(struct nand_device *nand) > +{ > + struct nand_ecc_props *reqs = &nand->ecc.requirements; > + struct nand_ecc_props *user = &nand->ecc.user_conf; > + struct nand_ecc_props *conf = &nand->ecc.ctx.conf; > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > + > + /* Configure the correction depending on the NAND device topology */ > + if (user->step_size && user->strength) { > + conf->step_size = user->step_size; > + conf->strength = user->strength; > + } else if (reqs->step_size && reqs->strength) { > + conf->step_size = reqs->step_size; > + conf->strength = reqs->strength; > + } > + > + /* > + * Align ECC strength and ECC size. > + * The MTK HW ECC engine only support 512 and 1024 ECC size. > + */ > + if (conf->step_size < 1024) { I prefer stronger checks than '<'. > + if (nanddev_page_size(nand) > 512 && > + eng->ecc->caps->max_section_size > 512) { > + conf->step_size = 1024; > + conf->strength <<= 1; the operation "<<= 1" is more readable as "* 2" IMHO. Same below in both directions. 
> + } else { > + conf->step_size = 512; > + } > + } else { > + conf->step_size = 1024; > + } > + > + eng->section_size = conf->step_size; > +} > + > +static int mtk_ecc_set_spare_per_section(struct nand_device *nand) > +{ > + struct nand_ecc_props *conf = &nand->ecc.ctx.conf; > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > + const u8 *spare = eng->ecc->caps->spare_size; > + u32 i, closest_spare = 0; > + > + eng->nsteps = nanddev_page_size(nand) / conf->step_size; > + eng->oob_per_section = nanddev_per_page_oobsize(nand) / eng->nsteps; > + > + if (conf->step_size == 1024) > + eng->oob_per_section >>= 1; > + > + if (eng->oob_per_section < spare[0]) { > + dev_err(eng->ecc->dev, "OOB size per section too small %d\n", > + eng->oob_per_section); > + return -EINVAL; > + } > + > + for (i = 0; i < eng->ecc->caps->num_spare_size; i++) { > + if (eng->oob_per_section >= spare[i] && > + spare[i] >= spare[closest_spare]) { > + closest_spare = i; > + if (eng->oob_per_section == spare[i]) > + break; > + } > + } > + > + eng->oob_per_section = spare[closest_spare]; > + eng->oob_per_section_idx = closest_spare; > + > + if (conf->step_size == 1024) > + eng->oob_per_section <<= 1; > + > + return 0; > +} > + > +int mtk_ecc_prepare_io_req_pipelined(struct nand_device *nand, > + struct nand_page_io_req *req) > +{ > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > + struct mtd_info *mtd = nanddev_to_mtd(nand); > + int ret; > + > + nand_ecc_tweak_req(&eng->req_ctx, req); > + > + /* Store the source buffer data to avoid modify source data */ > + if (req->type == NAND_PAGE_WRITE) { > + if (req->datalen) > + memcpy(eng->src_page_buf + req->dataoffs, > + req->databuf.out, > + req->datalen); > + > + if (req->ooblen) > + memcpy(eng->src_oob_buf + req->ooboffs, > + req->oobbuf.out, > + req->ooblen); > + } > + > + if (req->mode == MTD_OPS_RAW) { > + if (req->type == NAND_PAGE_WRITE) > + mtk_ecc_data_format(nand, req); > + > + return 0; > + } > + > + eng->ecc_cfg.mode = 
ECC_NFI_MODE; > + eng->ecc_cfg.sectors = eng->nsteps; > + eng->ecc_cfg.op = ECC_DECODE; > + > + if (req->type == NAND_PAGE_READ) > + return mtk_ecc_enable(eng->ecc, &eng->ecc_cfg); > + > + memset(eng->bounce_oob_buf, 0xff, nanddev_per_page_oobsize(nand)); > + if (req->ooblen) { > + if (req->mode == MTD_OPS_AUTO_OOB) { > + ret = mtd_ooblayout_set_databytes(mtd, > + req->oobbuf.out, > + eng->bounce_oob_buf, > + req->ooboffs, > + mtd->oobavail); > + if (ret) > + return ret; > + } else { > + memcpy(eng->bounce_oob_buf + req->ooboffs, > + req->oobbuf.out, > + req->ooblen); > + } > + } > + > + eng->bbm_ctl.bbm_swap(nand, (void *)req->databuf.out, > + eng->bounce_oob_buf); > + mtk_ecc_oob_free_shift(nand, (void *)req->oobbuf.out, > + eng->bounce_oob_buf, true); > + > + eng->ecc_cfg.op = ECC_ENCODE; > + > + return mtk_ecc_enable(eng->ecc, &eng->ecc_cfg); > +} > + > +int mtk_ecc_finish_io_req_pipelined(struct nand_device *nand, > + struct nand_page_io_req *req) > +{ > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > + struct mtd_info *mtd = nanddev_to_mtd(nand); > + struct mtk_ecc_stats stats; > + int ret; > + > + if (req->type == NAND_PAGE_WRITE) { > + /* Restore the source buffer data */ > + if (req->datalen) > + memcpy((void *)req->databuf.out, > + eng->src_page_buf + req->dataoffs, > + req->datalen); > + > + if (req->ooblen) > + memcpy((void *)req->oobbuf.out, > + eng->src_oob_buf + req->ooboffs, > + req->ooblen); > + > + if (req->mode != MTD_OPS_RAW) > + mtk_ecc_disable(eng->ecc); > + > + nand_ecc_restore_req(&eng->req_ctx, req); > + > + return 0; > + } > + > + if (req->mode == MTD_OPS_RAW) { > + mtk_ecc_data_format(nand, req); > + nand_ecc_restore_req(&eng->req_ctx, req); > + > + return 0; > + } > + > + ret = mtk_ecc_wait_done(eng->ecc, ECC_DECODE); > + if (ret) { > + ret = -ETIMEDOUT; > + goto out; > + } > + > + if (eng->read_empty) { > + memset(req->databuf.in, 0xff, nanddev_page_size(nand)); > + memset(req->oobbuf.in, 0xff, nanddev_per_page_oobsize(nand)); > 
+ ret = 0; > + > + goto out; > + } > + > + mtk_ecc_get_stats(eng->ecc, &stats, eng->nsteps); > + mtd->ecc_stats.corrected += stats.corrected; > + mtd->ecc_stats.failed += stats.failed; > + > + /* > + * Return -EBADMSG when an uncorrectable ECC error occurs. > + * Otherwise, return the bitflips. > + */ > + if (stats.failed) > + ret = -EBADMSG; > + else > + ret = stats.bitflips; > + > + memset(eng->bounce_oob_buf, 0xff, nanddev_per_page_oobsize(nand)); > + mtk_ecc_oob_free_shift(nand, eng->bounce_oob_buf, req->oobbuf.in, false); > + eng->bbm_ctl.bbm_swap(nand, req->databuf.in, eng->bounce_oob_buf); > + > + if (req->ooblen) { > + if (req->mode == MTD_OPS_AUTO_OOB) > + ret = mtd_ooblayout_get_databytes(mtd, > + req->oobbuf.in, > + eng->bounce_oob_buf, > + req->ooboffs, > + mtd->oobavail); > + else > + memcpy(req->oobbuf.in, > + eng->bounce_oob_buf + req->ooboffs, > + req->ooblen); > + } > + > +out: > + mtk_ecc_disable(eng->ecc); > + nand_ecc_restore_req(&eng->req_ctx, req); > + > + return ret; > +} > + > +int mtk_ecc_init_ctx_pipelined(struct nand_device *nand) > +{ > + struct nand_ecc_props *conf = &nand->ecc.ctx.conf; > + struct mtd_info *mtd = nanddev_to_mtd(nand); > + struct mtk_ecc_engine *eng; > + struct device *dev; > + int free, ret; > + > + /* > + * In the case of a pipelined engine, the device registering the ECC > + * engine is not the actual ECC engine device but the host controller. 
> + */ > + dev = mtk_ecc_get_engine_dev(nand->ecc.engine->dev); > + if (!dev) > + return -EINVAL; > + > + eng = devm_kzalloc(dev, sizeof(*eng), GFP_KERNEL); > + if (!eng) > + return -ENOMEM; > + > + nand->ecc.ctx.priv = eng; > + nand->ecc.engine->priv = eng; > + > + eng->ecc = dev_get_drvdata(dev); > + > + mtk_ecc_set_section_size_and_strength(nand); > + > + ret = mtk_ecc_set_spare_per_section(nand); > + if (ret) > + return ret; > + > + clk_prepare_enable(eng->ecc->clk); > + mtk_ecc_hw_init(eng->ecc); > + > + /* Calculate OOB free bytes except ECC parity data */ > + free = (conf->strength * mtk_ecc_get_parity_bits(eng->ecc) > + + 7) >> 3; > + free = eng->oob_per_section - free; > + > + /* > + * Enhance ECC strength if OOB left is bigger than max FDM size > + * or reduce ECC strength if OOB size is not enough for ECC > + * parity data. > + */ > + if (free > OOB_FREE_MAX_SIZE) > + eng->oob_ecc = eng->oob_per_section - OOB_FREE_MAX_SIZE; > + else if (free < 0) > + eng->oob_ecc = eng->oob_per_section - OOB_FREE_MIN_SIZE; > + > + /* Calculate and adjust ECC strength based on OOB ECC bytes */ > + conf->strength = (eng->oob_ecc << 3) / > + mtk_ecc_get_parity_bits(eng->ecc); > + mtk_ecc_adjust_strength(eng->ecc, &conf->strength); > + > + eng->oob_ecc = DIV_ROUND_UP(conf->strength * > + mtk_ecc_get_parity_bits(eng->ecc), 8); > + > + eng->oob_free = eng->oob_per_section - eng->oob_ecc; > + if (eng->oob_free > OOB_FREE_MAX_SIZE) > + eng->oob_free = OOB_FREE_MAX_SIZE; > + > + eng->oob_free_protected = OOB_FREE_MIN_SIZE; > + > + eng->oob_ecc = eng->oob_per_section - eng->oob_free; > + > + if (!mtd->ooblayout) > + mtd_set_ooblayout(mtd, mtk_ecc_get_ooblayout()); > + > + ret = nand_ecc_init_req_tweaking(&eng->req_ctx, nand); > + if (ret) > + return ret; > + > + eng->src_page_buf = kmalloc(nanddev_page_size(nand) + > + nanddev_per_page_oobsize(nand), GFP_KERNEL); > + eng->bounce_page_buf = kmalloc(nanddev_page_size(nand) + > + nanddev_per_page_oobsize(nand), GFP_KERNEL); > + if
(!eng->src_page_buf || !eng->bounce_page_buf) { > + ret = -ENOMEM; > + goto cleanup_req_tweak; > + } > + > + eng->src_oob_buf = eng->src_page_buf + nanddev_page_size(nand); > + eng->bounce_oob_buf = eng->bounce_page_buf + nanddev_page_size(nand); > + > + mtk_ecc_set_bbm_ctl(&eng->bbm_ctl, nand); > + eng->ecc_cfg.strength = conf->strength; > + eng->ecc_cfg.len = conf->step_size + eng->oob_free_protected; > + mtd->bitflip_threshold = conf->strength; > + > + return 0; > + > +cleanup_req_tweak: > + nand_ecc_cleanup_req_tweaking(&eng->req_ctx); > + > + return ret; > +} > + > +void mtk_ecc_cleanup_ctx_pipelined(struct nand_device *nand) > +{ > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > + > + if (eng) { > + nand_ecc_cleanup_req_tweaking(&eng->req_ctx); > + kfree(eng->src_page_buf); > + kfree(eng->bounce_page_buf); > + } > +} > + > +/* > + * The MTK ECC engine work at pipelined situation, > + * will be registered by the drivers that wrap it. > + */ > +static struct nand_ecc_engine_ops mtk_ecc_engine_pipelined_ops = { > + .init_ctx = mtk_ecc_init_ctx_pipelined, > + .cleanup_ctx = mtk_ecc_cleanup_ctx_pipelined, > + .prepare_io_req = mtk_ecc_prepare_io_req_pipelined, > + .finish_io_req = mtk_ecc_finish_io_req_pipelined, > +}; > + > +struct nand_ecc_engine_ops *mtk_ecc_get_pipelined_ops(void) > +{ > + return &mtk_ecc_engine_pipelined_ops; > +} > +EXPORT_SYMBOL(mtk_ecc_get_pipelined_ops); > + > static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = { > .err_mask = 0x3f, > .ecc_strength = ecc_strength_mt2701, > @@ -472,6 +1083,9 @@ static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = { > .ecc_strength = ecc_strength_mt7622, > .ecc_regs = mt7622_ecc_regs, > .num_ecc_strength = 7, > + .spare_size = spare_size_mt7622, > + .num_spare_size = 19, > + .max_section_size = 1024, > .ecc_mode_shift = 4, > .parity_bits = 13, > .pg_irq_sel = 0, > diff --git a/include/linux/mtd/nand-ecc-mtk.h b/include/linux/mtd/nand-ecc-mtk.h > index 0e48c36e6ca0..6d550032cbd9 100644 > --- 
a/include/linux/mtd/nand-ecc-mtk.h > +++ b/include/linux/mtd/nand-ecc-mtk.h > @@ -33,6 +33,61 @@ struct mtk_ecc_config { > u32 len; > }; > > +/** > + * struct mtk_ecc_bbm_ctl - Information relative to the BBM swap > + * @bbm_swap: BBM swap function > + * @section: Section number in data area for swap > + * @position: Position in @section for swap with BBM > + */ > +struct mtk_ecc_bbm_ctl { > + void (*bbm_swap)(struct nand_device *nand, u8 *databuf, u8 *oobbuf); > + u32 section; > + u32 position; > +}; > + > +/** > + * struct mtk_ecc_engine - Information relative to the ECC > + * @req_ctx: Save request context and tweak the original request to fit the > + * engine needs > + * @oob_per_section: OOB size for each section to store OOB free/ECC bytes > + * @oob_per_section_idx: The index for @oob_per_section in spare size array > + * @oob_ecc: OOB size for each section to store the ECC parity > + * @oob_free: OOB size for each section to store the OOB free bytes > + * @oob_free_protected: OOB free bytes will be protected by the ECC engine > + * @section_size: The size of each section > + * @read_empty: Indicate whether empty page for one read operation > + * @nsteps: The number of the sections > + * @src_page_buf: Buffer used to store source data buffer when write > + * @src_oob_buf: Buffer used to store source OOB buffer when write > + * @bounce_page_buf: Data bounce buffer > + * @bounce_oob_buf: OOB bounce buffer > + * @ecc: The ECC engine private data structure > + * @ecc_cfg: The configuration of each ECC operation > + * @bbm_ctl: Information relative to the BBM swap > + */ > +struct mtk_ecc_engine { > + struct nand_ecc_req_tweak_ctx req_ctx; > + > + u32 oob_per_section; > + u32 oob_per_section_idx; > + u32 oob_ecc; > + u32 oob_free; > + u32 oob_free_protected; > + u32 section_size; > + > + bool read_empty; > + u32 nsteps; > + > + u8 *src_page_buf; > + u8 *src_oob_buf; > + u8 *bounce_page_buf; > + u8 *bounce_oob_buf; > + > + struct mtk_ecc *ecc; > + struct 
mtk_ecc_config ecc_cfg; > + struct mtk_ecc_bbm_ctl bbm_ctl; > +}; This and above should not be exported and be located in the driver. > + > int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32); > void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int); > int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation); > @@ -44,4 +99,17 @@ unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc); > struct mtk_ecc *of_mtk_ecc_get(struct device_node *); > void mtk_ecc_release(struct mtk_ecc *); > > +#if IS_ENABLED(CONFIG_MTD_NAND_ECC_MTK) > + > +struct nand_ecc_engine_ops *mtk_ecc_get_pipelined_ops(void); > + > +#else /* !CONFIG_MTD_NAND_ECC_MTK */ > + > +struct nand_ecc_engine_ops *mtk_ecc_get_pipelined_ops(void) > +{ > + return NULL; > +} > + > +#endif /* CONFIG_MTD_NAND_ECC_MTK */ > + > #endif Thanks, Miquèl
Hi Miquel, On Thu, 2021-12-09 at 11:32 +0100, Miquel Raynal wrote: > Hi Xiangsheng, > > xiangsheng.hou@mediatek.com wrote on Tue, 30 Nov 2021 16:31:59 +0800: > > > > > +static void mtk_ecc_no_bbm_swap(struct nand_device *a, u8 *b, u8 > > *c) > > +{ > > + /* nop */ > > Is this really useful? For 512 bytes page size, it is no need to do BBM swap due to the ECC engine step size will be 512 bytes. However, there have 512 bytes SLC NAND page size in history, although have not seen such SPI/Parallel NAND device for now. Do you think there no need to consider this small page device? > > > +} > > + > > +static void mtk_ecc_bbm_swap(struct nand_device *nand, u8 > > *databuf, u8 *oobbuf) > > +{ > > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > > + int step_size = nand->ecc.ctx.conf.step_size; > > + u32 bbm_pos = eng->bbm_ctl.position; > > + > > + bbm_pos += eng->bbm_ctl.section * step_size; > > + > > + swap(oobbuf[0], databuf[bbm_pos]); > > +} > > + > > +static void mtk_ecc_set_bbm_ctl(struct mtk_ecc_bbm_ctl *bbm_ctl, > > + struct nand_device *nand) > > +{ > > + if (nanddev_page_size(nand) == 512) { > > + bbm_ctl->bbm_swap = mtk_ecc_no_bbm_swap; > > + } else { > > + bbm_ctl->bbm_swap = mtk_ecc_bbm_swap; > > + bbm_ctl->section = nanddev_page_size(nand) / > > + mtk_ecc_data_len(nand); > > + bbm_ctl->position = nanddev_page_size(nand) % > > + mtk_ecc_data_len(nand); > > + } > > +} > > > > + > > +static struct device *mtk_ecc_get_engine_dev(struct device *dev) > > +{ > > + struct platform_device *eccpdev; > > + struct device_node *np; > > + > > + /* > > + * The device node is only the host controller, > > + * not the actual ECC engine when pipelined case. 
> > + */ > > + np = of_parse_phandle(dev->of_node, "nand-ecc-engine", 0); > > + if (!np) > > + return NULL; > > + > > + eccpdev = of_find_device_by_node(np); > > + if (!eccpdev) { > > + of_node_put(np); > > + return NULL; > > + } > > + > > + platform_device_put(eccpdev); > > + of_node_put(np); > > + > > + return &eccpdev->dev; > > +} > > As this will be the exact same function for all the pipelined > engines, > I am tempted to put this in the core. I'll soon send a iteration, > stay > tuned. > Look forward to the function. > > +/* > > + * mtk_ecc_data_format() - Convert to/from MTK ECC on-flash data > > format > > + * > > + * MTK ECC engine organize page data by section, the on-flash > > format as bellow: > > + * || section 0 || section 1 || > > ... > > + * || data | OOB free | OOB ECC || data || OOB free | OOB ECC || > > ... > > + * > > + * Terefore, it`s necessary to convert data when reading/writing > > in raw mode. > > + */ > > +static void mtk_ecc_data_format(struct nand_device *nand, > > mtk_ecc_reorganize_data_layout()? Will be changed. > > > + struct nand_page_io_req *req) > > +{ > > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > > + int step_size = nand->ecc.ctx.conf.step_size; > > + void *databuf, *oobbuf; > > + int i; > > + > > + if (req->type == NAND_PAGE_WRITE) { > > + databuf = (void *)req->databuf.out; > > + oobbuf = (void *)req->oobbuf.out; > > + > > + /* > > + * Convert the source databuf and oobbuf to MTK ECC > > + * on-flash data format. > > + */ > > + for (i = 0; i < eng->nsteps; i++) { > > + if (i == eng->bbm_ctl.section) > > + eng->bbm_ctl.bbm_swap(nand, > > + databuf, oobbuf); > > Do you really need this swap? Isn't the overall move enough to put > the > BBM at the right place? > For OPS_RAW mode, need organize flash data in the MTK ECC engine data format. Other operation in this function only organize data by section and not include BBM swap. For other mode, this function will not be called. 
> > + memcpy(mtk_ecc_section_ptr(nand, i), > > + databuf + mtk_ecc_data_off(nand, i), > > + step_size); > > + > > + memcpy(mtk_ecc_oob_free_ptr(nand, i), > > + oobbuf + mtk_ecc_oob_free_position(nand, > > i), > > + eng->oob_free); > > + > > + memcpy(mtk_ecc_oob_free_ptr(nand, i) + eng- > > >oob_free, > > + oobbuf + eng->oob_free * eng->nsteps + > > + i * eng->oob_ecc, > > + eng->oob_ecc); > > + } > > + > > + req->databuf.out = eng->bounce_page_buf; > > + req->oobbuf.out = eng->bounce_oob_buf; > > + } else { > > + databuf = req->databuf.in; > > + oobbuf = req->oobbuf.in; > > + > > + /* > > + * Convert the on-flash MTK ECC data format to > > + * destination databuf and oobbuf. > > + */ > > + memcpy(eng->bounce_page_buf, databuf, > > + nanddev_page_size(nand)); > > + memcpy(eng->bounce_oob_buf, oobbuf, > > + nanddev_per_page_oobsize(nand)); > > + > > + for (i = 0; i < eng->nsteps; i++) { > > + memcpy(databuf + mtk_ecc_data_off(nand, i), > > + mtk_ecc_section_ptr(nand, i), > > step_size); > > + > > + memcpy(oobbuf + mtk_ecc_oob_free_position(nand, > > i), > > + mtk_ecc_section_ptr(nand, i) + > > step_size, > > + eng->oob_free); > > + > > + memcpy(oobbuf + eng->oob_free * eng->nsteps + > > + i * eng->oob_ecc, > > + mtk_ecc_section_ptr(nand, i) + step_size > > + + eng->oob_free, > > + eng->oob_ecc); > > + > > + if (i == eng->bbm_ctl.section) > > + eng->bbm_ctl.bbm_swap(nand, > > + databuf, oobbuf); > > + } > > + } > > +} > > + > > > > > > +/** > > + * struct mtk_ecc_engine - Information relative to the ECC > > + * @req_ctx: Save request context and tweak the original request > > to fit the > > + * engine needs > > + * @oob_per_section: OOB size for each section to store OOB > > free/ECC bytes > > + * @oob_per_section_idx: The index for @oob_per_section in spare > > size array > > + * @oob_ecc: OOB size for each section to store the ECC parity > > + * @oob_free: OOB size for each section to store the OOB free > > bytes > > + * @oob_free_protected: OOB free bytes will be 
protected by the > > ECC engine > > + * @section_size: The size of each section > > + * @read_empty: Indicate whether empty page for one read operation > > + * @nsteps: The number of the sections > > + * @src_page_buf: Buffer used to store source data buffer when > > write > > + * @src_oob_buf: Buffer used to store source OOB buffer when write > > + * @bounce_page_buf: Data bounce buffer > > + * @bounce_oob_buf: OOB bounce buffer > > + * @ecc: The ECC engine private data structure > > + * @ecc_cfg: The configuration of each ECC operation > > + * @bbm_ctl: Information relative to the BBM swap > > + */ > > +struct mtk_ecc_engine { > > + struct nand_ecc_req_tweak_ctx req_ctx; > > + > > + u32 oob_per_section; > > + u32 oob_per_section_idx; > > + u32 oob_ecc; > > + u32 oob_free; > > + u32 oob_free_protected; > > + u32 section_size; > > + > > + bool read_empty; > > + u32 nsteps; > > + > > + u8 *src_page_buf; > > + u8 *src_oob_buf; > > + u8 *bounce_page_buf; > > + u8 *bounce_oob_buf; > > + > > + struct mtk_ecc *ecc; > > + struct mtk_ecc_config ecc_cfg; > > + struct mtk_ecc_bbm_ctl bbm_ctl; > > +}; > > This and above should not be exported and be located in the driver. > I will fix this. Thanks Xiangsheng Hou
Hello, xiangsheng.hou@mediatek.com wrote on Fri, 10 Dec 2021 17:09:14 +0800: > Hi Miquel, > > On Thu, 2021-12-09 at 11:32 +0100, Miquel Raynal wrote: > > Hi Xiangsheng, > > > > xiangsheng.hou@mediatek.com wrote on Tue, 30 Nov 2021 16:31:59 +0800: > > > > > > > > +static void mtk_ecc_no_bbm_swap(struct nand_device *a, u8 *b, u8 > > > *c) > > > +{ > > > + /* nop */ > > > > Is this really useful? > > For 512 bytes page size, it is no need to do BBM swap due to the ECC > engine step size will be 512 bytes. > > However, there have 512 bytes SLC NAND page size in history, although > have not seen such SPI/Parallel NAND device for now. > > Do you think there no need to consider this small page device? Actually I was talking about the empty helper itself. But let's keep that aside for now, it's fine. > > > > > > +} > > > + > > > +static void mtk_ecc_bbm_swap(struct nand_device *nand, u8 > > > *databuf, u8 *oobbuf) > > > +{ > > > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > > > + int step_size = nand->ecc.ctx.conf.step_size; > > > + u32 bbm_pos = eng->bbm_ctl.position; > > > + > > > + bbm_pos += eng->bbm_ctl.section * step_size; > > > + > > > + swap(oobbuf[0], databuf[bbm_pos]); > > > +} > > > + > > > +static void mtk_ecc_set_bbm_ctl(struct mtk_ecc_bbm_ctl *bbm_ctl, > > > + struct nand_device *nand) > > > +{ > > > + if (nanddev_page_size(nand) == 512) { > > > + bbm_ctl->bbm_swap = mtk_ecc_no_bbm_swap; > > > + } else { > > > + bbm_ctl->bbm_swap = mtk_ecc_bbm_swap; > > > + bbm_ctl->section = nanddev_page_size(nand) / > > > + mtk_ecc_data_len(nand); > > > + bbm_ctl->position = nanddev_page_size(nand) % > > > + mtk_ecc_data_len(nand); > > > + } > > > +} > > > > > > + > > > +static struct device *mtk_ecc_get_engine_dev(struct device *dev) > > > +{ > > > + struct platform_device *eccpdev; > > > + struct device_node *np; > > > + > > > + /* > > > + * The device node is only the host controller, > > > + * not the actual ECC engine when pipelined case. 
> > > + */ > > > + np = of_parse_phandle(dev->of_node, "nand-ecc-engine", 0); > > > + if (!np) > > > + return NULL; > > > + > > > + eccpdev = of_find_device_by_node(np); > > > + if (!eccpdev) { > > > + of_node_put(np); > > > + return NULL; > > > + } > > > + > > > + platform_device_put(eccpdev); > > > + of_node_put(np); > > > + > > > + return &eccpdev->dev; > > > +} > > > > As this will be the exact same function for all the pipelined > > engines, > > I am tempted to put this in the core. I'll soon send a iteration, > > stay > > tuned. > > > > Look forward to the function. I sent the new version yesterday but I * forgot to CC: you * forgot about that function as well Let's ignore this comment for now, send your driver with the same function in it and I'll clean that up later. Here is the new iteration, sorry for forgetting to send it to you as well: https://lore.kernel.org/linux-mtd/20211209174046.535229-1-miquel.raynal@bootlin.com/T/ And here is a Github branch as well: https://github.com/miquelraynal/linux/tree/ecc-engine > > > + struct nand_page_io_req *req) > > > +{ > > > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > > > + int step_size = nand->ecc.ctx.conf.step_size; > > > + void *databuf, *oobbuf; > > > + int i; > > > + > > > + if (req->type == NAND_PAGE_WRITE) { > > > + databuf = (void *)req->databuf.out; > > > + oobbuf = (void *)req->oobbuf.out; > > > + > > > + /* > > > + * Convert the source databuf and oobbuf to MTK ECC > > > + * on-flash data format. > > > + */ > > > + for (i = 0; i < eng->nsteps; i++) { > > > + if (i == eng->bbm_ctl.section) > > > + eng->bbm_ctl.bbm_swap(nand, > > > + databuf, oobbuf); > > > > Do you really need this swap? Isn't the overall move enough to put > > the > > BBM at the right place? > > > > For OPS_RAW mode, need organize flash data in the MTK ECC engine data > format. Other operation in this function only organize data by section > and not include BBM swap. > > For other mode, this function will not be called. 
Can you try to explain this with an ascii schema again? I'm sorry but I don't follow it. Is the BBM placed in the first bytes of the first oob area by the engine? Or is it placed somewhere else? Thanks, Miquèl
Hi Miquel, On Fri, 2021-12-10 at 10:34 +0100, Miquel Raynal wrote: > Hello, > > xiangsheng.hou@mediatek.com wrote on Fri, 10 Dec 2021 17:09:14 +0800: > > > > > As this will be the exact same function for all the pipelined > > > engines, > > > I am tempted to put this in the core. I'll soon send a iteration, > > > stay > > > tuned. > > > > > > > Look forward to the function. > > I sent the new version yesterday but I > * forgot to CC: you > * forgot about that function as well > > Let's ignore this comment for now, send your driver with the same > function in it and I'll clean that up later. > > Here is the new iteration, sorry for forgetting to send it to you as > well: > https://lore.kernel.org/linux-mtd/20211209174046.535229-1-miquel.raynal@bootlin.com/T/ > And here is a Github branch as well: > https://github.com/miquelraynal/linux/tree/ecc-engine Got it, Thanks. > > > > > + struct nand_page_io_req *req) > > > > +{ > > > > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > > > > + int step_size = nand->ecc.ctx.conf.step_size; > > > > + void *databuf, *oobbuf; > > > > + int i; > > > > + > > > > + if (req->type == NAND_PAGE_WRITE) { > > > > + databuf = (void *)req->databuf.out; > > > > + oobbuf = (void *)req->oobbuf.out; > > > > + > > > > + /* > > > > + * Convert the source databuf and oobbuf to MTK > > > > ECC > > > > + * on-flash data format. > > > > + */ > > > > + for (i = 0; i < eng->nsteps; i++) { > > > > + if (i == eng->bbm_ctl.section) > > > > + eng->bbm_ctl.bbm_swap(nand, > > > > + databuf, > > > > oobbuf); > > > > > > Do you really need this swap? Isn't the overall move enough to > > > put > > > the > > > BBM at the right place? > > > > > > > For OPS_RAW mode, need organize flash data in the MTK ECC engine > > data > > format. Other operation in this function only organize data by > > section > > and not include BBM swap. > > > > For other mode, this function will not be called. > > Can you try to explain this with an ascii schema again? 
I'm sorry but > I > don't follow it. Is the BBM placed in the first bytes of the first > oob > area by the engine? Or is it place somewhere else? > Yes, the BBM will at the first OOB area in NAND standard layout after BBM swap. 0. Differential on-flash data layout NAND standard page layout +------------------------------+------------+ | | | | main area | OOB area | | | | +------------------------------+------------+ MTK ECC on-flash page layout (2 section for example) +------------+--------+------------+--------+ | | | | | | section(0) | OOB(0) | section(1) | OOB(1) | | | | | | +------------+--------+------------+--------+ The standard BBM position will be section(1) main data, need do the BBM swap operation. request buffer include req->databuf and req->oobbuf. +----------------------------+ | | | req->databuf | | | +----------------------------+ +-------------+ | | | req->oobbuf | | | +-------------+ 1. For the OPS_RAW mode Expect the on-flash data format is like MTK ECC layout. The snfi controller will put the on-flash data as is spi_mem_op->data.buf.out. Therefore, the ECC engine have to reorganize the request data and OOB buffer in 4 part for each section in OPS_RAW mode. 1) BBM swap, only for the section need do the swap 2) section main data 3) OOB free data 4) OOB ECC data The BBM swap will ensure the BBM position in MTK ECC on-flash layout is same as NAND standard layout in OPS_RAW mode. for (i = 0; i < eng->nsteps; i++) { /* part 1: BBM swap */ if (i == eng->bbm_ctl.section) eng->bbm_ctl.bbm_swap(nand, databuf, oobbuf); /* part 2: main data in this section */ memcpy(mtk_ecc_section_ptr(nand, i), databuf + mtk_ecc_data_off(nand, i), step_size); /* part 3: OOB free data */ memcpy(mtk_ecc_oob_free_ptr(nand, i), oobbuf + mtk_ecc_oob_free_position(nand, i), eng->oob_free); /* part 4: OOB ECC data */ memcpy(mtk_ecc_oob_free_ptr(nand, i) + eng->oob_free, oobbuf + eng->oob_free * eng->nsteps + i * eng->oob_ecc, eng->oob_ecc); } 2. 
For non OPS_RAW mode The snfi have a function called auto format with ECC enable. This will auto reorganize the request data and oob data in MTK ECC page layout by the snfi controller except the BBM position. Therefore, the ECC engine only need do the BBM swap after set OOB data bytes in OPS_AUTO or after memcpy oob data in OPS_PLACE_OOB for write operation. The BBM swap also ensure the BBM position in MTK ECC on-flash layout is same as NAND standard layout in non OPS_RAW mode. if (req->ooblen) { if (req->mode == MTD_OPS_AUTO_OOB) { ret = mtd_ooblayout_set_databytes(mtd, req->oobbuf.out, eng->bounce_oob_buf, req->ooboffs, mtd->oobavail); if (ret) return ret; } else { memcpy(eng->bounce_oob_buf + req->ooboffs, req->oobbuf.out, req->ooblen); } } eng->bbm_ctl.bbm_swap(nand, (void *)req->databuf.out, eng->bounce_oob_buf); Thanks Xiangsheng Hou
Hi Xiangsheng, xiangsheng.hou@mediatek.com wrote on Sat, 11 Dec 2021 11:25:46 +0800: > Hi Miquel, > > On Fri, 2021-12-10 at 10:34 +0100, Miquel Raynal wrote: > > Hello, > > > > xiangsheng.hou@mediatek.com wrote on Fri, 10 Dec 2021 17:09:14 +0800: > > > > > > > As this will be the exact same function for all the pipelined > > > > engines, > > > > I am tempted to put this in the core. I'll soon send a iteration, > > > > stay > > > > tuned. > > > > > > > > > > Look forward to the function. > > > > I sent the new version yesterday but I > > * forgot to CC: you > > * forgot about that function as well > > > > Let's ignore this comment for now, send your driver with the same > > function in it and I'll clean that up later. > > > > Here is the new iteration, sorry for forgetting to send it to you as > > well: > > > https://lore.kernel.org/linux-mtd/20211209174046.535229-1-miquel.raynal@bootlin.com/T/ > > And here is a Github branch as well: > > https://github.com/miquelraynal/linux/tree/ecc-engine > > Got it, Thanks. > > > > > > > > + struct nand_page_io_req *req) > > > > > +{ > > > > > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > > > > > + int step_size = nand->ecc.ctx.conf.step_size; > > > > > + void *databuf, *oobbuf; > > > > > + int i; > > > > > + > > > > > + if (req->type == NAND_PAGE_WRITE) { > > > > > + databuf = (void *)req->databuf.out; > > > > > + oobbuf = (void *)req->oobbuf.out; > > > > > + > > > > > + /* > > > > > + * Convert the source databuf and oobbuf to MTK > > > > > ECC > > > > > + * on-flash data format. > > > > > + */ > > > > > + for (i = 0; i < eng->nsteps; i++) { > > > > > + if (i == eng->bbm_ctl.section) > > > > > + eng->bbm_ctl.bbm_swap(nand, > > > > > + databuf, > > > > > oobbuf); > > > > > > > > Do you really need this swap? Isn't the overall move enough to > > > > put > > > > the > > > > BBM at the right place? > > > > > > > > > > For OPS_RAW mode, need organize flash data in the MTK ECC engine > > > data > > > format. 
Other operation in this function only organize data by > > > section > > > and not include BBM swap. > > > > > > For other mode, this function will not be called. > > > > Can you try to explain this with an ascii schema again? I'm sorry but > > I > > don't follow it. Is the BBM placed in the first bytes of the first > > oob > > area by the engine? Or is it place somewhere else? > > > > Yes, the BBM will at the first OOB area in NAND standard layout after > BBM swap. > > 0. Differential on-flash data layout > > NAND standard page layout > +------------------------------+------------+ > | | | > | main area | OOB area | > | | | > +------------------------------+------------+ > > MTK ECC on-flash page layout (2 section for example) > +------------+--------+------------+--------+ > | | | | | > | section(0) | OOB(0) | section(1) | OOB(1) | > | | | | | > +------------+--------+------------+--------+ I think we are aligned on that part. The BBM is purely a user conception, it is not something wired in the hardware. What I mean is: why do *you* think the BBM should be located in the middle of section #1 ? There is one layout: the layout from the NAND/MTD perspective. There is another layout: the layout of your ECC engine. Just consider that the BBM should be at byte 0 of OOB #0 and you will not need any BBM swap operation anymore. I don't understand why you absolutely want to put it in section #1. > The standard BBM position will be section(1) main data, > need do the BBM swap operation. > > request buffer include req->databuf and req->oobbuf. > +----------------------------+ > | | > | req->databuf | > | | > +----------------------------+ > > +-------------+ > | | > | req->oobbuf | > | | > +-------------+ > > 1. For the OPS_RAW mode > > Expect the on-flash data format is like MTK ECC layout. > The snfi controller will put the on-flash data as is > spi_mem_op->data.buf.out. 
> > Therefore, the ECC engine have to reorganize the request > data and OOB buffer in 4 part for each section in > OPS_RAW mode. > > 1) BBM swap, only for the section need do the swap > 2) section main data > 3) OOB free data > 4) OOB ECC data > > The BBM swap will ensure the BBM position in MTK ECC > on-flash layout is same as NAND standard layout in > OPS_RAW mode. > > for (i = 0; i < eng->nsteps; i++) { > > /* part 1: BBM swap */ > if (i == eng->bbm_ctl.section) > eng->bbm_ctl.bbm_swap(nand, > databuf, oobbuf); > > /* part 2: main data in this section */ > memcpy(mtk_ecc_section_ptr(nand, i), > databuf + mtk_ecc_data_off(nand, i), > step_size); > > /* part 3: OOB free data */ > memcpy(mtk_ecc_oob_free_ptr(nand, i), > oobbuf + mtk_ecc_oob_free_position(nand, i), > eng->oob_free); > > /* part 4: OOB ECC data */ > memcpy(mtk_ecc_oob_free_ptr(nand, i) + eng->oob_free, > oobbuf + eng->oob_free * eng->nsteps + > i * eng->oob_ecc, > eng->oob_ecc); > } > > 2. For non OPS_RAW mode > > The snfi have a function called auto format with ECC enable. > This will auto reorganize the request data and oob data in > MTK ECC page layout by the snfi controller except the BBM position. > > Therefore, the ECC engine only need do the BBM swap after set OOB data > bytes in OPS_AUTO or after memcpy oob data in OPS_PLACE_OOB for write > operation. > > The BBM swap also ensure the BBM position in MTK ECC on-flash > layout is > same as NAND standard layout in non OPS_RAW mode. > > if (req->ooblen) { > if (req->mode == MTD_OPS_AUTO_OOB) { > ret = mtd_ooblayout_set_databytes(mtd, > req->oobbuf.out, > eng->bounce_oob_buf, > req->ooboffs, > mtd->oobavail); > if (ret) > return ret; > } else { > memcpy(eng->bounce_oob_buf + req->ooboffs, > req->oobbuf.out, > req->ooblen); > } > } > > eng->bbm_ctl.bbm_swap(nand, (void *)req->databuf.out, > eng->bounce_oob_buf); > > Thanks > Xiangsheng Hou Thanks, Miquèl
Hi Miquel, On Mon, 2021-12-13 at 10:29 +0100, Miquel Raynal wrote: > Hi Xiangsheng, > > xiangsheng.hou@mediatek.com wrote on Sat, 11 Dec 2021 11:25:46 +0800: > > > > > > > > > > > + struct nand_page_io_req *req) > > > > > > +{ > > > > > > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > > > > > > + int step_size = nand->ecc.ctx.conf.step_size; > > > > > > + void *databuf, *oobbuf; > > > > > > + int i; > > > > > > + > > > > > > + if (req->type == NAND_PAGE_WRITE) { > > > > > > + databuf = (void *)req->databuf.out; > > > > > > + oobbuf = (void *)req->oobbuf.out; > > > > > > + > > > > > > + /* > > > > > > + * Convert the source databuf and oobbuf to MTK > > > > > > ECC > > > > > > + * on-flash data format. > > > > > > + */ > > > > > > + for (i = 0; i < eng->nsteps; i++) { > > > > > > + if (i == eng->bbm_ctl.section) > > > > > > + eng->bbm_ctl.bbm_swap(nand, > > > > > > + databuf, > > > > > > oobbuf); > > > > > > > > > > Do you really need this swap? Isn't the overall move enough > > > > > to > > > > > put > > > > > the > > > > > BBM at the right place? > > > > > > > > > > > > > For OPS_RAW mode, need organize flash data in the MTK ECC > > > > engine > > > > data > > > > format. Other operation in this function only organize data by > > > > section > > > > and not include BBM swap. > > > > > > > > For other mode, this function will not be called. > > > > > > Can you try to explain this with an ascii schema again? I'm sorry > > > but > > > I > > > don't follow it. Is the BBM placed in the first bytes of the > > > first > > > oob > > > area by the engine? Or is it place somewhere else? > > > > > > > Yes, the BBM will at the first OOB area in NAND standard layout > > after > > BBM swap. > > > > 0. 
Differential on-flash data layout > > > > NAND standard page layout > > +------------------------------+------------+ > > > | | > > > main area | OOB area | > > > | | > > > > +------------------------------+------------+ > > > > MTK ECC on-flash page layout (2 section for example) > > +------------+--------+------------+--------+ > > > | | | | > > > section(0) | OOB(0) | section(1) | OOB(1) | > > > | | | | > > > > +------------+--------+------------+--------+ > > I think we are aligned on that part. > > The BBM is purely a user conception, it is not something wired in the > hardware. What I mean is: why do *you* think the BBM should be > located > in the middle of section #1 ? Take NAND page 2KB and OOB 64 bytes for example. For the NAND perspective the BBM is located at OOB #0, the column address is 2048 in one page, and this should not only a user conception. No matter what layout, the BBM position need at column 2048(OOB #0). Because of the NAND device specification arrange this. That is, The BBM position of a worn bad block(user mark) need consistent with a factory bad block(flash vendor mark). For the MTK ECC engine reorganize data by section in unit. The step size will be 1024 bytes and the OOB area for each section will be 32 bytes with NAND page 2KB and OOB 64 bytes. The on-flash page data lauout for MTK ECC engine, column 2048 will be main data in the middle of section #1. +----------------+-------+----------------+-------+ | | | | | | 1024B | 32B | 1024B | 32B | | | | | | +----------------+-------+----------------+-------+ > > There is one layout: the layout from the NAND/MTD perspective. > There is another layout: the layout of your ECC engine. > > Just consider that the BBM should be at byte 0 of OOB #0 and you will > not need any BBM swap operation anymore. I don't understand why you > absolutely want to put it in section #1. 
For the read/write both in OPS_RAW mode, the data layout will be organized by the ECC engine, no matter how the layout comply with, this will be workable. However, it will be chaotic when mix OPS_RAW operation with OPS_AUTO_OOB/OPS_PLACE_OOB. That is, the OPS_RAW mode on-flash data layout need comply with OPS_AUTO_OOB/OPS_PLACE_OOB, this is the reorganize function purpose. Just take the mtd/tests/nandbiterrs.c for example. It will write data in OPS_PLACE_OOB mode and rewrite data in OPS_RAW mode after insert one bitflip. Then read in OPS_PLACE_OOB mode to check the bitflips. > > The standard BBM position will be section(1) main data, > > need do the BBM swap operation. > > > > request buffer include req->databuf and req->oobbuf. > > +----------------------------+ > > > | > > > req->databuf | > > > | > > > > +----------------------------+ > > > > +-------------+ > > > | > > > req->oobbuf | > > > | > > > > +-------------+ > > > > 1. For the OPS_RAW mode > > > > Expect the on-flash data format is like MTK ECC layout. > > The snfi controller will put the on-flash data as is > > spi_mem_op->data.buf.out. > > > > Therefore, the ECC engine have to reorganize the request > > data and OOB buffer in 4 part for each section in > > OPS_RAW mode. > > > > 1) BBM swap, only for the section need do the swap > > 2) section main data > > 3) OOB free data > > 4) OOB ECC data > > > > The BBM swap will ensure the BBM position in MTK ECC > > on-flash layout is same as NAND standard layout in > > OPS_RAW mode. 
> > > > for (i = 0; i < eng->nsteps; i++) { > > > > /* part 1: BBM swap */ > > if (i == eng->bbm_ctl.section) > > eng->bbm_ctl.bbm_swap(nand, > > databuf, oobbuf); > > > > /* part 2: main data in this section */ > > memcpy(mtk_ecc_section_ptr(nand, i), > > databuf + mtk_ecc_data_off(nand, i), > > step_size); > > > > /* part 3: OOB free data */ > > memcpy(mtk_ecc_oob_free_ptr(nand, i), > > oobbuf + mtk_ecc_oob_free_position(nand, i), > > eng->oob_free); > > > > /* part 4: OOB ECC data */ > > memcpy(mtk_ecc_oob_free_ptr(nand, i) + eng->oob_free, > > oobbuf + eng->oob_free * eng->nsteps + > > i * eng->oob_ecc, > > eng->oob_ecc); > > } > > > > 2. For non OPS_RAW mode > > > > The snfi have a function called auto format with ECC enable. > > This will auto reorganize the request data and oob data in > > MTK ECC page layout by the snfi controller except the BBM position. > > > > Therefore, the ECC engine only need do the BBM swap after set OOB > > data > > bytes in OPS_AUTO or after memcpy oob data in OPS_PLACE_OOB for > > write > > operation. > > > > The BBM swap also ensure the BBM position in MTK ECC on-flash > > layout is > > same as NAND standard layout in non OPS_RAW mode. > > > > if (req->ooblen) { > > if (req->mode == MTD_OPS_AUTO_OOB) { > > ret = mtd_ooblayout_set_databytes(mtd, > > req->oobbuf.out, > > eng- > > >bounce_oob_buf, > > req->ooboffs, > > mtd->oobavail); > > if (ret) > > return ret; > > } else { > > memcpy(eng->bounce_oob_buf + req->ooboffs, > > req->oobbuf.out, > > req->ooblen); > > } > > } > > > > eng->bbm_ctl.bbm_swap(nand, (void *)req->databuf.out, > > eng->bounce_oob_buf); > > Thanks Xiangsheng Hou
Hi xiangsheng.hou, xiangsheng.hou@mediatek.com wrote on Tue, 14 Dec 2021 11:32:14 +0800: > Hi Miquel, > > > On Mon, 2021-12-13 at 10:29 +0100, Miquel Raynal wrote: > > Hi Xiangsheng, > > > > xiangsheng.hou@mediatek.com wrote on Sat, 11 Dec 2021 11:25:46 +0800: > > > > > > > > > > > > > > + struct nand_page_io_req *req) > > > > > > > +{ > > > > > > > + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); > > > > > > > + int step_size = nand->ecc.ctx.conf.step_size; > > > > > > > + void *databuf, *oobbuf; > > > > > > > + int i; > > > > > > > + > > > > > > > + if (req->type == NAND_PAGE_WRITE) { > > > > > > > + databuf = (void *)req->databuf.out; > > > > > > > + oobbuf = (void *)req->oobbuf.out; > > > > > > > + > > > > > > > + /* > > > > > > > + * Convert the source databuf and oobbuf to MTK > > > > > > > ECC > > > > > > > + * on-flash data format. > > > > > > > + */ > > > > > > > + for (i = 0; i < eng->nsteps; i++) { > > > > > > > + if (i == eng->bbm_ctl.section) > > > > > > > + eng->bbm_ctl.bbm_swap(nand, > > > > > > > + databuf, > > > > > > > oobbuf); > > > > > > > > > > > > Do you really need this swap? Isn't the overall move enough > > > > > > to > > > > > > put > > > > > > the > > > > > > BBM at the right place? > > > > > > > > > > > > > > > > For OPS_RAW mode, need organize flash data in the MTK ECC > > > > > engine > > > > > data > > > > > format. Other operation in this function only organize data by > > > > > section > > > > > and not include BBM swap. > > > > > > > > > > For other mode, this function will not be called. > > > > > > > > Can you try to explain this with an ascii schema again? I'm sorry > > > > but > > > > I > > > > don't follow it. Is the BBM placed in the first bytes of the > > > > first > > > > oob > > > > area by the engine? Or is it place somewhere else? > > > > > > > > > > Yes, the BBM will at the first OOB area in NAND standard layout > > > after > > > BBM swap. > > > > > > 0. 
Differential on-flash data layout > > > > > > NAND standard page layout > > > +------------------------------+------------+ > > > > | | > > > > main area | OOB area | > > > > | | > > > > > > +------------------------------+------------+ > > > > > > MTK ECC on-flash page layout (2 section for example) > > > +------------+--------+------------+--------+ > > > > | | | | > > > > section(0) | OOB(0) | section(1) | OOB(1) | > > > > | | | | > > > > > > +------------+--------+------------+--------+ > > > > I think we are aligned on that part. > > > > The BBM is purely a user conception, it is not something wired in the > > hardware. What I mean is: why do *you* think the BBM should be > > located > > in the middle of section #1 ? > > Take NAND page 2KB and OOB 64 bytes for example. > > For the NAND perspective the BBM is located at OOB #0, the column > address is 2048 in one page, and this should not only a user > conception. > > No matter what layout, the BBM position need at column 2048(OOB #0). > Because of the NAND device specification arrange this. > > That is, The BBM position of a worn bad block(user mark) need > consistent with a factory bad block(flash vendor mark). Yeah actually you're right, I guess the factory bad blocks will be located at the "wrong" position regarding the engine layout... Fine for this function. I'll send today a v5 for the series with a new helper: nand_ecc_get_engine_dev() (so you don't need to have your own) as well as a change in the API of spi_mem_generic_supports_op() that you must use. Thanks, Miquèl > For the MTK ECC engine reorganize data by section in unit. > The step size will be 1024 bytes and the OOB area for each section will > be 32 bytes with NAND page 2KB and OOB 64 bytes. > > The on-flash page data lauout for MTK ECC engine, > column 2048 will be main data in the middle of section #1. 
> +----------------+-------+----------------+-------+ > | | | | | > | 1024B | 32B | 1024B | 32B | > | | | | | > +----------------+-------+----------------+-------+ > > > > > There is one layout: the layout from the NAND/MTD perspective. > > There is another layout: the layout of your ECC engine. > > > > Just consider that the BBM should be at byte 0 of OOB #0 and you will > > not need any BBM swap operation anymore. I don't understand why you > > absolutely want to put it in section #1. > > For the read/write both in OPS_RAW mode, the data layout will be > organized by the ECC engine, no matter how the layout comply with, this > will be workable. > > However, it will be chaotic when mix OPS_RAW operation with > OPS_AUTO_OOB/OPS_PLACE_OOB. > > That is, the OPS_RAW mode on-flash data layout need comply with > OPS_AUTO_OOB/OPS_PLACE_OOB, this is the reorganize function purpose. > > Just take the mtd/tests/nandbiterrs.c for example. > It will write data in OPS_PLACE_OOB mode and rewrite data in OPS_RAW > mode after insert one bitflip. Then read in OPS_PLACE_OOB mode to check > the bitflips. > > > > The standard BBM position will be section(1) main data, > > > need do the BBM swap operation. > > > > > > request buffer include req->databuf and req->oobbuf. > > > +----------------------------+ > > > > | > > > > req->databuf | > > > > | > > > > > > +----------------------------+ > > > > > > +-------------+ > > > > | > > > > req->oobbuf | > > > > | > > > > > > +-------------+ > > > > > > 1. For the OPS_RAW mode > > > > > > Expect the on-flash data format is like MTK ECC layout. > > > The snfi controller will put the on-flash data as is > > > spi_mem_op->data.buf.out. > > > > > > Therefore, the ECC engine have to reorganize the request > > > data and OOB buffer in 4 part for each section in > > > OPS_RAW mode. 
> > > > > > 1) BBM swap, only for the section need do the swap > > > 2) section main data > > > 3) OOB free data > > > 4) OOB ECC data > > > > > > The BBM swap will ensure the BBM position in MTK ECC > > > on-flash layout is same as NAND standard layout in > > > OPS_RAW mode. > > > > > > for (i = 0; i < eng->nsteps; i++) { > > > > > > /* part 1: BBM swap */ > > > if (i == eng->bbm_ctl.section) > > > eng->bbm_ctl.bbm_swap(nand, > > > databuf, oobbuf); > > > > > > /* part 2: main data in this section */ > > > memcpy(mtk_ecc_section_ptr(nand, i), > > > databuf + mtk_ecc_data_off(nand, i), > > > step_size); > > > > > > /* part 3: OOB free data */ > > > memcpy(mtk_ecc_oob_free_ptr(nand, i), > > > oobbuf + mtk_ecc_oob_free_position(nand, i), > > > eng->oob_free); > > > > > > /* part 4: OOB ECC data */ > > > memcpy(mtk_ecc_oob_free_ptr(nand, i) + eng->oob_free, > > > oobbuf + eng->oob_free * eng->nsteps + > > > i * eng->oob_ecc, > > > eng->oob_ecc); > > > } > > > > > > 2. For non OPS_RAW mode > > > > > > The snfi have a function called auto format with ECC enable. > > > This will auto reorganize the request data and oob data in > > > MTK ECC page layout by the snfi controller except the BBM position. > > > > > > Therefore, the ECC engine only need do the BBM swap after set OOB > > > data > > > bytes in OPS_AUTO or after memcpy oob data in OPS_PLACE_OOB for > > > write > > > operation. > > > > > > The BBM swap also ensure the BBM position in MTK ECC on-flash > > > layout is > > > same as NAND standard layout in non OPS_RAW mode. 
> > > > > > if (req->ooblen) { > > > if (req->mode == MTD_OPS_AUTO_OOB) { > > > ret = mtd_ooblayout_set_databytes(mtd, > > > req->oobbuf.out, > > > eng- > > > >bounce_oob_buf, > > > req->ooboffs, > > > mtd->oobavail); > > > if (ret) > > > return ret; > > > } else { > > > memcpy(eng->bounce_oob_buf + req->ooboffs, > > > req->oobbuf.out, > > > req->ooblen); > > > } > > > } > > > > > > eng->bbm_ctl.bbm_swap(nand, (void *)req->databuf.out, > > > eng->bounce_oob_buf); > > > > > Thanks > Xiangsheng Hou
diff --git a/drivers/mtd/nand/ecc-mtk.c b/drivers/mtd/nand/ecc-mtk.c index 31d7c77d5c59..c44499b3d0a5 100644 --- a/drivers/mtd/nand/ecc-mtk.c +++ b/drivers/mtd/nand/ecc-mtk.c @@ -16,6 +16,7 @@ #include <linux/of_platform.h> #include <linux/mutex.h> +#include <linux/mtd/nand.h> #include <linux/mtd/nand-ecc-mtk.h> #define ECC_IDLE_MASK BIT(0) @@ -41,11 +42,17 @@ #define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE) #define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON) +#define OOB_FREE_MAX_SIZE 8 +#define OOB_FREE_MIN_SIZE 1 + struct mtk_ecc_caps { u32 err_mask; const u8 *ecc_strength; const u32 *ecc_regs; u8 num_ecc_strength; + const u8 *spare_size; + u8 num_spare_size; + u32 max_section_size; u8 ecc_mode_shift; u32 parity_bits; int pg_irq_sel; @@ -79,6 +86,12 @@ static const u8 ecc_strength_mt7622[] = { 4, 6, 8, 10, 12, 14, 16 }; +/* spare size for each section that each IP supports */ +static const u8 spare_size_mt7622[] = { + 16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, + 52, 62, 61, 63, 64, 67, 74 +}; + enum mtk_ecc_regs { ECC_ENCPAR00, ECC_ENCIRQ_EN, @@ -447,6 +460,604 @@ unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc) } EXPORT_SYMBOL(mtk_ecc_get_parity_bits); +static inline int mtk_ecc_data_off(struct nand_device *nand, int i) +{ + int eccsize = nand->ecc.ctx.conf.step_size; + + return i * eccsize; +} + +static inline int mtk_ecc_oob_free_position(struct nand_device *nand, int i) +{ + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); + int position; + + if (i < eng->bbm_ctl.section) + position = (i + 1) * eng->oob_free; + else if (i == eng->bbm_ctl.section) + position = 0; + else + position = i * eng->oob_free; + + return position; +} + +static inline int mtk_ecc_data_len(struct nand_device *nand) +{ + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); + int eccsize = nand->ecc.ctx.conf.step_size; + int eccbytes = eng->oob_ecc; + + return eccsize + eng->oob_free + eccbytes; +} + +static inline u8 
*mtk_ecc_section_ptr(struct nand_device *nand, int i) +{ + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); + + return eng->bounce_page_buf + i * mtk_ecc_data_len(nand); +} + +static inline u8 *mtk_ecc_oob_free_ptr(struct nand_device *nand, int i) +{ + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); + int eccsize = nand->ecc.ctx.conf.step_size; + + return eng->bounce_page_buf + i * mtk_ecc_data_len(nand) + eccsize; +} + +static void mtk_ecc_no_bbm_swap(struct nand_device *a, u8 *b, u8 *c) +{ + /* nop */ +} + +static void mtk_ecc_bbm_swap(struct nand_device *nand, u8 *databuf, u8 *oobbuf) +{ + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); + int step_size = nand->ecc.ctx.conf.step_size; + u32 bbm_pos = eng->bbm_ctl.position; + + bbm_pos += eng->bbm_ctl.section * step_size; + + swap(oobbuf[0], databuf[bbm_pos]); +} + +static void mtk_ecc_set_bbm_ctl(struct mtk_ecc_bbm_ctl *bbm_ctl, + struct nand_device *nand) +{ + if (nanddev_page_size(nand) == 512) { + bbm_ctl->bbm_swap = mtk_ecc_no_bbm_swap; + } else { + bbm_ctl->bbm_swap = mtk_ecc_bbm_swap; + bbm_ctl->section = nanddev_page_size(nand) / + mtk_ecc_data_len(nand); + bbm_ctl->position = nanddev_page_size(nand) % + mtk_ecc_data_len(nand); + } +} + +static int mtk_ecc_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oob_region) +{ + struct nand_device *nand = mtd_to_nanddev(mtd); + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); + struct nand_ecc_props *conf = &nand->ecc.ctx.conf; + u32 eccsteps, bbm_bytes = 0; + + eccsteps = mtd->writesize / conf->step_size; + + if (section >= eccsteps) + return -ERANGE; + + /* Reserve 1 byte for BBM only for section 0 */ + if (section == 0) + bbm_bytes = 1; + + oob_region->length = eng->oob_free - bbm_bytes; + oob_region->offset = section * eng->oob_free + bbm_bytes; + + return 0; +} + +static int mtk_ecc_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oob_region) +{ + struct nand_device *nand = mtd_to_nanddev(mtd); 
+ struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); + + if (section) + return -ERANGE; + + oob_region->offset = eng->oob_free * eng->nsteps; + oob_region->length = mtd->oobsize - oob_region->offset; + + return 0; +} + +static const struct mtd_ooblayout_ops mtk_ecc_ooblayout_ops = { + .free = mtk_ecc_ooblayout_free, + .ecc = mtk_ecc_ooblayout_ecc, +}; + +const struct mtd_ooblayout_ops *mtk_ecc_get_ooblayout(void) +{ + return &mtk_ecc_ooblayout_ops; +} + +static struct device *mtk_ecc_get_engine_dev(struct device *dev) +{ + struct platform_device *eccpdev; + struct device_node *np; + + /* + * The device node is only the host controller, + * not the actual ECC engine when pipelined case. + */ + np = of_parse_phandle(dev->of_node, "nand-ecc-engine", 0); + if (!np) + return NULL; + + eccpdev = of_find_device_by_node(np); + if (!eccpdev) { + of_node_put(np); + return NULL; + } + + platform_device_put(eccpdev); + of_node_put(np); + + return &eccpdev->dev; +} + +/* + * mtk_ecc_data_format() - Convert to/from MTK ECC on-flash data format + * + * MTK ECC engine organizes page data by section, the on-flash format as below: + * || section 0 || section 1 || ... + * || data | OOB free | OOB ECC || data || OOB free | OOB ECC || ... + * + * Therefore, it's necessary to convert data when reading/writing in raw mode. + */ +static void mtk_ecc_data_format(struct nand_device *nand, + struct nand_page_io_req *req) +{ + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); + int step_size = nand->ecc.ctx.conf.step_size; + void *databuf, *oobbuf; + int i; + + if (req->type == NAND_PAGE_WRITE) { + databuf = (void *)req->databuf.out; + oobbuf = (void *)req->oobbuf.out; + + /* + * Convert the source databuf and oobbuf to MTK ECC + * on-flash data format.
+ */ + for (i = 0; i < eng->nsteps; i++) { + if (i == eng->bbm_ctl.section) + eng->bbm_ctl.bbm_swap(nand, + databuf, oobbuf); + memcpy(mtk_ecc_section_ptr(nand, i), + databuf + mtk_ecc_data_off(nand, i), + step_size); + + memcpy(mtk_ecc_oob_free_ptr(nand, i), + oobbuf + mtk_ecc_oob_free_position(nand, i), + eng->oob_free); + + memcpy(mtk_ecc_oob_free_ptr(nand, i) + eng->oob_free, + oobbuf + eng->oob_free * eng->nsteps + + i * eng->oob_ecc, + eng->oob_ecc); + } + + req->databuf.out = eng->bounce_page_buf; + req->oobbuf.out = eng->bounce_oob_buf; + } else { + databuf = req->databuf.in; + oobbuf = req->oobbuf.in; + + /* + * Convert the on-flash MTK ECC data format to + * destination databuf and oobbuf. + */ + memcpy(eng->bounce_page_buf, databuf, + nanddev_page_size(nand)); + memcpy(eng->bounce_oob_buf, oobbuf, + nanddev_per_page_oobsize(nand)); + + for (i = 0; i < eng->nsteps; i++) { + memcpy(databuf + mtk_ecc_data_off(nand, i), + mtk_ecc_section_ptr(nand, i), step_size); + + memcpy(oobbuf + mtk_ecc_oob_free_position(nand, i), + mtk_ecc_section_ptr(nand, i) + step_size, + eng->oob_free); + + memcpy(oobbuf + eng->oob_free * eng->nsteps + + i * eng->oob_ecc, + mtk_ecc_section_ptr(nand, i) + step_size + + eng->oob_free, + eng->oob_ecc); + + if (i == eng->bbm_ctl.section) + eng->bbm_ctl.bbm_swap(nand, + databuf, oobbuf); + } + } +} + +static void mtk_ecc_oob_free_shift(struct nand_device *nand, + u8 *dst_buf, u8 *src_buf, bool write) +{ + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); + u32 position; + int i; + + for (i = 0; i < eng->nsteps; i++) { + if (i < eng->bbm_ctl.section) + position = (i + 1) * eng->oob_free; + else if (i == eng->bbm_ctl.section) + position = 0; + else + position = i * eng->oob_free; + + if (write) + memcpy(dst_buf + i * eng->oob_free, src_buf + position, + eng->oob_free); + else + memcpy(dst_buf + position, src_buf + i * eng->oob_free, + eng->oob_free); + } +} + +static void mtk_ecc_set_section_size_and_strength(struct nand_device *nand) +{ 
+ struct nand_ecc_props *reqs = &nand->ecc.requirements; + struct nand_ecc_props *user = &nand->ecc.user_conf; + struct nand_ecc_props *conf = &nand->ecc.ctx.conf; + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); + + /* Configure the correction depending on the NAND device topology */ + if (user->step_size && user->strength) { + conf->step_size = user->step_size; + conf->strength = user->strength; + } else if (reqs->step_size && reqs->strength) { + conf->step_size = reqs->step_size; + conf->strength = reqs->strength; + } + + /* + * Align ECC strength and ECC size. + * The MTK HW ECC engine only supports 512 and 1024 ECC sizes. + */ + if (conf->step_size < 1024) { + if (nanddev_page_size(nand) > 512 && + eng->ecc->caps->max_section_size > 512) { + conf->step_size = 1024; + conf->strength <<= 1; + } else { + conf->step_size = 512; + } + } else { + conf->step_size = 1024; + } + + eng->section_size = conf->step_size; +} + +static int mtk_ecc_set_spare_per_section(struct nand_device *nand) +{ + struct nand_ecc_props *conf = &nand->ecc.ctx.conf; + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); + const u8 *spare = eng->ecc->caps->spare_size; + u32 i, closest_spare = 0; + + eng->nsteps = nanddev_page_size(nand) / conf->step_size; + eng->oob_per_section = nanddev_per_page_oobsize(nand) / eng->nsteps; + + if (conf->step_size == 1024) + eng->oob_per_section >>= 1; + + if (eng->oob_per_section < spare[0]) { + dev_err(eng->ecc->dev, "OOB size per section too small %d\n", + eng->oob_per_section); + return -EINVAL; + } + + for (i = 0; i < eng->ecc->caps->num_spare_size; i++) { + if (eng->oob_per_section >= spare[i] && + spare[i] >= spare[closest_spare]) { + closest_spare = i; + if (eng->oob_per_section == spare[i]) + break; + } + } + + eng->oob_per_section = spare[closest_spare]; + eng->oob_per_section_idx = closest_spare; + + if (conf->step_size == 1024) + eng->oob_per_section <<= 1; + + return 0; +} + +int mtk_ecc_prepare_io_req_pipelined(struct nand_device *nand, + 
struct nand_page_io_req *req) +{ + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); + struct mtd_info *mtd = nanddev_to_mtd(nand); + int ret; + + nand_ecc_tweak_req(&eng->req_ctx, req); + + /* Store the source buffer data to avoid modify source data */ + if (req->type == NAND_PAGE_WRITE) { + if (req->datalen) + memcpy(eng->src_page_buf + req->dataoffs, + req->databuf.out, + req->datalen); + + if (req->ooblen) + memcpy(eng->src_oob_buf + req->ooboffs, + req->oobbuf.out, + req->ooblen); + } + + if (req->mode == MTD_OPS_RAW) { + if (req->type == NAND_PAGE_WRITE) + mtk_ecc_data_format(nand, req); + + return 0; + } + + eng->ecc_cfg.mode = ECC_NFI_MODE; + eng->ecc_cfg.sectors = eng->nsteps; + eng->ecc_cfg.op = ECC_DECODE; + + if (req->type == NAND_PAGE_READ) + return mtk_ecc_enable(eng->ecc, &eng->ecc_cfg); + + memset(eng->bounce_oob_buf, 0xff, nanddev_per_page_oobsize(nand)); + if (req->ooblen) { + if (req->mode == MTD_OPS_AUTO_OOB) { + ret = mtd_ooblayout_set_databytes(mtd, + req->oobbuf.out, + eng->bounce_oob_buf, + req->ooboffs, + mtd->oobavail); + if (ret) + return ret; + } else { + memcpy(eng->bounce_oob_buf + req->ooboffs, + req->oobbuf.out, + req->ooblen); + } + } + + eng->bbm_ctl.bbm_swap(nand, (void *)req->databuf.out, + eng->bounce_oob_buf); + mtk_ecc_oob_free_shift(nand, (void *)req->oobbuf.out, + eng->bounce_oob_buf, true); + + eng->ecc_cfg.op = ECC_ENCODE; + + return mtk_ecc_enable(eng->ecc, &eng->ecc_cfg); +} + +int mtk_ecc_finish_io_req_pipelined(struct nand_device *nand, + struct nand_page_io_req *req) +{ + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); + struct mtd_info *mtd = nanddev_to_mtd(nand); + struct mtk_ecc_stats stats; + int ret; + + if (req->type == NAND_PAGE_WRITE) { + /* Restore the source buffer data */ + if (req->datalen) + memcpy((void *)req->databuf.out, + eng->src_page_buf + req->dataoffs, + req->datalen); + + if (req->ooblen) + memcpy((void *)req->oobbuf.out, + eng->src_oob_buf + req->ooboffs, + req->ooblen); + + if (req->mode 
!= MTD_OPS_RAW) + mtk_ecc_disable(eng->ecc); + + nand_ecc_restore_req(&eng->req_ctx, req); + + return 0; + } + + if (req->mode == MTD_OPS_RAW) { + mtk_ecc_data_format(nand, req); + nand_ecc_restore_req(&eng->req_ctx, req); + + return 0; + } + + ret = mtk_ecc_wait_done(eng->ecc, ECC_DECODE); + if (ret) { + ret = -ETIMEDOUT; + goto out; + } + + if (eng->read_empty) { + memset(req->databuf.in, 0xff, nanddev_page_size(nand)); + memset(req->oobbuf.in, 0xff, nanddev_per_page_oobsize(nand)); + ret = 0; + + goto out; + } + + mtk_ecc_get_stats(eng->ecc, &stats, eng->nsteps); + mtd->ecc_stats.corrected += stats.corrected; + mtd->ecc_stats.failed += stats.failed; + + /* + * Return -EBADMSG when an uncorrectable ECC error exists. + * Otherwise, return the bitflips. + */ + if (stats.failed) + ret = -EBADMSG; + else + ret = stats.bitflips; + + memset(eng->bounce_oob_buf, 0xff, nanddev_per_page_oobsize(nand)); + mtk_ecc_oob_free_shift(nand, eng->bounce_oob_buf, req->oobbuf.in, false); + eng->bbm_ctl.bbm_swap(nand, req->databuf.in, eng->bounce_oob_buf); + + if (req->ooblen) { + if (req->mode == MTD_OPS_AUTO_OOB) + ret = mtd_ooblayout_get_databytes(mtd, + req->oobbuf.in, + eng->bounce_oob_buf, + req->ooboffs, + mtd->oobavail); + else + memcpy(req->oobbuf.in, + eng->bounce_oob_buf + req->ooboffs, + req->ooblen); + } + +out: + mtk_ecc_disable(eng->ecc); + nand_ecc_restore_req(&eng->req_ctx, req); + + return ret; +} + +int mtk_ecc_init_ctx_pipelined(struct nand_device *nand) +{ + struct nand_ecc_props *conf = &nand->ecc.ctx.conf; + struct mtd_info *mtd = nanddev_to_mtd(nand); + struct mtk_ecc_engine *eng; + struct device *dev; + int free, ret; + + /* + * In the case of a pipelined engine, the device registering the ECC + * engine is not the actual ECC engine device but the host controller. 
+ */ + dev = mtk_ecc_get_engine_dev(nand->ecc.engine->dev); + if (!dev) + return -EINVAL; + + eng = devm_kzalloc(dev, sizeof(*eng), GFP_KERNEL); + if (!eng) + return -ENOMEM; + + nand->ecc.ctx.priv = eng; + nand->ecc.engine->priv = eng; + + eng->ecc = dev_get_drvdata(dev); + + mtk_ecc_set_section_size_and_strength(nand); + + ret = mtk_ecc_set_spare_per_section(nand); + if (ret) + return ret; + + clk_prepare_enable(eng->ecc->clk); + mtk_ecc_hw_init(eng->ecc); + + /* Calculate OOB free bytes except ECC parity data */ + free = (conf->strength * mtk_ecc_get_parity_bits(eng->ecc) + + 7) >> 3; + free = eng->oob_per_section - free; + + /* + * Enhance ECC strength if OOB left is bigger than max FDM size + * or reduce ECC strength if OOB size is not enough for ECC + * parity data. + */ + if (free > OOB_FREE_MAX_SIZE) + eng->oob_ecc = eng->oob_per_section - OOB_FREE_MAX_SIZE; + else if (free < 0) + eng->oob_ecc = eng->oob_per_section - OOB_FREE_MIN_SIZE; + + /* Calculate and adjust ECC strength based on OOB ECC bytes */ + conf->strength = (eng->oob_ecc << 3) / + mtk_ecc_get_parity_bits(eng->ecc); + mtk_ecc_adjust_strength(eng->ecc, &conf->strength); + + eng->oob_ecc = DIV_ROUND_UP(conf->strength * + mtk_ecc_get_parity_bits(eng->ecc), 8); + + eng->oob_free = eng->oob_per_section - eng->oob_ecc; + if (eng->oob_free > OOB_FREE_MAX_SIZE) + eng->oob_free = OOB_FREE_MAX_SIZE; + + eng->oob_free_protected = OOB_FREE_MIN_SIZE; + + eng->oob_ecc = eng->oob_per_section - eng->oob_free; + + if (!mtd->ooblayout) + mtd_set_ooblayout(mtd, mtk_ecc_get_ooblayout()); + + ret = nand_ecc_init_req_tweaking(&eng->req_ctx, nand); + if (ret) + return ret; + + eng->src_page_buf = kmalloc(nanddev_page_size(nand) + + nanddev_per_page_oobsize(nand), GFP_KERNEL); + eng->bounce_page_buf = kmalloc(nanddev_page_size(nand) + + nanddev_per_page_oobsize(nand), GFP_KERNEL); + if (!eng->src_page_buf || !eng->bounce_page_buf) { + ret = -ENOMEM; + goto cleanup_req_tweak; + } + + eng->src_oob_buf = eng->src_page_buf 
+ nanddev_page_size(nand); + eng->bounce_oob_buf = eng->bounce_page_buf + nanddev_page_size(nand); + + mtk_ecc_set_bbm_ctl(&eng->bbm_ctl, nand); + eng->ecc_cfg.strength = conf->strength; + eng->ecc_cfg.len = conf->step_size + eng->oob_free_protected; + mtd->bitflip_threshold = conf->strength; + + return 0; + +cleanup_req_tweak: + nand_ecc_cleanup_req_tweaking(&eng->req_ctx); + + return ret; +} + +void mtk_ecc_cleanup_ctx_pipelined(struct nand_device *nand) +{ + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand); + + if (eng) { + nand_ecc_cleanup_req_tweaking(&eng->req_ctx); + kfree(eng->src_page_buf); + kfree(eng->bounce_page_buf); + } +} + +/* + * The MTK ECC engine work at pipelined situation, + * will be registered by the drivers that wrap it. + */ +static struct nand_ecc_engine_ops mtk_ecc_engine_pipelined_ops = { + .init_ctx = mtk_ecc_init_ctx_pipelined, + .cleanup_ctx = mtk_ecc_cleanup_ctx_pipelined, + .prepare_io_req = mtk_ecc_prepare_io_req_pipelined, + .finish_io_req = mtk_ecc_finish_io_req_pipelined, +}; + +struct nand_ecc_engine_ops *mtk_ecc_get_pipelined_ops(void) +{ + return &mtk_ecc_engine_pipelined_ops; +} +EXPORT_SYMBOL(mtk_ecc_get_pipelined_ops); + static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = { .err_mask = 0x3f, .ecc_strength = ecc_strength_mt2701, @@ -472,6 +1083,9 @@ static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = { .ecc_strength = ecc_strength_mt7622, .ecc_regs = mt7622_ecc_regs, .num_ecc_strength = 7, + .spare_size = spare_size_mt7622, + .num_spare_size = 19, + .max_section_size = 1024, .ecc_mode_shift = 4, .parity_bits = 13, .pg_irq_sel = 0, diff --git a/include/linux/mtd/nand-ecc-mtk.h b/include/linux/mtd/nand-ecc-mtk.h index 0e48c36e6ca0..6d550032cbd9 100644 --- a/include/linux/mtd/nand-ecc-mtk.h +++ b/include/linux/mtd/nand-ecc-mtk.h @@ -33,6 +33,61 @@ struct mtk_ecc_config { u32 len; }; +/** + * struct mtk_ecc_bbm_ctl - Information relative to the BBM swap + * @bbm_swap: BBM swap function + * @section: Section number in 
data area for swap + * @position: Position in @section for swap with BBM + */ +struct mtk_ecc_bbm_ctl { + void (*bbm_swap)(struct nand_device *nand, u8 *databuf, u8 *oobbuf); + u32 section; + u32 position; +}; + +/** + * struct mtk_ecc_engine - Information relative to the ECC + * @req_ctx: Save request context and tweak the original request to fit the + * engine needs + * @oob_per_section: OOB size for each section to store OOB free/ECC bytes + * @oob_per_section_idx: The index for @oob_per_section in spare size array + * @oob_ecc: OOB size for each section to store the ECC parity + * @oob_free: OOB size for each section to store the OOB free bytes + * @oob_free_protected: OOB free bytes will be protected by the ECC engine + * @section_size: The size of each section + * @read_empty: Indicate whether empty page for one read operation + * @nsteps: The number of the sections + * @src_page_buf: Buffer used to store source data buffer when write + * @src_oob_buf: Buffer used to store source OOB buffer when write + * @bounce_page_buf: Data bounce buffer + * @bounce_oob_buf: OOB bounce buffer + * @ecc: The ECC engine private data structure + * @ecc_cfg: The configuration of each ECC operation + * @bbm_ctl: Information relative to the BBM swap + */ +struct mtk_ecc_engine { + struct nand_ecc_req_tweak_ctx req_ctx; + + u32 oob_per_section; + u32 oob_per_section_idx; + u32 oob_ecc; + u32 oob_free; + u32 oob_free_protected; + u32 section_size; + + bool read_empty; + u32 nsteps; + + u8 *src_page_buf; + u8 *src_oob_buf; + u8 *bounce_page_buf; + u8 *bounce_oob_buf; + + struct mtk_ecc *ecc; + struct mtk_ecc_config ecc_cfg; + struct mtk_ecc_bbm_ctl bbm_ctl; +}; + int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32); void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int); int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation); @@ -44,4 +99,17 @@ unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc); struct mtk_ecc 
*of_mtk_ecc_get(struct device_node *); void mtk_ecc_release(struct mtk_ecc *); +#if IS_ENABLED(CONFIG_MTD_NAND_ECC_MTK) + +struct nand_ecc_engine_ops *mtk_ecc_get_pipelined_ops(void); + +#else /* !CONFIG_MTD_NAND_ECC_MTK */ + +struct nand_ecc_engine_ops *mtk_ecc_get_pipelined_ops(void) +{ + return NULL; +} + +#endif /* CONFIG_MTD_NAND_ECC_MTK */ + #endif
Convert the Mediatek HW ECC engine to the ECC infrastructure with pipelined case. Signed-off-by: Xiangsheng Hou <xiangsheng.hou@mediatek.com> --- drivers/mtd/nand/ecc-mtk.c | 614 +++++++++++++++++++++++++++++++ include/linux/mtd/nand-ecc-mtk.h | 68 ++++ 2 files changed, 682 insertions(+)