From patchwork Fri Nov 30 11:43:58 2018
X-Patchwork-Submitter: Igor Konopko
X-Patchwork-Id: 10706183
From: Igor Konopko
To: mb@lightnvm.io
Cc: linux-block@vger.kernel.org, javier@cnexlabs.com, hans.holmberg@cnexlabs.com, igor.j.konopko@intel.com
Subject: [PATCH v5 1/5] lightnvm: pblk: Move lba list to partial read context
Date: Fri, 30 Nov 2018 12:43:58 +0100
Message-Id: <20181130114402.43793-2-igor.j.konopko@intel.com>
In-Reply-To: <20181130114402.43793-1-igor.j.konopko@intel.com>
References: <20181130114402.43793-1-igor.j.konopko@intel.com>
List-ID: <linux-block.vger.kernel.org>

Currently, DMA-allocated memory is reused on the partial read path for
the lba_list_mem and lba_list_media arrays. In preparation for dynamic
DMA pool sizes, we need to move these arrays into the pblk_pr_ctx
structure.
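
For readers skimming the diff below, a minimal userspace sketch of the
ownership change: the partial-read context carries its own lba arrays
instead of borrowing the tail of the DMA-mapped ppa_list buffer. The
value of NVM_MAX_VLBA and the save/restore loops are assumptions standing
in for pblk_setup_partial_read() and pblk_end_partial_read().

#include <stdint.h>
#include <stdio.h>

#define NVM_MAX_VLBA 64    /* assumed: max vectored LBAs per request */

struct pblk_pr_ctx {
        /* bitmap, orig_bio, ... elided */
        uint64_t lba_list_mem[NVM_MAX_VLBA];    /* lbas as submitted */
        uint64_t lba_list_media[NVM_MAX_VLBA];  /* lbas returned by media */
};

int main(void)
{
        uint64_t meta_lba[NVM_MAX_VLBA] = {0};  /* stand-in for meta_list[i].lba */
        static struct pblk_pr_ctx pr_ctx;       /* zero-initialized */
        int i, nr_secs = 4;

        for (i = 0; i < nr_secs; i++)
                meta_lba[i] = 100 + i;

        /* setup path: stash the submitted lbas in the context */
        for (i = 0; i < nr_secs; i++)
                pr_ctx.lba_list_mem[i] = meta_lba[i];

        /* ...device overwrites meta_lba while servicing the read... */

        /* completion path: keep the media lbas, restore the originals */
        for (i = 0; i < nr_secs; i++) {
                pr_ctx.lba_list_media[i] = meta_lba[i];
                meta_lba[i] = pr_ctx.lba_list_mem[i];
        }

        printf("restored lba[0] = %llu\n", (unsigned long long)meta_lba[0]);
        return 0;
}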
Reviewed-by: Javier González
Signed-off-by: Igor Konopko
---
 drivers/lightnvm/pblk-read.c | 20 +++++---------------
 drivers/lightnvm/pblk.h      |  2 ++
 2 files changed, 7 insertions(+), 15 deletions(-)

diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 9fba614adeeb..19917d3c19b3 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -224,7 +224,6 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
 	unsigned long *read_bitmap = pr_ctx->bitmap;
 	int nr_secs = pr_ctx->orig_nr_secs;
 	int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
-	__le64 *lba_list_mem, *lba_list_media;
 	void *src_p, *dst_p;
 	int hole, i;
 
@@ -237,13 +236,9 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
 		rqd->ppa_list[0] = ppa;
 	}
 
-	/* Re-use allocated memory for intermediate lbas */
-	lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
-	lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);
-
 	for (i = 0; i < nr_secs; i++) {
-		lba_list_media[i] = meta_list[i].lba;
-		meta_list[i].lba = lba_list_mem[i];
+		pr_ctx->lba_list_media[i] = meta_list[i].lba;
+		meta_list[i].lba = pr_ctx->lba_list_mem[i];
 	}
 
 	/* Fill the holes in the original bio */
@@ -255,7 +250,7 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
 		line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
 		kref_put(&line->ref, pblk_line_put);
 
-		meta_list[hole].lba = lba_list_media[i];
+		meta_list[hole].lba = pr_ctx->lba_list_media[i];
 
 		src_bv = new_bio->bi_io_vec[i++];
 		dst_bv = bio->bi_io_vec[bio_init_idx + hole];
@@ -295,13 +290,9 @@ static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
 	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
 	struct pblk_pr_ctx *pr_ctx;
 	struct bio *new_bio, *bio = r_ctx->private;
-	__le64 *lba_list_mem;
 	int nr_secs = rqd->nr_ppas;
 	int i;
 
-	/* Re-use allocated memory for intermediate lbas */
-	lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
-
 	new_bio = bio_alloc(GFP_KERNEL, nr_holes);
 
 	if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
@@ -312,12 +303,12 @@ static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
 		goto fail_free_pages;
 	}
 
-	pr_ctx = kmalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);
+	pr_ctx = kzalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);
 	if (!pr_ctx)
 		goto fail_free_pages;
 
 	for (i = 0; i < nr_secs; i++)
-		lba_list_mem[i] = meta_list[i].lba;
+		pr_ctx->lba_list_mem[i] = meta_list[i].lba;
 
 	new_bio->bi_iter.bi_sector = 0; /* internal bio */
 	bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
@@ -325,7 +316,6 @@ static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
 	rqd->bio = new_bio;
 	rqd->nr_ppas = nr_holes;
 
-	pr_ctx->ppa_ptr = NULL;
 	pr_ctx->orig_bio = bio;
 	bitmap_copy(pr_ctx->bitmap, read_bitmap, NVM_MAX_VLBA);
 	pr_ctx->bio_init_idx = bio_init_idx;
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index e5b88a25d4d6..0e9d3960ac4c 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -132,6 +132,8 @@ struct pblk_pr_ctx {
 	unsigned int bio_init_idx;
 	void *ppa_ptr;
 	dma_addr_t dma_ppa_list;
+	__le64 lba_list_mem[NVM_MAX_VLBA];
+	__le64 lba_list_media[NVM_MAX_VLBA];
 };
 
 /* Pad context */

From patchwork Fri Nov 30 11:43:59 2018
X-Patchwork-Submitter: Igor Konopko
X-Patchwork-Id: 10706185
From: Igor Konopko
To: mb@lightnvm.io
Cc: linux-block@vger.kernel.org, javier@cnexlabs.com, hans.holmberg@cnexlabs.com, igor.j.konopko@intel.com
Subject: [PATCH v5 2/5] lightnvm: pblk: Helpers for OOB metadata
Date: Fri, 30 Nov 2018 12:43:59 +0100
Message-Id: <20181130114402.43793-3-igor.j.konopko@intel.com>
In-Reply-To: <20181130114402.43793-1-igor.j.konopko@intel.com>
References: <20181130114402.43793-1-igor.j.konopko@intel.com>
List-ID: <linux-block.vger.kernel.org>

Currently pblk assumes that the size of the OOB metadata on the drive is
always equal to the size of the pblk_sec_meta struct. This commit adds
helpers that will allow different OOB metadata sizes to be handled in the
future. For now, only OOB metadata of exactly 16 bytes is still
supported.
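
The core of the change is a byte-stride accessor: metadata entry i lives
at byte offset i * oob_meta_size instead of i * sizeof(struct
pblk_sec_meta). A standalone sketch of the pattern, with the kernel types
stubbed out and the 128 B stride chosen purely for illustration
(pblk_get_meta itself appears in the pblk.h hunk below):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct pblk_sec_meta {
        uint64_t reserved;
        uint64_t lba;          /* 16 bytes total, as in pblk */
};

/* Entry i starts at i * stride bytes, so a drive with a larger OOB
 * area per sector (e.g. 128 B) still indexes correctly. */
static struct pblk_sec_meta *get_meta(void *meta, int stride, int index)
{
        return (struct pblk_sec_meta *)((char *)meta + stride * index);
}

int main(void)
{
        int stride = 128;              /* assumed drive OOB size */
        void *buf = calloc(4, stride); /* room for 4 entries */

        get_meta(buf, stride, 2)->lba = 42;
        printf("lba[2] = %llu\n",
               (unsigned long long)get_meta(buf, stride, 2)->lba);
        free(buf);
        return 0;
}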
Reviewed-by: Javier González
Signed-off-by: Igor Konopko
---
 drivers/lightnvm/pblk-core.c     |  5 +++--
 drivers/lightnvm/pblk-init.c     |  6 ++++++
 drivers/lightnvm/pblk-map.c      | 20 +++++++++++--------
 drivers/lightnvm/pblk-read.c     | 48 ++++++++++++++++++++++++++--------------
 drivers/lightnvm/pblk-recovery.c | 16 +++++++++-----
 drivers/lightnvm/pblk.h          |  6 ++++++
 6 files changed, 69 insertions(+), 32 deletions(-)

diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index f1b411e7c7c9..e732b2d12a23 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -796,10 +796,11 @@ static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
 	rqd.is_seq = 1;
 
 	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
-		struct pblk_sec_meta *meta_list = rqd.meta_list;
+		struct pblk_sec_meta *meta = pblk_get_meta(pblk,
+							   rqd.meta_list, i);
 
 		rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
-		meta_list[i].lba = lba_list[paddr] = addr_empty;
+		meta->lba = lba_list[paddr] = addr_empty;
 	}
 
 	ret = pblk_submit_io_sync_sem(pblk, &rqd);
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 72ad3e70318c..33361bfb85c3 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -405,6 +405,12 @@ static int pblk_core_init(struct pblk *pblk)
 			queue_max_hw_sectors(dev->q) / (geo->csecs >> SECTOR_SHIFT));
 	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
 
+	pblk->oob_meta_size = geo->sos;
+	if (pblk->oob_meta_size != sizeof(struct pblk_sec_meta)) {
+		pblk_err(pblk, "Unsupported metadata size\n");
+		return -EINVAL;
+	}
+
 	pblk->pad_dist = kcalloc(pblk->min_write_pgs - 1, sizeof(atomic64_t),
 				 GFP_KERNEL);
 	if (!pblk->pad_dist)
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
index 5a3c28cce8ab..81e503ec384e 100644
--- a/drivers/lightnvm/pblk-map.c
+++ b/drivers/lightnvm/pblk-map.c
@@ -22,7 +22,7 @@
 static int pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
 			      struct ppa_addr *ppa_list,
 			      unsigned long *lun_bitmap,
-			      struct pblk_sec_meta *meta_list,
+			      void *meta_list,
 			      unsigned int valid_secs)
 {
 	struct pblk_line *line = pblk_line_get_data(pblk);
@@ -58,6 +58,7 @@ static int pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
 	paddr = pblk_alloc_page(pblk, line, nr_secs);
 
 	for (i = 0; i < nr_secs; i++, paddr++) {
+		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
 		__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
 
 		/* ppa to be sent to the device */
@@ -74,14 +75,15 @@ static int pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
 			kref_get(&line->ref);
 			w_ctx = pblk_rb_w_ctx(&pblk->rwb, sentry + i);
 			w_ctx->ppa = ppa_list[i];
-			meta_list[i].lba = cpu_to_le64(w_ctx->lba);
+			meta->lba = cpu_to_le64(w_ctx->lba);
 			lba_list[paddr] = cpu_to_le64(w_ctx->lba);
 			if (lba_list[paddr] != addr_empty)
 				line->nr_valid_lbas++;
 			else
 				atomic64_inc(&pblk->pad_wa);
 		} else {
-			lba_list[paddr] = meta_list[i].lba = addr_empty;
+			lba_list[paddr] = addr_empty;
+			meta->lba = addr_empty;
 			__pblk_map_invalidate(pblk, line, paddr);
 		}
 	}
@@ -94,7 +96,8 @@ int pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
 		 unsigned long *lun_bitmap, unsigned int valid_secs,
 		 unsigned int off)
 {
-	struct pblk_sec_meta *meta_list = rqd->meta_list;
+	void *meta_list = rqd->meta_list;
+	void *meta_buffer;
 	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
 	unsigned int map_secs;
 	int min = pblk->min_write_pgs;
@@ -103,9 +106,10 @@ int pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
 	for (i = off; i < rqd->nr_ppas; i += min) {
 		map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
+		meta_buffer = pblk_get_meta(pblk, meta_list, i);
 
 		ret = pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
-					lun_bitmap, &meta_list[i], map_secs);
+					lun_bitmap, meta_buffer, map_secs);
 		if (ret)
 			return ret;
 	}
@@ -121,7 +125,8 @@ int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
 	struct pblk_line_meta *lm = &pblk->lm;
-	struct pblk_sec_meta *meta_list = rqd->meta_list;
+	void *meta_list = rqd->meta_list;
+	void *meta_buffer;
 	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
 	struct pblk_line *e_line, *d_line;
 	unsigned int map_secs;
@@ -132,9 +137,10 @@ int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
 	for (i = 0; i < rqd->nr_ppas; i += min) {
 		map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
+		meta_buffer = pblk_get_meta(pblk, meta_list, i);
 
 		ret = pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
-					lun_bitmap, &meta_list[i], map_secs);
+					lun_bitmap, meta_buffer, map_secs);
 		if (ret)
 			return ret;
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 19917d3c19b3..6becd85ca4c6 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -43,7 +43,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
 				 struct bio *bio, sector_t blba,
 				 unsigned long *read_bitmap)
 {
-	struct pblk_sec_meta *meta_list = rqd->meta_list;
+	void *meta_list = rqd->meta_list;
 	struct ppa_addr ppas[NVM_MAX_VLBA];
 	int nr_secs = rqd->nr_ppas;
 	bool advanced_bio = false;
@@ -53,12 +53,15 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
 
 	for (i = 0; i < nr_secs; i++) {
 		struct ppa_addr p = ppas[i];
+		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
 		sector_t lba = blba + i;
 
retry:
 		if (pblk_ppa_empty(p)) {
+			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
+
 			WARN_ON(test_and_set_bit(i, read_bitmap));
-			meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
+			meta->lba = addr_empty;
 
 			if (unlikely(!advanced_bio)) {
 				bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
@@ -78,7 +81,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
 				goto retry;
 			}
 			WARN_ON(test_and_set_bit(i, read_bitmap));
-			meta_list[i].lba = cpu_to_le64(lba);
+			meta->lba = cpu_to_le64(lba);
 			advanced_bio = true;
 #ifdef CONFIG_NVM_PBLK_DEBUG
 			atomic_long_inc(&pblk->cache_reads);
@@ -105,12 +108,13 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
 static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
 				sector_t blba)
 {
-	struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
+	void *meta_list = rqd->meta_list;
 	int nr_lbas = rqd->nr_ppas;
 	int i;
 
 	for (i = 0; i < nr_lbas; i++) {
-		u64 lba = le64_to_cpu(meta_lba_list[i].lba);
+		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
+		u64 lba = le64_to_cpu(meta->lba);
 
 		if (lba == ADDR_EMPTY)
 			continue;
@@ -134,17 +138,19 @@ static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
 static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
 				 u64 *lba_list, int nr_lbas)
 {
-	struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
+	void *meta_lba_list = rqd->meta_list;
 	int i, j;
 
 	for (i = 0, j = 0; i < nr_lbas; i++) {
+		struct pblk_sec_meta *meta = pblk_get_meta(pblk,
+							   meta_lba_list, j);
 		u64 lba = lba_list[i];
 		u64 meta_lba;
 
 		if (lba == ADDR_EMPTY)
 			continue;
 
-		meta_lba = le64_to_cpu(meta_lba_list[j].lba);
+		meta_lba = le64_to_cpu(meta->lba);
 
 		if (lba != meta_lba) {
 #ifdef CONFIG_NVM_PBLK_DEBUG
@@ -216,10 +222,11 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
 	struct pblk *pblk = rqd->private;
 	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
 	struct pblk_pr_ctx *pr_ctx = r_ctx->private;
+	struct pblk_sec_meta *meta;
 	struct bio *new_bio = rqd->bio;
 	struct bio *bio = pr_ctx->orig_bio;
 	struct bio_vec src_bv, dst_bv;
-	struct pblk_sec_meta *meta_list = rqd->meta_list;
+	void *meta_list = rqd->meta_list;
 	int bio_init_idx = pr_ctx->bio_init_idx;
 	unsigned long *read_bitmap = pr_ctx->bitmap;
 	int nr_secs = pr_ctx->orig_nr_secs;
@@ -237,8 +244,9 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
 	}
 
 	for (i = 0; i < nr_secs; i++) {
-		pr_ctx->lba_list_media[i] = meta_list[i].lba;
-		meta_list[i].lba = pr_ctx->lba_list_mem[i];
+		meta = pblk_get_meta(pblk, meta_list, i);
+		pr_ctx->lba_list_media[i] = le64_to_cpu(meta->lba);
+		meta->lba = cpu_to_le64(pr_ctx->lba_list_mem[i]);
 	}
 
 	/* Fill the holes in the original bio */
@@ -250,7 +258,8 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
 		line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
 		kref_put(&line->ref, pblk_line_put);
 
-		meta_list[hole].lba = pr_ctx->lba_list_media[i];
+		meta = pblk_get_meta(pblk, meta_list, hole);
+		meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
 
 		src_bv = new_bio->bi_io_vec[i++];
 		dst_bv = bio->bi_io_vec[bio_init_idx + hole];
@@ -286,7 +295,7 @@ static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
 			    unsigned long *read_bitmap,
 			    int nr_holes)
 {
-	struct pblk_sec_meta *meta_list = rqd->meta_list;
+	void *meta_list = rqd->meta_list;
 	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
 	struct pblk_pr_ctx *pr_ctx;
 	struct bio *new_bio, *bio = r_ctx->private;
@@ -307,8 +316,11 @@ static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
 	if (!pr_ctx)
 		goto fail_free_pages;
 
-	for (i = 0; i < nr_secs; i++)
-		pr_ctx->lba_list_mem[i] = meta_list[i].lba;
+	for (i = 0; i < nr_secs; i++) {
+		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
+
+		pr_ctx->lba_list_mem[i] = le64_to_cpu(meta->lba);
+	}
 
 	new_bio->bi_iter.bi_sector = 0; /* internal bio */
 	bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
@@ -373,7 +385,7 @@ static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
 static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
 			 sector_t lba, unsigned long *read_bitmap)
 {
-	struct pblk_sec_meta *meta_list = rqd->meta_list;
+	struct pblk_sec_meta *meta = pblk_get_meta(pblk, rqd->meta_list, 0);
 	struct ppa_addr ppa;
 
 	pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
@@ -384,8 +396,10 @@ static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
 
retry:
 	if (pblk_ppa_empty(ppa)) {
+		__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
+
 		WARN_ON(test_and_set_bit(0, read_bitmap));
-		meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
+		meta->lba = addr_empty;
 		return;
 	}
@@ -399,7 +413,7 @@ static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
 	}
 
 	WARN_ON(test_and_set_bit(0, read_bitmap));
-	meta_list[0].lba = cpu_to_le64(lba);
+	meta->lba = cpu_to_le64(lba);
 
 #ifdef CONFIG_NVM_PBLK_DEBUG
 	atomic_long_inc(&pblk->cache_reads);
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index 4c726506a831..e4dd634ba05f 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -127,7 +127,7 @@ static u64 pblk_sec_in_open_line(struct pblk *pblk, struct pblk_line *line)
 
 struct pblk_recov_alloc {
 	struct ppa_addr *ppa_list;
-	struct pblk_sec_meta *meta_list;
+	void *meta_list;
 	struct nvm_rq *rqd;
 	void *data;
 	dma_addr_t dma_ppa_list;
@@ -161,7 +161,7 @@ static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
-	struct pblk_sec_meta *meta_list;
+	void *meta_list;
 	struct pblk_pad_rq *pad_rq;
 	struct nvm_rq *rqd;
 	struct bio *bio;
@@ -240,12 +240,15 @@ static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
 
 		for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
 			struct ppa_addr dev_ppa;
+			struct pblk_sec_meta *meta;
 			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
 
 			dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
 
 			pblk_map_invalidate(pblk, dev_ppa);
-			lba_list[w_ptr] = meta_list[i].lba = addr_empty;
+			lba_list[w_ptr] = addr_empty;
+			meta = pblk_get_meta(pblk, meta_list, i);
+			meta->lba = addr_empty;
 			rqd->ppa_list[i] = dev_ppa;
 		}
 	}
@@ -340,7 +343,7 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
 	struct pblk_line_meta *lm = &pblk->lm;
 	struct nvm_geo *geo = &dev->geo;
 	struct ppa_addr *ppa_list;
-	struct pblk_sec_meta *meta_list;
+	void *meta_list;
 	struct nvm_rq *rqd;
 	struct bio *bio;
 	void *data;
@@ -438,7 +441,8 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
 	}
 
 	for (i = 0; i < rqd->nr_ppas; i++) {
-		u64 lba = le64_to_cpu(meta_list[i].lba);
+		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
+		u64 lba = le64_to_cpu(meta->lba);
 
 		lba_list[paddr++] = cpu_to_le64(lba);
 
@@ -467,7 +471,7 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
 	struct nvm_geo *geo = &dev->geo;
 	struct nvm_rq *rqd;
 	struct ppa_addr *ppa_list;
-	struct pblk_sec_meta *meta_list;
+	void *meta_list;
 	struct pblk_recov_alloc p;
 	void *data;
 	dma_addr_t dma_ppa_list, dma_meta_list;
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 0e9d3960ac4c..80f356688803 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -634,6 +634,7 @@ struct pblk {
 
 	int min_write_pgs; /* Minimum amount of pages required by controller */
 	int max_write_pgs; /* Maximum amount of pages supported by controller */
+	int oob_meta_size; /* Size of OOB sector metadata */
 
 	sector_t capacity; /* Device capacity when bad blocks are subtracted */
 
@@ -1380,6 +1381,11 @@ static inline unsigned int pblk_get_min_chks(struct pblk *pblk)
 	 */
 	return DIV_ROUND_UP(100, pblk->op) * lm->blk_per_line;
 }
+static inline struct pblk_sec_meta *pblk_get_meta(struct pblk *pblk,
+						  void *meta, int index)
+{
+	return meta + pblk->oob_meta_size * index;
+}
 #endif /* PBLK_H_ */

From patchwork Fri Nov 30 11:44:00 2018
X-Patchwork-Submitter: Igor Konopko
X-Patchwork-Id: 10706187
From: Igor Konopko
To: mb@lightnvm.io
Cc: linux-block@vger.kernel.org, javier@cnexlabs.com, hans.holmberg@cnexlabs.com, igor.j.konopko@intel.com
Subject: [PATCH v5 3/5] lightnvm: Flexible DMA pool entry size
Date: Fri, 30 Nov 2018 12:44:00 +0100
Message-Id: <20181130114402.43793-4-igor.j.konopko@intel.com>
In-Reply-To: <20181130114402.43793-1-igor.j.konopko@intel.com>
References: <20181130114402.43793-1-igor.j.konopko@intel.com>
List-ID: <linux-block.vger.kernel.org>

Currently the whole of lightnvm and pblk uses a single DMA pool, whose
entry size is always equal to PAGE_SIZE. The PPA list always needs
8B * 64, which leaves only 56B * 64 of space for OOB metadata. Since NVMe
OOB metadata can be bigger, e.g. 128B, this solution is not robust. This
patch adds the possibility to support OOB metadata larger than 56B by
sizing the DMA pool based on the OOB metadata size. It also allows pblk
to use OOB metadata >= 16B.
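
The sizing rule in nvm_register() below is simple arithmetic; a
standalone sketch of the same computation, where NVM_MAX_VLBA and the
sample sos values are assumptions for illustration only:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096
#define NVM_MAX_VLBA 64
/* round x up to the next multiple of a, as the kernel's round_up() does */
#define ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
        int sos_vals[] = { 16, 128 };   /* per-sector OOB sizes to try */

        for (int i = 0; i < 2; i++) {
                int sos = sos_vals[i];
                /* one 8 B PPA entry plus sos bytes of meta per sector */
                int need = NVM_MAX_VLBA * ((int)sizeof(uint64_t) + sos);
                int pool = ROUND_UP(need > PAGE_SIZE ? need : PAGE_SIZE,
                                    PAGE_SIZE);
                printf("sos=%3d: ppa+meta=%5d B -> pool entry %5d B\n",
                       sos, need, pool);
        }
        return 0;  /* sos=16 fits in one page; sos=128 needs 8704 -> 12288 */
}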
Reviewed-by: Javier González
Signed-off-by: Igor Konopko
---
 drivers/lightnvm/core.c          | 9 +++++++--
 drivers/lightnvm/pblk-core.c     | 8 ++++----
 drivers/lightnvm/pblk-init.c     | 2 +-
 drivers/lightnvm/pblk-recovery.c | 4 ++--
 drivers/lightnvm/pblk.h          | 6 +++++-
 drivers/nvme/host/lightnvm.c     | 5 +++--
 include/linux/lightnvm.h         | 2 +-
 7 files changed, 23 insertions(+), 13 deletions(-)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 69b841d682c7..5f82036fe322 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -1140,7 +1140,7 @@ EXPORT_SYMBOL(nvm_alloc_dev);
 
 int nvm_register(struct nvm_dev *dev)
 {
-	int ret;
+	int ret, exp_pool_size;
 
 	if (!dev->q || !dev->ops)
 		return -EINVAL;
@@ -1149,7 +1149,12 @@ int nvm_register(struct nvm_dev *dev)
 	if (ret)
 		return ret;
 
-	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
+	exp_pool_size = max_t(int, PAGE_SIZE,
+			      (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
+	exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);
+
+	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
+						  exp_pool_size);
 	if (!dev->dma_pool) {
 		pr_err("nvm: could not create dma pool\n");
 		nvm_free(dev);
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index e732b2d12a23..7e3397f8ead1 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -250,8 +250,8 @@ int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
 	if (rqd->nr_ppas == 1)
 		return 0;
 
-	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
-	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
+	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
+	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);
 
 	return 0;
 }
@@ -846,8 +846,8 @@ int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
 	if (!meta_list)
 		return -ENOMEM;
 
-	ppa_list = meta_list + pblk_dma_meta_size;
-	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
+	ppa_list = meta_list + pblk_dma_meta_size(pblk);
+	dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);
 
next_rq:
 	memset(&rqd, 0, sizeof(struct nvm_rq));
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 33361bfb85c3..ff6a6df369c3 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -406,7 +406,7 @@ static int pblk_core_init(struct pblk *pblk)
 	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
 
 	pblk->oob_meta_size = geo->sos;
-	if (pblk->oob_meta_size != sizeof(struct pblk_sec_meta)) {
+	if (pblk->oob_meta_size < sizeof(struct pblk_sec_meta)) {
 		pblk_err(pblk, "Unsupported metadata size\n");
 		return -EINVAL;
 	}
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index e4dd634ba05f..3a775d10f616 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -481,8 +481,8 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
 	if (!meta_list)
 		return -ENOMEM;
 
-	ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
-	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
+	ppa_list = (void *)(meta_list) + pblk_dma_meta_size(pblk);
+	dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);
 
 	data = kcalloc(pblk->max_write_pgs, geo->csecs, GFP_KERNEL);
 	if (!data) {
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 80f356688803..9087d53d5c25 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -104,7 +104,6 @@ enum {
 	PBLK_RL_LOW = 4
 };
 
-#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * NVM_MAX_VLBA)
 #define pblk_dma_ppa_size (sizeof(u64) * NVM_MAX_VLBA)
 
 /* write buffer completion context */
@@ -1388,4 +1387,9 @@ static inline struct pblk_sec_meta *pblk_get_meta(struct pblk *pblk,
 {
 	return meta + pblk->oob_meta_size * index;
 }
+
+static inline int pblk_dma_meta_size(struct pblk *pblk)
+{
+	return pblk->oob_meta_size * NVM_MAX_VLBA;
+}
 #endif /* PBLK_H_ */
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 51d957ccf328..ba268d7cf141 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -732,11 +732,12 @@ static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
 	return ret;
 }
 
-static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
+static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name,
+				      int size)
 {
 	struct nvme_ns *ns = nvmdev->q->queuedata;
 
-	return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
+	return dma_pool_create(name, ns->ctrl->dev, size, PAGE_SIZE, 0);
 }
 
 static void nvme_nvm_destroy_dma_pool(void *pool)
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 2fdeac1a420d..7afedaddbd15 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -90,7 +90,7 @@ typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
 							struct nvm_chk_meta *);
 typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
 typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *);
-typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
+typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int);
 typedef void (nvm_destroy_dma_pool_fn)(void *);
 typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
 								dma_addr_t *);

From patchwork Fri Nov 30 11:44:01 2018
X-Patchwork-Submitter: Igor Konopko
X-Patchwork-Id: 10706189
From: Igor Konopko
To: mb@lightnvm.io
Cc: linux-block@vger.kernel.org, javier@cnexlabs.com, hans.holmberg@cnexlabs.com, igor.j.konopko@intel.com
Subject: [PATCH v5 4/5] lightnvm: Disable interleaved metadata
Date: Fri, 30 Nov 2018 12:44:01 +0100
Message-Id: <20181130114402.43793-5-igor.j.konopko@intel.com>
In-Reply-To: <20181130114402.43793-1-igor.j.konopko@intel.com>
References: <20181130114402.43793-1-igor.j.konopko@intel.com>
List-ID: <linux-block.vger.kernel.org>

Currently pblk and lightnvm only check the size of the OOB metadata and
do not care whether this metadata is located in a separate buffer or is
interleaved with the data in a single buffer. In reality, only the first
scenario is supported; the second mode will break pblk functionality
during any IO operation. The goal of this patch is to block the creation
of pblk devices in the case of interleaved metadata.

Reviewed-by: Javier González
Signed-off-by: Igor Konopko
---
 drivers/lightnvm/pblk-init.c | 6 ++++++
 drivers/nvme/host/lightnvm.c | 1 +
 include/linux/lightnvm.h     | 1 +
 3 files changed, 8 insertions(+)

diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index ff6a6df369c3..e8055b796381 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -1175,6 +1175,12 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
 		return ERR_PTR(-EINVAL);
 	}
 
+	if (geo->ext) {
+		pblk_err(pblk, "extended metadata not supported\n");
+		kfree(pblk);
+		return ERR_PTR(-EINVAL);
+	}
+
 	spin_lock_init(&pblk->resubmit_lock);
 	spin_lock_init(&pblk->trans_lock);
 	spin_lock_init(&pblk->lock);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index ba268d7cf141..f145fc0220d6 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -990,6 +990,7 @@ int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
 	geo = &dev->geo;
 	geo->csecs = 1 << ns->lba_shift;
 	geo->sos = ns->ms;
+	geo->ext = ns->ext;
 
 	dev->q = q;
 	memcpy(dev->name, disk_name, DISK_NAME_LEN);
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 7afedaddbd15..5d865a5d5cdc 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -357,6 +357,7 @@ struct nvm_geo {
 	u32 clba;  /* sectors per chunk */
 	u16 csecs; /* sector size */
 	u16 sos;   /* out-of-band area size */
+	bool ext;  /* metadata in extended data buffer */
 
 	/* device write constrains */
 	u32 ws_min; /* minimum write size */
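
For context on what the geo->ext check above rejects, a tiny illustration
of the two NVMe metadata layouts, with example sizes that are assumptions
rather than values from the patch:

#include <stdio.h>

/* separate: a data buffer of csecs * n plus a meta buffer of sos * n;
 * extended: one buffer of (csecs + sos) * n with metadata interleaved
 * after each sector. pblk only handles the first layout, hence the
 * init-time bail-out when geo->ext is set. */
int main(void)
{
        int csecs = 4096, sos = 16, n = 4;

        printf("separate : data=%d B + meta=%d B\n", csecs * n, sos * n);
        printf("extended : one buffer of %d B (sector+meta interleaved)\n",
               (csecs + sos) * n);
        return 0;
}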
From patchwork Fri Nov 30 11:44:02 2018
X-Patchwork-Submitter: Igor Konopko
X-Patchwork-Id: 10706191
From: Igor Konopko
To: mb@lightnvm.io
Cc: linux-block@vger.kernel.org, javier@cnexlabs.com, hans.holmberg@cnexlabs.com, igor.j.konopko@intel.com
Subject: [PATCH v5 5/5] lightnvm: pblk: Support for packed metadata
Date: Fri, 30 Nov 2018 12:44:02 +0100
Message-Id: <20181130114402.43793-6-igor.j.konopko@intel.com>
In-Reply-To: <20181130114402.43793-1-igor.j.konopko@intel.com>
References: <20181130114402.43793-1-igor.j.konopko@intel.com>
List-ID: <linux-block.vger.kernel.org>

In the current pblk implementation, the l2p mapping for lines that are
not yet closed is always stored only in the OOB metadata and recovered
from it. Such a solution does not provide data integrity when the drive
does not have OOB metadata space. The goal of this patch is to add
support for so-called packed metadata, which stores the l2p mapping for
open lines in the last sector of every write unit. After this set of
changes, drives with an OOB size < 16B will use packed metadata, while
those with >= 16B will continue to use OOB metadata.
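
A rough sketch of the packed-metadata layout, with sizes assumed for
illustration: the last page of each write unit stops carrying user data
and instead holds one 16 B pblk_sec_meta record per sector of the unit,
which is what pblk_get_packed_meta() below copies back into the regular
meta buffer on the recovery path.

#include <stdint.h>
#include <stdio.h>

#define MIN_WRITE_PGS 8    /* assumed ws_opt */

struct pblk_sec_meta {
        uint64_t reserved;
        uint64_t lba;
};

int main(void)
{
        /* the final sector of the write unit, viewed as packed records */
        struct pblk_sec_meta packed[MIN_WRITE_PGS] = {{0, 0}};
        int data_pgs = MIN_WRITE_PGS - 1;  /* min_write_pgs_data */
        int i;

        /* write path: record the lba of every sector in the unit */
        for (i = 0; i < MIN_WRITE_PGS; i++)
                packed[i].lba = 1000 + i;

        /* recovery path: the mapping is read straight from that page */
        printf("%d payload pages + 1 meta page; lba[3] = %llu\n",
               data_pgs, (unsigned long long)packed[3].lba);
        return 0;
}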
Reviewed-by: Javier González
Signed-off-by: Igor Konopko
---
 drivers/lightnvm/pblk-core.c     | 48 ++++++++++++++++++++++++++++++++++----
 drivers/lightnvm/pblk-init.c     | 38 ++++++++++++++++++++++++-----
 drivers/lightnvm/pblk-map.c      |  4 ++--
 drivers/lightnvm/pblk-rb.c       |  3 +++
 drivers/lightnvm/pblk-read.c     |  6 +++++
 drivers/lightnvm/pblk-recovery.c |  5 +++--
 drivers/lightnvm/pblk-sysfs.c    |  7 ++++++
 drivers/lightnvm/pblk-write.c    |  9 ++++----
 drivers/lightnvm/pblk.h          | 10 ++++++++-
 9 files changed, 112 insertions(+), 18 deletions(-)

diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 7e3397f8ead1..1ff165351180 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -376,7 +376,7 @@ void pblk_write_should_kick(struct pblk *pblk)
 {
 	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
 
-	if (secs_avail >= pblk->min_write_pgs)
+	if (secs_avail >= pblk->min_write_pgs_data)
 		pblk_write_kick(pblk);
 }
 
@@ -407,7 +407,9 @@ struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
 	struct pblk_line_meta *lm = &pblk->lm;
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	struct list_head *move_list = NULL;
-	int vsc = le32_to_cpu(*line->vsc);
+	int packed_meta = (le32_to_cpu(*line->vsc) / pblk->min_write_pgs_data)
+			* (pblk->min_write_pgs - pblk->min_write_pgs_data);
+	int vsc = le32_to_cpu(*line->vsc) + packed_meta;
 
 	lockdep_assert_held(&line->lock);
 
@@ -620,12 +622,15 @@ struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
 }
 
 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
-		   unsigned long secs_to_flush)
+		   unsigned long secs_to_flush, bool skip_meta)
 {
 	int max = pblk->sec_per_write;
 	int min = pblk->min_write_pgs;
 	int secs_to_sync = 0;
 
+	if (skip_meta && pblk->min_write_pgs_data != pblk->min_write_pgs)
+		min = max = pblk->min_write_pgs_data;
+
 	if (secs_avail >= max)
 		secs_to_sync = max;
 	else if (secs_avail >= min)
@@ -852,7 +857,7 @@ int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
next_rq:
 	memset(&rqd, 0, sizeof(struct nvm_rq));
 
-	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
+	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
 	rq_len = rq_ppas * geo->csecs;
 
 	bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
@@ -2169,3 +2174,38 @@ void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
 	}
 	spin_unlock(&pblk->trans_lock);
 }
+
+void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd)
+{
+	void *buffer;
+
+	if (pblk_is_oob_meta_supported(pblk)) {
+		/* Just use OOB metadata buffer as always */
+		buffer = rqd->meta_list;
+	} else {
+		/* We need to reuse last page of request (packed metadata)
+		 * in similar way as traditional oob metadata
+		 */
+		buffer = page_to_virt(
+			rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
+	}
+
+	return buffer;
+}
+
+void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)
+{
+	void *meta_list = rqd->meta_list;
+	void *page;
+	int i = 0;
+
+	if (pblk_is_oob_meta_supported(pblk))
+		return;
+
+	page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
+	/* We need to fill oob meta buffer with data from packed metadata */
+	for (; i < rqd->nr_ppas; i++)
+		memcpy(pblk_get_meta(pblk, meta_list, i),
+			page + (i * sizeof(struct pblk_sec_meta)),
+			sizeof(struct pblk_sec_meta));
+}
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index e8055b796381..f9a3e47b6a93 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -399,6 +399,7 @@ static int pblk_core_init(struct pblk *pblk)
 	pblk->nr_flush_rst = 0;
 
 	pblk->min_write_pgs = geo->ws_opt;
+	pblk->min_write_pgs_data = pblk->min_write_pgs;
 	max_write_ppas = pblk->min_write_pgs * geo->all_luns;
 	pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
 	pblk->max_write_pgs = min_t(int, pblk->max_write_pgs,
@@ -406,9 +407,35 @@ static int pblk_core_init(struct pblk *pblk)
 	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
 
 	pblk->oob_meta_size = geo->sos;
-	if (pblk->oob_meta_size != sizeof(struct pblk_sec_meta)) {
-		pblk_err(pblk, "Unsupported metadata size\n");
-		return -EINVAL;
+	if (!pblk_is_oob_meta_supported(pblk)) {
+		/* For drives which do not have the OOB metadata feature,
+		 * in order to support the recovery feature we need to use
+		 * so-called packed metadata. Packed metadata will store
+		 * the same information as OOB metadata (l2p table mapping),
+		 * but in the form of a single page at the end of
+		 * every write request.
+		 */
+		if (pblk->min_write_pgs
+			* sizeof(struct pblk_sec_meta) > PAGE_SIZE) {
+			/* We want to keep all the packed metadata on a single
+			 * page per write request. So we need to ensure that
+			 * it will fit.
+			 *
+			 * This is more of a sanity check, since there is
+			 * no device with such a big minimal write size
+			 * (above 1 megabyte).
+			 */
+			pblk_err(pblk, "Not supported min write size\n");
+			return -EINVAL;
+		}
+		/* For the packed meta approach we do some simplification.
+		 * On the read path we always issue requests whose size
+		 * is equal to max_write_pgs, with all pages filled with
+		 * user payload except the last page, which will be
+		 * filled with packed metadata.
+		 */
+		pblk->max_write_pgs = pblk->min_write_pgs;
+		pblk->min_write_pgs_data = pblk->min_write_pgs - 1;
 	}
 
 	pblk->pad_dist = kcalloc(pblk->min_write_pgs - 1, sizeof(atomic64_t),
@@ -641,7 +668,7 @@ static int pblk_set_provision(struct pblk *pblk, int nr_free_chks)
 	struct pblk_line_meta *lm = &pblk->lm;
 	struct nvm_geo *geo = &dev->geo;
 	sector_t provisioned;
-	int sec_meta, blk_meta;
+	int sec_meta, blk_meta, clba;
 	int minimum;
 
 	if (geo->op == NVM_TARGET_DEFAULT_OP)
@@ -682,7 +709,8 @@ static int pblk_set_provision(struct pblk *pblk, int nr_free_chks)
 	sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
 	blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);
 
-	pblk->capacity = (provisioned - blk_meta) * geo->clba;
+	clba = (geo->clba / pblk->min_write_pgs) * pblk->min_write_pgs_data;
+	pblk->capacity = (provisioned - blk_meta) * clba;
 
 	atomic_set(&pblk->rl.free_blocks, nr_free_chks);
 	atomic_set(&pblk->rl.free_user_blocks, nr_free_chks);
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
index 81e503ec384e..79df583ea709 100644
--- a/drivers/lightnvm/pblk-map.c
+++ b/drivers/lightnvm/pblk-map.c
@@ -96,7 +96,7 @@ int pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
 		 unsigned long *lun_bitmap, unsigned int valid_secs,
 		 unsigned int off)
 {
-	void *meta_list = rqd->meta_list;
+	void *meta_list = pblk_get_meta_for_writes(pblk, rqd);
 	void *meta_buffer;
 	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
 	unsigned int map_secs;
@@ -125,7 +125,7 @@ int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
 	struct pblk_line_meta *lm = &pblk->lm;
-	void *meta_list = rqd->meta_list;
+	void *meta_list = pblk_get_meta_for_writes(pblk, rqd);
 	void *meta_buffer;
 	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
 	struct pblk_line *e_line, *d_line;
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index 9f7fa0fe9c77..d4ca8c64ee0f 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -552,6 +552,9 @@ unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
 		to_read = count;
 	}
 
+	/* Add space for packed metadata if in use */
+	pad += (pblk->min_write_pgs - pblk->min_write_pgs_data);
+
 	c_ctx->sentry = pos;
 	c_ctx->nr_valid = to_read;
 	c_ctx->nr_padded = pad;
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 6becd85ca4c6..3789185144da 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -112,6 +112,9 @@ static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
 	int nr_lbas = rqd->nr_ppas;
 	int i;
 
+	if (!pblk_is_oob_meta_supported(pblk))
+		return;
+
 	for (i = 0; i < nr_lbas; i++) {
 		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
 		u64 lba = le64_to_cpu(meta->lba);
@@ -141,6 +144,9 @@ static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
 	void *meta_lba_list = rqd->meta_list;
 	int i, j;
 
+	if (!pblk_is_oob_meta_supported(pblk))
+		return;
+
 	for (i = 0, j = 0; i < nr_lbas; i++) {
 		struct pblk_sec_meta *meta = pblk_get_meta(pblk,
 							   meta_lba_list, j);
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index 3a775d10f616..009faf5db40f 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -191,7 +191,7 @@ static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
 	kref_init(&pad_rq->ref);
 
next_pad_rq:
-	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
+	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
 	if (rq_ppas < pblk->min_write_pgs) {
 		pblk_err(pblk, "corrupted pad line %d\n", line->id);
 		goto fail_free_pad;
@@ -371,7 +371,7 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
next_rq:
 	memset(rqd, 0, pblk_g_rq_size);
 
-	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
+	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
 	if (!rq_ppas)
 		rq_ppas = pblk->min_write_pgs;
 	rq_len = rq_ppas * geo->csecs;
@@ -440,6 +440,7 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
 		goto retry_rq;
 	}
 
+	pblk_get_packed_meta(pblk, rqd);
 	for (i = 0; i < rqd->nr_ppas; i++) {
 		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
 		u64 lba = le64_to_cpu(meta->lba);
diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c
index 2d2818155aa8..7d8958df9472 100644
--- a/drivers/lightnvm/pblk-sysfs.c
+++ b/drivers/lightnvm/pblk-sysfs.c
@@ -479,6 +479,13 @@ static ssize_t pblk_sysfs_set_sec_per_write(struct pblk *pblk,
 	if (kstrtouint(page, 0, &sec_per_write))
 		return -EINVAL;
 
+	if (!pblk_is_oob_meta_supported(pblk)) {
+		/* For the packed metadata case it is
+		 * not allowed to change sec_per_write.
+		 */
+		return -EINVAL;
+	}
+
 	if (sec_per_write < pblk->min_write_pgs
 				|| sec_per_write > pblk->max_write_pgs
 				|| sec_per_write % pblk->min_write_pgs != 0)
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index 2bf78f81862d..06d56deb645d 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -348,7 +348,7 @@ static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
 {
 	int secs_to_sync;
 
-	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);
+	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush, true);
 
 #ifdef CONFIG_NVM_PBLK_DEBUG
 	if ((!secs_to_sync && secs_to_flush)
@@ -569,7 +569,7 @@ static int pblk_submit_write(struct pblk *pblk, int *secs_left)
 	struct bio *bio;
 	struct nvm_rq *rqd;
 	unsigned int secs_avail, secs_to_sync, secs_to_com;
-	unsigned int secs_to_flush;
+	unsigned int secs_to_flush, packed_meta_pgs;
 	unsigned long pos;
 	unsigned int resubmit;
@@ -607,7 +607,7 @@ static int pblk_submit_write(struct pblk *pblk, int *secs_left)
 		return 0;
 
 	secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
-	if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
+	if (!secs_to_flush && secs_avail < pblk->min_write_pgs_data)
 		return 0;
 
 	secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
@@ -622,7 +622,8 @@ static int pblk_submit_write(struct pblk *pblk, int *secs_left)
 		pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
 	}
 
-	bio = bio_alloc(GFP_KERNEL, secs_to_sync);
+	packed_meta_pgs = (pblk->min_write_pgs - pblk->min_write_pgs_data);
+	bio = bio_alloc(GFP_KERNEL, secs_to_sync + packed_meta_pgs);
 
 	bio->bi_iter.bi_sector = 0; /* internal bio */
 	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 9087d53d5c25..bc40b1381ff6 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -632,6 +632,7 @@ struct pblk {
 	int state;			/* pblk line state */
 
 	int min_write_pgs; /* Minimum amount of pages required by controller */
+	int min_write_pgs_data; /* Minimum amount of payload pages */
 	int max_write_pgs; /* Maximum amount of pages supported by controller */
 	int oob_meta_size; /* Size of OOB sector metadata */
@@ -838,7 +839,7 @@ void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
-		   unsigned long secs_to_flush);
+		   unsigned long secs_to_flush, bool skip_meta);
 void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
 		  unsigned long *lun_bitmap);
 void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa);
@@ -862,6 +863,8 @@ void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
 			  u64 *lba_list, int nr_secs);
 void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
 			 sector_t blba, int nr_secs);
+void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd);
+void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd);
 
 /*
  * pblk user I/O write path
@@ -1392,4 +1395,9 @@ static inline int pblk_dma_meta_size(struct pblk *pblk)
 {
 	return pblk->oob_meta_size * NVM_MAX_VLBA;
 }
+
+static inline int pblk_is_oob_meta_supported(struct pblk *pblk)
+{
+	return pblk->oob_meta_size >= sizeof(struct pblk_sec_meta);
+}
 #endif /* PBLK_H_ */
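
With packed metadata enabled, the exported capacity computed in
pblk_set_provision() above shrinks by one page per write unit; a quick
check of the arithmetic with assumed geometry values (not taken from the
patch):

#include <stdio.h>

int main(void)
{
        int clba = 4096;         /* example: sectors per chunk */
        int min_write_pgs = 8;   /* example: ws_opt */
        int min_write_pgs_data = min_write_pgs - 1;  /* packed meta on */

        /* user-visible sectors per chunk, as in pblk_set_provision() */
        int clba_data = (clba / min_write_pgs) * min_write_pgs_data;

        printf("%d of %d sectors per chunk carry user data (%.1f%%)\n",
               clba_data, clba, 100.0 * clba_data / clba);
        return 0;
}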