Message ID | 20190314160428.3559-7-igor.j.konopko@intel.com (mailing list archive)
---|---
State | New, archived
Series | lightnvm: next set of improvements for 5.2
> On 14 Mar 2019, at 09.04, Igor Konopko <igor.j.konopko@intel.com> wrote:
>
> This patch ensures that smeta/emeta was written properly before even
> trying to read it based on chunk table state and write pointer.
>
> Signed-off-by: Igor Konopko <igor.j.konopko@intel.com>
> ---
> drivers/lightnvm/pblk-recovery.c | 43 ++++++++++++++++++++++++++++++++++++++--
> 1 file changed, 41 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
> index 688fdeb..ba1691d 100644
> --- a/drivers/lightnvm/pblk-recovery.c
> +++ b/drivers/lightnvm/pblk-recovery.c
> @@ -653,8 +653,42 @@ static int pblk_line_was_written(struct pblk_line *line,
> 	bppa = pblk->luns[smeta_blk].bppa;
> 	chunk = &line->chks[pblk_ppa_to_pos(geo, bppa)];
>
> -	if (chunk->state & NVM_CHK_ST_FREE)
> -		return 0;
> +	if (chunk->state & NVM_CHK_ST_CLOSED ||
> +	    (chunk->state & NVM_CHK_ST_OPEN
> +	     && chunk->wp >= lm->smeta_sec))
> +		return 1;
> +
> +	return 0;
> +}
> +
> +static int pblk_line_was_emeta_written(struct pblk_line *line,
> +				       struct pblk *pblk)
> +{
> +
> +	struct pblk_line_meta *lm = &pblk->lm;
> +	struct nvm_tgt_dev *dev = pblk->dev;
> +	struct nvm_geo *geo = &dev->geo;
> +	struct nvm_chk_meta *chunk;
> +	struct ppa_addr ppa;
> +	int i, pos;
> +	int min = pblk->min_write_pgs;
> +	u64 paddr = line->emeta_ssec;
> +
> +	for (i = 0; i < lm->emeta_sec[0]; i++, paddr++) {
> +		ppa = addr_to_gen_ppa(pblk, paddr, line->id);
> +		pos = pblk_ppa_to_pos(geo, ppa);
> +		while (test_bit(pos, line->blk_bitmap)) {
> +			paddr += min;
> +			ppa = addr_to_gen_ppa(pblk, paddr, line->id);
> +			pos = pblk_ppa_to_pos(geo, ppa);
> +		}
> +		chunk = &line->chks[pos];
> +
> +		if (!(chunk->state & NVM_CHK_ST_CLOSED ||
> +		      (chunk->state & NVM_CHK_ST_OPEN
> +		       && chunk->wp > ppa.m.sec)))
> +			return 0;
> +	}
>
> 	return 1;
> }
> @@ -788,6 +822,11 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
> 			goto next;
> 		}
>
> +		if (!pblk_line_was_emeta_written(line, pblk)) {
> +			pblk_recov_l2p_from_oob(pblk, line);
> +			goto next;
> +		}
> +
> 		if (pblk_line_emeta_read(pblk, line, line->emeta->buf)) {
> 			pblk_recov_l2p_from_oob(pblk, line);
> 			goto next;
> --
> 2.9.5

I would like to avoid iterating on all chunks again and again to check
for different things at boot time. What do you think about having this as
something like PBLK_LINESTATE_OPEN_INVALID or PBLK_LINESTATE_OPEN_NONRECOV,
which you populate when collecting the chunk information? Then this becomes
a simple check as opposed to the extra chunk iteration?

On the check itself: Is this done for completeness or have you hit a case
that is not covered by the smeta/emeta CRC protection?
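As a rough illustration of the suggestion above, here is a minimal sketch that
assumes a hypothetical PBLK_LINESTATE_OPEN_NONRECOV line state and a made-up
helper, pblk_line_mark_nonrecov(); only the chunk and line-metadata fields come
from the patch, the rest is not actual pblk code:

/* Hypothetical sketch of the proposal: classify the line once, while the
 * chunk table is collected at boot, so recovery can test a flag instead
 * of walking the chunks again. PBLK_LINESTATE_OPEN_NONRECOV does not
 * exist in pblk; it stands in for the state proposed above.
 */
static void pblk_line_mark_nonrecov(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int i;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct nvm_chk_meta *chunk = &line->chks[i];

		/* A chunk left open means the line was interrupted before
		 * all of its metadata could be persisted; the real condition
		 * could also consult chunk->wp, as the patch does.
		 */
		if (chunk->state & NVM_CHK_ST_OPEN) {
			line->state = PBLK_LINESTATE_OPEN_NONRECOV;
			return;
		}
	}
}

Recovery would then reduce to a single comparison on line->state before the
emeta read, instead of the per-sector loop in pblk_line_was_emeta_written().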
On 17.03.2019 00:46, Javier González wrote:
>> On 14 Mar 2019, at 09.04, Igor Konopko <igor.j.konopko@intel.com> wrote:
>>
>> This patch ensures that smeta/emeta was written properly before even
>> trying to read it based on chunk table state and write pointer.
>>
>> Signed-off-by: Igor Konopko <igor.j.konopko@intel.com>
>> ---
>> [...]
>
> I would like to avoid iterating on all chunks again and again to check
> for different things at boot time. What do you think about having this as
> something like PBLK_LINESTATE_OPEN_INVALID or
> PBLK_LINESTATE_OPEN_NONRECOV, which you populate when collecting the
> chunk information? Then this becomes a simple check as opposed to the
> extra chunk iteration?

Ok, will work on something different.

> On the check itself: Is this done for completeness or have you hit a
> case that is not covered by the smeta/emeta CRC protection?

So generally my goal here was to avoid reading blank sectors when it is not
needed.
On 18.03.2019 13:54, Igor Konopko wrote:
> On 17.03.2019 00:46, Javier González wrote:
>>> On 14 Mar 2019, at 09.04, Igor Konopko <igor.j.konopko@intel.com> wrote:
>>>
>>> This patch ensures that smeta/emeta was written properly before even
>>> trying to read it based on chunk table state and write pointer.
>>>
>>> Signed-off-by: Igor Konopko <igor.j.konopko@intel.com>
>>> ---
>>> [...]
>>
>> I would like to avoid iterating on all chunks again and again to check
>> for different things at boot time. What do you think about having this as
>> something like PBLK_LINESTATE_OPEN_INVALID or
>> PBLK_LINESTATE_OPEN_NONRECOV, which you populate when collecting the
>> chunk information? Then this becomes a simple check as opposed to the
>> extra chunk iteration?
>
> Ok, will work on something different.

I rethought my changes and realized I messed up: the emeta check is redundant,
since pblk_line_is_open() already handles the scenario where emeta was not
fully written (some chunk of the line will still be in the open state). So I'll
drop the emeta part of this patch from v2 and keep only the smeta fix.

>> On the check itself: Is this done for completeness or have you hit a
>> case that is not covered by the smeta/emeta CRC protection?
>
> So generally my goal here was to avoid reading blank sectors when it is
> not needed.
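To spell out the redundancy described above: a line whose emeta was cut short
necessarily still has an open chunk, so the existing open-line handling already
diverts it to OOB recovery before any emeta read is attempted. A paraphrased
sketch of that flow follows; the pblk_line_is_open() signature and its placement
are assumptions for illustration, not the literal pblk_recov_l2p() body:

	/* Paraphrased recovery flow, not the actual pblk_recov_l2p() code:
	 * an interrupted line (some chunk still open) is recovered from the
	 * out-of-band area, so the emeta path below only ever sees fully
	 * closed lines and the extra emeta-written check never triggers.
	 */
	if (pblk_line_is_open(pblk, line)) {
		pblk_recov_l2p_from_oob(pblk, line);
		goto next;
	}

	if (pblk_line_emeta_read(pblk, line, line->emeta->buf)) {
		pblk_recov_l2p_from_oob(pblk, line);
		goto next;
	}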
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index 688fdeb..ba1691d 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -653,8 +653,42 @@ static int pblk_line_was_written(struct pblk_line *line,
 	bppa = pblk->luns[smeta_blk].bppa;
 	chunk = &line->chks[pblk_ppa_to_pos(geo, bppa)];
 
-	if (chunk->state & NVM_CHK_ST_FREE)
-		return 0;
+	if (chunk->state & NVM_CHK_ST_CLOSED ||
+	    (chunk->state & NVM_CHK_ST_OPEN
+	     && chunk->wp >= lm->smeta_sec))
+		return 1;
+
+	return 0;
+}
+
+static int pblk_line_was_emeta_written(struct pblk_line *line,
+				       struct pblk *pblk)
+{
+
+	struct pblk_line_meta *lm = &pblk->lm;
+	struct nvm_tgt_dev *dev = pblk->dev;
+	struct nvm_geo *geo = &dev->geo;
+	struct nvm_chk_meta *chunk;
+	struct ppa_addr ppa;
+	int i, pos;
+	int min = pblk->min_write_pgs;
+	u64 paddr = line->emeta_ssec;
+
+	for (i = 0; i < lm->emeta_sec[0]; i++, paddr++) {
+		ppa = addr_to_gen_ppa(pblk, paddr, line->id);
+		pos = pblk_ppa_to_pos(geo, ppa);
+		while (test_bit(pos, line->blk_bitmap)) {
+			paddr += min;
+			ppa = addr_to_gen_ppa(pblk, paddr, line->id);
+			pos = pblk_ppa_to_pos(geo, ppa);
+		}
+		chunk = &line->chks[pos];
+
+		if (!(chunk->state & NVM_CHK_ST_CLOSED ||
+		      (chunk->state & NVM_CHK_ST_OPEN
+		       && chunk->wp > ppa.m.sec)))
+			return 0;
+	}
 
 	return 1;
 }
@@ -788,6 +822,11 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
 			goto next;
 		}
 
+		if (!pblk_line_was_emeta_written(line, pblk)) {
+			pblk_recov_l2p_from_oob(pblk, line);
+			goto next;
+		}
+
 		if (pblk_line_emeta_read(pblk, line, line->emeta->buf)) {
 			pblk_recov_l2p_from_oob(pblk, line);
 			goto next;
This patch ensures that smeta/emeta was written properly before even
trying to read it based on chunk table state and write pointer.

Signed-off-by: Igor Konopko <igor.j.konopko@intel.com>
---
 drivers/lightnvm/pblk-recovery.c | 43 ++++++++++++++++++++++++++++++++++++++--
 1 file changed, 41 insertions(+), 2 deletions(-)
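Restated in terms of the chunk table (names taken from the diff above; this is
only a reading of the condition the patch keeps for smeta, not additional
kernel code):

	/* smeta occupies the first lm->smeta_sec sectors of the line's first
	 * usable chunk, so it can be read back when that chunk is closed, or
	 * is open with its write pointer already past the smeta region.
	 */
	bool smeta_written = (chunk->state & NVM_CHK_ST_CLOSED) ||
			     ((chunk->state & NVM_CHK_ST_OPEN) &&
			      chunk->wp >= lm->smeta_sec);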