@@ -71,6 +71,10 @@ static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
"arena->info2off: %#llx is unaligned\n", arena->info2off);
+ /*
+ * btt_sb is critical metadata and needs a durable write:
+ * keep the deep flush here so nvdimm_flush() is still called.
+ */
ret = arena_write_bytes(arena, arena->info2off, super,
sizeof(struct btt_sb), 0);
if (ret)
@@ -385,7 +389,8 @@ static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
{
int ret;
- ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
+ ret = __btt_log_write(arena, lane, sub, ent,
+ NVDIMM_IO_ATOMIC|NVDIMM_NO_DEEPFLUSH);
if (ret)
return ret;
@@ -430,7 +435,7 @@ static int btt_map_init(struct arena_info *arena)
dev_WARN_ONCE(to_dev(arena), size < 512,
"chunk size: %#zx is unaligned\n", size);
ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
- size, 0);
+ size, NVDIMM_NO_DEEPFLUSH);
if (ret)
goto free;
@@ -474,7 +479,7 @@ static int btt_log_init(struct arena_info *arena)
dev_WARN_ONCE(to_dev(arena), size < 512,
"chunk size: %#zx is unaligned\n", size);
ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
- size, 0);
+ size, NVDIMM_NO_DEEPFLUSH);
if (ret)
goto free;
@@ -488,7 +493,7 @@ static int btt_log_init(struct arena_info *arena)
ent.old_map = cpu_to_le32(arena->external_nlba + i);
ent.new_map = cpu_to_le32(arena->external_nlba + i);
ent.seq = cpu_to_le32(LOG_SEQ_INIT);
- ret = __btt_log_write(arena, i, 0, &ent, 0);
+ ret = __btt_log_write(arena, i, 0, &ent, NVDIMM_NO_DEEPFLUSH);
if (ret)
goto free;
}
@@ -519,7 +524,7 @@ static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
unsigned long chunk = min(len, PAGE_SIZE);
ret = arena_write_bytes(arena, nsoff, zero_page,
- chunk, 0);
+ chunk, NVDIMM_NO_DEEPFLUSH);
if (ret)
break;
len -= chunk;
@@ -593,7 +598,8 @@ static int btt_freelist_init(struct arena_info *arena)
* to complete the map write. So fix up the map.
*/
ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
- le32_to_cpu(log_new.new_map), 0, 0, 0);
+ le32_to_cpu(log_new.new_map), 0, 0,
+ NVDIMM_NO_DEEPFLUSH);
if (ret)
return ret;
}
@@ -1124,7 +1130,8 @@ static int btt_data_write(struct arena_info *arena, u32 lba,
u64 nsoff = to_namespace_offset(arena, lba);
void *mem = kmap_atomic(page);
- ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
+ ret = arena_write_bytes(arena, nsoff, mem + off, len,
+ NVDIMM_IO_ATOMIC|NVDIMM_NO_DEEPFLUSH);
kunmap_atomic(mem);
return ret;
@@ -1168,11 +1175,11 @@ static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
if (rw)
ret = arena_write_bytes(arena, meta_nsoff,
mem + bv.bv_offset, cur_len,
- NVDIMM_IO_ATOMIC);
+ NVDIMM_IO_ATOMIC|NVDIMM_NO_DEEPFLUSH);
else
ret = arena_read_bytes(arena, meta_nsoff,
mem + bv.bv_offset, cur_len,
- NVDIMM_IO_ATOMIC);
+ NVDIMM_IO_ATOMIC|NVDIMM_NO_DEEPFLUSH);
kunmap_atomic(mem);
if (ret)
@@ -1263,7 +1270,8 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
ret = btt_data_read(arena, page, off, postmap, cur_len);
if (ret) {
/* Media error - set the e_flag */
- if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
+ if (btt_map_write(arena, premap, postmap, 0, 1,
+ NVDIMM_IO_ATOMIC|NVDIMM_NO_DEEPFLUSH))
dev_warn_ratelimited(to_dev(arena),
"Error persistently tracking bad blocks at %#x\n",
premap);
@@ -1396,7 +1404,7 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
goto out_map;
ret = btt_map_write(arena, premap, new_postmap, 0, 0,
- NVDIMM_IO_ATOMIC);
+ NVDIMM_IO_ATOMIC|NVDIMM_NO_DEEPFLUSH);
if (ret)
goto out_map;
@@ -294,9 +294,14 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
}
memcpy_flushcache(nsio->addr + offset, buf, size);
- ret = nvdimm_flush(to_nd_region(ndns->dev.parent), NULL);
- if (ret)
+ if (!(flags & NVDIMM_NO_DEEPFLUSH)) {
+ ret = nvdimm_flush(to_nd_region(ndns->dev.parent), NULL);
+ if (ret)
+ rc = ret;
+ } else {
rc = ret;
+ pmem_wmb();
+ }
return rc;
}
@@ -22,7 +22,11 @@ enum {
*/
ND_MAX_LANES = 256,
INT_LBASIZE_ALIGNMENT = 64,
+ /*
+ * NVDIMM_IO_ATOMIC and NVDIMM_NO_DEEPFLUSH may be OR'd together.
+ */
NVDIMM_IO_ATOMIC = 1,
+ NVDIMM_NO_DEEPFLUSH = 2,
};
struct nvdimm_drvdata {
Reason: the nfit module already provides global control of deep flush via the
"no_deepflush" parameter. In the "no_deepflush=0" case we still need per-write
control over whether a write takes a deep flush, which is what the
NVDIMM_NO_DEEPFLUSH flag provides. In the BTT, the information block (btt_sb)
keeps using deep flush; everything else, i.e. the data blocks (512B or 4KB),
the 4-byte btt_map entries and the 16-byte flog entries, does not, so no deep
flush is issued on the BTT path at runtime.

How: add the NVDIMM_NO_DEEPFLUSH flag, which can be combined with
NVDIMM_IO_ATOMIC as NVDIMM_NO_DEEPFLUSH | NVDIMM_IO_ATOMIC. When
"if (!(flags & NVDIMM_NO_DEEPFLUSH))" holds, nvdimm_flush() is called;
otherwise pmem_wmb() is called to fence all previous writes.

Signed-off-by: Dennis.Wu <dennis.wu@intel.com>
---
 drivers/nvdimm/btt.c   | 30 +++++++++++++++++++-----------
 drivers/nvdimm/claim.c |  9 +++++++--
 drivers/nvdimm/nd.h    |  4 ++++
 3 files changed, 30 insertions(+), 13 deletions(-)
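
For illustration only (not part of the patch): a minimal userspace sketch of
the intended flag semantics, assuming the enum values added above; deep_flush()
and write_fence() are hypothetical stand-ins for nvdimm_flush() and pmem_wmb().

#include <stdio.h>

enum {
	NVDIMM_IO_ATOMIC    = 1,	/* no sleeping on the I/O path */
	NVDIMM_NO_DEEPFLUSH = 2,	/* skip the deep flush after the write */
};

/* Stand-in for nvdimm_flush(): flush the write-pending queues (deep flush). */
static int deep_flush(void)
{
	puts("deep flush");
	return 0;
}

/* Stand-in for pmem_wmb(): fence the preceding cache-flushed stores. */
static void write_fence(void)
{
	puts("write fence only");
}

/* Mirrors the tail of nsio_rw_bytes() after memcpy_flushcache(). */
static int finish_write(unsigned long flags)
{
	int rc = 0;

	if (!(flags & NVDIMM_NO_DEEPFLUSH)) {
		int ret = deep_flush();

		if (ret)
			rc = ret;
	} else {
		write_fence();
	}
	return rc;
}

int main(void)
{
	finish_write(0);					/* btt_sb: keep deep flush */
	finish_write(NVDIMM_IO_ATOMIC | NVDIMM_NO_DEEPFLUSH);	/* runtime BTT I/O */
	return 0;
}

The split keeps the deep flush reserved for the BTT info block, while runtime
data, map and flog writes rely on memcpy_flushcache() plus a store fence for
persistence.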