--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1388,6 +1388,7 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
struct nd_region *nd_region;
+ struct badblocks *bb;
struct btt *btt;
size_t rawsize;
@@ -1398,6 +1399,16 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
if (rawsize < ARENA_MIN_SIZE) {
return -ENXIO;
}
+
+ bb = nvdimm_namespace_badblocks(ndns, 0);
+ if (IS_ERR(bb)) {
+ if (PTR_ERR(bb) == -ENOENT)
+ bb = NULL;
+ else
+ return PTR_ERR(bb);
+ }
+
+ ndns->bb = bb;
nd_region = to_nd_region(nd_btt->dev.parent);
btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
nd_region);
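The ENOENT-tolerant lookup added above is repeated verbatim in nd_btt_probe() and nd_pfn_probe() below. A minimal sketch of a helper that could factor out all three call sites (the helper name is hypothetical; the semantics are exactly those of the open-coded pattern, where -ENOENT means "no badblocks list exists" and is not fatal):

static int nd_namespace_resolve_badblocks(struct nd_namespace_common *ndns)
{
	struct badblocks *bb = nvdimm_namespace_badblocks(ndns, 0);

	if (IS_ERR(bb)) {
		if (PTR_ERR(bb) != -ENOENT)
			return PTR_ERR(bb);
		/* no list registered: treat the namespace as all-good */
		bb = NULL;
	}
	ndns->bb = bb;
	return 0;
}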
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -277,12 +277,22 @@ int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata)
{
int rc;
struct device *dev;
+ struct badblocks *bb;
struct btt_sb *btt_sb;
struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
if (ndns->force_raw)
return -ENODEV;
+ bb = nvdimm_namespace_badblocks(ndns, 0);
+ if (IS_ERR(bb)) {
+ if (PTR_ERR(bb) == -ENOENT)
+ bb = NULL;
+ else
+ return PTR_ERR(bb);
+ }
+
+ ndns->bb = bb;
nvdimm_bus_lock(&ndns->dev);
dev = __nd_btt_create(nd_region, 0, NULL, ndns);
nvdimm_bus_unlock(&ndns->dev);
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -305,6 +305,7 @@ int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
{
int rc;
struct device *dev;
+ struct badblocks *bb;
struct nd_pfn *nd_pfn;
struct nd_pfn_sb *pfn_sb;
struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
@@ -312,6 +313,15 @@ int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
if (ndns->force_raw)
return -ENODEV;
+ bb = nvdimm_namespace_badblocks(ndns, 0);
+ if (IS_ERR(bb)) {
+ if (PTR_ERR(bb) == -ENOENT)
+ bb = NULL;
+ else
+ return PTR_ERR(bb);
+ }
+
+ ndns->bb = bb;
nvdimm_bus_lock(&ndns->dev);
dev = __nd_pfn_create(nd_region, NULL, PFN_MODE_NONE, ndns);
nvdimm_bus_unlock(&ndns->dev);
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -249,9 +249,14 @@ static int pmem_rw_bytes(struct nd_namespace_common *ndns,
return -EFAULT;
}
- if (rw == READ)
+ if (rw == READ) {
+ unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
+
+ if (unlikely(is_bad_pmem(ndns->bb, offset / 512,
+ sz_align)))
+ return -EIO;
rc = memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
- else {
+ } else {
memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
wmb_pmem();
}
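Badblocks are tracked at 512-byte sector granularity, so the byte-granular request above has to be widened to every sector it touches: offset / 512 rounds the start down to a sector boundary, and sz_align pads the length by the intra-sector start offset before rounding up. A standalone userspace sketch of the same arithmetic (ALIGN() is re-derived here; it matches the kernel macro for power-of-two alignments):

#include <stdio.h>

#define SECTOR_SIZE 512UL
/* same rounding as the kernel's ALIGN() for power-of-two alignments */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* a 2-byte read at offset 1023 straddles sectors 1 and 2 */
	unsigned long offset = 1023, size = 2;
	unsigned long sz_align = ALIGN(size + (offset & (SECTOR_SIZE - 1)),
			SECTOR_SIZE);

	printf("check sectors %lu..%lu (%lu bytes)\n",
			offset / SECTOR_SIZE,
			offset / SECTOR_SIZE + sz_align / SECTOR_SIZE - 1,
			sz_align);	/* prints: check sectors 1..2 (1024 bytes) */
	return 0;
}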
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -468,6 +468,12 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
to_nd_blk_region(dev)->disable(nvdimm_bus, dev);
}
+ if (dev->parent && is_nd_pmem(dev->parent) && !probe) {
+ struct nd_namespace_common *ndns = to_ndns(dev);
+
+ /* badblocks list is only valid between ->probe and ->remove */
+ ndns->bb = NULL;
+ }
if (dev->parent && is_nd_blk(dev->parent) && probe) {
nd_region = to_nd_region(dev->parent);
nvdimm_bus_lock(dev);
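Because ->bb is cleared at ->remove time here, and may also be NULL when no list was registered (the -ENOENT case above), the is_bad_pmem() check in pmem_rw_bytes() has to tolerate a NULL list. is_bad_pmem() itself is not part of this diff; a plausible sketch, assuming the badblocks_check() interface from block/badblocks.c:

static bool is_bad_pmem(struct badblocks *bb, sector_t sector,
		unsigned int len)
{
	sector_t first_bad;
	int num_bad;

	/* no list, or list already torn down: assume good media */
	if (!bb || !bb->count)
		return false;
	return !!badblocks_check(bb, sector, len / 512, &first_bad, &num_bad);
}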
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -34,12 +34,14 @@ static inline struct nd_device_driver *to_nd_device_driver(
* @force_raw: ignore other personalities for the namespace (e.g. btt)
* @dev: device model node
* @claim: when set, another personality has taken ownership of the namespace
+ * @bb: badblocks list only valid while namespace is active
* @rw_bytes: access the raw namespace capacity with byte-aligned transfers
*/
struct nd_namespace_common {
int force_raw;
struct device dev;
struct device *claim;
+ struct badblocks *bb;
int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset,
void *buf, size_t size, int rw);
};
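With the field in place, any personality that reads through the byte-access interface gets the poison check for free; this is how the btt and pfn info block reads mentioned in the changelog below are protected without touching their callers. A minimal sketch of such a consumer (the helper is hypothetical; only the rw_bytes() contract documented above is assumed):

static int read_info_block(struct nd_namespace_common *ndns, void *buf,
		resource_size_t offset, size_t len)
{
	/* routed through pmem_rw_bytes(), which now screens ndns->bb */
	return ndns->rw_bytes(ndns, offset, buf, len, READ);
}

A read that overlaps a known-bad sector now fails with -EIO before the poisoned media is touched.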
Support badblock checking in all the pmem read paths that do not go
through the block layer.  This protects info block reads (btt or pfn) as
well as data reads to a pmem namespace via a btt instance.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 drivers/nvdimm/btt.c         |   11 +++++++++++
 drivers/nvdimm/btt_devs.c    |   10 ++++++++++
 drivers/nvdimm/pfn_devs.c    |   10 ++++++++++
 drivers/nvdimm/pmem.c        |    9 +++++++--
 drivers/nvdimm/region_devs.c |    6 ++++++
 include/linux/nd.h           |    2 ++

 6 files changed, 46 insertions(+), 2 deletions(-)