@@ -125,6 +125,7 @@ static struct pmem_device *pmem_alloc(struct device *dev,
struct resource *res, int id)
{
struct pmem_device *pmem;
+	struct request_queue *q;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
if (!pmem)
@@ -142,6 +143,10 @@ static struct pmem_device *pmem_alloc(struct device *dev,
return ERR_PTR(-EBUSY);
	}

+	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
+ if (!q)
+ return ERR_PTR(-ENOMEM);
+
pmem->pfn_flags = PFN_DEV;
if (pmem_should_map_pages(dev)) {
pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res,
@@ -152,9 +157,12 @@ static struct pmem_device *pmem_alloc(struct device *dev,
pmem->phys_addr, pmem->size,
			ARCH_MEMREMAP_PMEM);

-	if (IS_ERR(pmem->virt_addr))
+ if (IS_ERR(pmem->virt_addr)) {
+ blk_cleanup_queue(q);
return (void __force *) pmem->virt_addr;
+	}

+	pmem->pmem_queue = q;
return pmem;
}

@@ -174,10 +182,6 @@ static int pmem_attach_disk(struct device *dev,
int nid = dev_to_node(dev);
	struct gendisk *disk;

-	pmem->pmem_queue = blk_alloc_queue_node(GFP_KERNEL, nid);
- if (!pmem->pmem_queue)
- return -ENOMEM;
-
blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
@@ -412,19 +416,22 @@ static int nd_pmem_probe(struct device *dev)
dev_set_drvdata(dev, pmem);
	ndns->rw_bytes = pmem_rw_bytes;

-	if (is_nd_btt(dev))
+ if (is_nd_btt(dev)) {
+ /* btt allocates its own request_queue */
+ blk_cleanup_queue(pmem->pmem_queue);
+ pmem->pmem_queue = NULL;
return nvdimm_namespace_attach_btt(ndns);
+	}

	if (is_nd_pfn(dev))
		return nvdimm_namespace_attach_pfn(ndns);

-	if (nd_btt_probe(ndns, pmem) == 0) {
- /* we'll come back as btt-pmem */
- return -ENXIO;
- }
-
- if (nd_pfn_probe(ndns, pmem) == 0) {
- /* we'll come back as pfn-pmem */
+ if (nd_btt_probe(ndns, pmem) == 0 || nd_pfn_probe(ndns, pmem) == 0) {
+ /*
+ * We'll come back as either btt-pmem, or pfn-pmem, so
+ * drop the queue allocation for now.
+ */
+ blk_cleanup_queue(pmem->pmem_queue);
return -ENXIO;
	}

Before the dynamically allocated struct pages from devm_memremap_pages()
can be put to use outside the driver, we need a mechanism to track
whether they are still in use at teardown. Towards that goal, reorder
the initialization sequence to allow the 'q_usage_counter' from the
request_queue to be used by the devm_memremap_pages() implementation
(in subsequent patches).

Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 drivers/nvdimm/pmem.c | 33 ++++++++++++++++++++-------------
 1 file changed, 20 insertions(+), 13 deletions(-)
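As context for the reordering: 'q_usage_counter' is a struct percpu_ref
embedded in struct request_queue, so once the queue exists before the
memremap step, pmem_alloc() can hand that counter to devm_memremap_pages()
and teardown can wait for outstanding page references to drain. Below is a
minimal sketch of how the pmem_should_map_pages() call site might look
after the follow-on changes, assuming devm_memremap_pages() grows a
percpu_ref parameter; the extra arguments are an illustration of the
subsequent patches, not the signature at the time of this patch:

	if (pmem_should_map_pages(dev)) {
		/*
		 * Assumed follow-on interface: pass the queue's percpu_ref
		 * so pagemap teardown can drain in-flight page users.
		 */
		pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev,
				res, &q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	}

This is also why the btt/pfn re-probe paths above call blk_cleanup_queue():
the queue is now allocated unconditionally in pmem_alloc(), so every exit
that does not attach a pmem disk has to drop it to stay balanced.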