@@ -625,7 +625,8 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
DMA_TO_DEVICE : DMA_FROM_DEVICE;
int ret = BLK_MQ_RQ_QUEUE_ERROR;
- sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
+ sg_init_unmappable_table(iod->sg, blk_rq_nr_phys_segments(req));
+
iod->nents = blk_rq_map_sg(q, req, iod->sg);
if (!iod->nents)
goto out;
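sg_init_unmappable_table() is introduced earlier in this series and is not in mainline. A minimal sketch of what such a helper could look like, assuming the series reserves a spare low bit of page_link as an SG_UNMAPPABLE marker (both the bit and its placement are assumptions here, not the series' actual scheme):

/* Hypothetical sketch, not the series' implementation. */
#define SG_UNMAPPABLE	0x4UL	/* assumed spare low bit of page_link */

static inline void sg_init_unmappable_table(struct scatterlist *sgl,
					    unsigned int nents)
{
	sg_init_table(sgl, nents);		/* zero the table, set the end marker */
	sgl[0].page_link |= SG_UNMAPPABLE;	/* flag: entries may lack a page */
}

Marking the table up front lets sg_page()/sg_virt() users assert early instead of dereferencing a bogus struct page pointer later.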
@@ -35,6 +35,18 @@ config NVME_TARGET_RDMA
If unsure, say N.
+config NVME_TARGET_RDMA_P2PMEM
+	bool "Support Peer-to-Peer memory (Experimental)"
+ depends on NVME_TARGET_RDMA
+ select SG_UNMAPPABLE
+ help
+	  This enables experimental support for peer-to-peer PCI memory
+	  in the NVMe RDMA target driver. Enabling this could trigger
+	  BUG_ONs when using the target with architectures or block
+	  devices that do not currently support DMA to unmappable memory.
+
+ If unsure, say N.
+
config NVME_TARGET_FC
tristate "NVMe over Fabrics FC target driver"
depends on NVME_TARGET
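The BUG_ONs the help text warns about come from code paths that still assume every scatterlist entry carries a struct page. A hedged illustration of the defensive pattern the series pushes consumers toward (sg_pfn_t() comes from this series, and the error code is purely illustrative):

/* Illustrative only: refuse unmappable entries instead of calling
 * sg_page() unconditionally on memory that has no struct page. */
static int check_sg_mappable(struct scatterlist *sg)
{
	if (!pfn_t_to_page(sg_pfn_t(sg)))
		return -EREMOTEIO;	/* pageless (e.g. p2p) entry */
	return 0;
}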
@@ -75,7 +75,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
bio_set_op_attrs(bio, op, op_flags);
for_each_sg(req->sg, sg, req->sg_cnt, i) {
- while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
+ while (bio_add_pfn(bio, sg_pfn_t(sg), sg->length, sg->offset)
!= sg->length) {
struct bio *prev = bio;
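bio_add_pfn() and sg_pfn_t() are likewise added by this series rather than mainline. Assuming bio_add_pfn() mirrors bio_add_page() but accepts a pfn_t, the mappable case should reduce to the old path; a sketch of that equivalence (pfn_t, pfn_t_to_page() and bio_add_page() are mainline, the helper below is not):

/* Sketch: for pfns that are backed by a struct page, a
 * bio_add_pfn()-style helper can fall back to bio_add_page(). */
static int bio_add_pfn_sketch(struct bio *bio, pfn_t pfn,
			      unsigned int len, unsigned int offset)
{
	struct page *page = pfn_t_to_page(pfn);

	if (!page)
		return 0;	/* unmappable: needs the real pfn-aware path */
	return bio_add_page(bio, page, len, offset);
}

Returning 0 follows bio_add_page()'s convention of reporting how many bytes were added, which is what the != sg->length test above keys off.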
@@ -193,11 +193,30 @@ static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents)
if (!sgl || !nents)
return;
- for_each_sg(sgl, sg, nents, count)
- __free_page(sg_page(sg));
+ for_each_sg(sgl, sg, nents, count) {
+ struct page *pg = pfn_t_to_page(sg_pfn_t(sg));
+
+ if (pg)
+ __free_page(pg);
+ }
+
kfree(sgl);
}
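The NULL check is what makes this loop safe for mixed tables: mainline's pfn_t_to_page() (include/linux/pfn_t.h, as of the v4.x kernels this series targets) only returns a page when one actually backs the pfn:

static inline bool pfn_t_has_page(pfn_t pfn)
{
	return (pfn.val & PFN_MAP) == PFN_MAP || (pfn.val & PFN_DEV) == 0;
}

static inline struct page *pfn_t_to_page(pfn_t pfn)
{
	if (pfn_t_has_page(pfn))
		return pfn_to_page(pfn_t_to_pfn(pfn));
	return NULL;
}

So p2p entries (PFN_DEV without PFN_MAP) yield NULL and are simply skipped, while ordinary page-backed entries are freed as before.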
+#ifdef CONFIG_NVME_TARGET_RDMA_P2PMEM
+static void nvmet_rdma_init_sg(struct scatterlist *sg,
+ unsigned int nent)
+{
+ sg_init_unmappable_table(sg, nent);
+}
+#else
+static void nvmet_rdma_init_sg(struct scatterlist *sg,
+ unsigned int nent)
+{
+ sg_init_table(sg, nent);
+}
+#endif
+
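The #ifdef pair keeps sg_init_unmappable_table() entirely out of the build when the option is off. If the helper's declaration were visible unconditionally, the same dispatch could collapse into one function via IS_ENABLED(); a sketch, under that assumption:

static void nvmet_rdma_init_sg(struct scatterlist *sg, unsigned int nent)
{
	/* Assumes sg_init_unmappable_table() is declared even when
	 * CONFIG_NVME_TARGET_RDMA_P2PMEM is not set; the dead branch
	 * is discarded at compile time. */
	if (IS_ENABLED(CONFIG_NVME_TARGET_RDMA_P2PMEM))
		sg_init_unmappable_table(sg, nent);
	else
		sg_init_table(sg, nent);
}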
static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
u32 length)
{
@@ -211,7 +230,7 @@ static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
if (!sg)
goto out;
- sg_init_table(sg, nent);
+ nvmet_rdma_init_sg(sg, nent);
while (length) {
u32 page_len = min_t(u32, length, PAGE_SIZE);
@@ -231,7 +250,9 @@ static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
out_free_pages:
while (i > 0) {
i--;
- __free_page(sg_page(&sg[i]));
+ page = pfn_t_to_page(sg_pfn_t(&sg[i]));
+ if (page)
+ __free_page(page);
}
kfree(sg);
out:
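With the setup and both teardown paths converted, the pairing stays symmetric: every table is initialized through nvmet_rdma_init_sg(), and every free path tolerates pageless entries. A hypothetical caller-side sketch of that pairing (the function and its length argument are illustrative, not the driver's actual call sites):

static u16 example_transfer(u32 len)
{
	struct scatterlist *sgl;
	unsigned int nents;

	/* Hypothetical caller: allocate a table covering 'len' bytes. */
	if (nvmet_rdma_alloc_sgl(&sgl, &nents, len))
		return NVME_SC_INTERNAL;

	/* ... post RDMA work against sgl/nents ... */

	nvmet_rdma_free_sgl(sgl, nents);	/* pfn-aware teardown */
	return 0;
}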