@@ -873,13 +873,17 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->sq = sq;
req->ops = ops;
req->sg = NULL;
+ req->md_sg = NULL;
req->sg_cnt = 0;
+ req->md_sg_cnt = 0;
req->transfer_len = 0;
+ req->md_len = 0;
req->cqe->status = 0;
req->cqe->sq_head = 0;
req->ns = NULL;
req->error_loc = NVMET_NO_ERROR_LOC;
req->error_slba = 0;
+ req->use_md = false;
trace_nvmet_req_init(req, req->cmd);
@@ -962,6 +966,7 @@ bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
int nvmet_req_alloc_sgl(struct nvmet_req *req)
{
struct pci_dev *p2p_dev = NULL;
+ size_t data_len = req->transfer_len - req->md_len;
if (IS_ENABLED(CONFIG_PCI_P2PDMA)) {
if (req->sq->ctrl && req->ns)
@@ -971,11 +976,23 @@ int nvmet_req_alloc_sgl(struct nvmet_req *req)
req->p2p_dev = NULL;
if (req->sq->qid && p2p_dev) {
req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
- req->transfer_len);
- if (req->sg) {
- req->p2p_dev = p2p_dev;
- return 0;
+ data_len);
+ if (!req->sg)
+ goto fallback;
+
+ if (req->md_len) {
+ req->md_sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->md_sg_cnt,
+ req->md_len);
+ if (!req->md_sg) {
+ pci_p2pmem_free_sgl(p2p_dev, req->sg);
+ goto fallback;
+ }
}
+ req->p2p_dev = p2p_dev;
+ return 0;
}
/*
@@ -984,23 +1001,40 @@ int nvmet_req_alloc_sgl(struct nvmet_req *req)
*/
}
- req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
+fallback:
+ req->sg = sgl_alloc(data_len, GFP_KERNEL, &req->sg_cnt);
if (unlikely(!req->sg))
return -ENOMEM;
+ if (req->md_len) {
+ req->md_sg = sgl_alloc(req->md_len, GFP_KERNEL,
+ &req->md_sg_cnt);
+ if (unlikely(!req->md_sg)) {
+ sgl_free(req->sg);
+ return -ENOMEM;
+ }
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgl);
void nvmet_req_free_sgl(struct nvmet_req *req)
{
- if (req->p2p_dev)
+ if (req->p2p_dev) {
pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
- else
+ if (req->md_sg)
+ pci_p2pmem_free_sgl(req->p2p_dev, req->md_sg);
+ } else {
sgl_free(req->sg);
+ if (req->md_sg)
+ sgl_free(req->md_sg);
+ }
req->sg = NULL;
+ req->md_sg = NULL;
req->sg_cnt = 0;
+ req->md_sg_cnt = 0;
}
EXPORT_SYMBOL_GPL(nvmet_req_free_sgl);
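
For orientation only (not part of the patch): with the hunks above, a transport is expected to record the metadata portion of the transfer in md_len before calling nvmet_req_alloc_sgl(), which then allocates req->sg for the data portion (transfer_len - md_len) and req->md_sg for the metadata. The helper below is a hedged sketch of that calling convention; example_transport_map_data() and the use_md assignment are illustrative assumptions, and only nvmet_req_alloc_sgl()/nvmet_req_free_sgl() come from the patch itself.

static int example_transport_map_data(struct nvmet_req *req, size_t md_len)
{
	int ret;

	/* In this scheme transfer_len covers data + metadata. */
	req->md_len = md_len;
	req->use_md = md_len != 0;

	/* Allocates req->sg and, when md_len is non-zero, req->md_sg. */
	ret = nvmet_req_alloc_sgl(req);
	if (ret < 0)
		return ret;

	/* ... transport-specific DMA mapping of req->sg / req->md_sg ... */
	return 0;
}
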
@@ -302,6 +302,7 @@ struct nvmet_req {
struct nvmet_cq *cq;
struct nvmet_ns *ns;
struct scatterlist *sg;
+ struct scatterlist *md_sg;
struct bio_vec inline_bvec[NVMET_MAX_INLINE_BIOVEC];
union {
struct {
@@ -315,8 +316,10 @@ struct nvmet_req {
} f;
};
int sg_cnt;
+ int md_sg_cnt;
/* data length as parsed from the SGL descriptor: */
size_t transfer_len;
+ /* metadata length; included in transfer_len above: */
+ size_t md_len;
struct nvmet_port *port;
@@ -327,6 +330,8 @@ struct nvmet_req {
struct device *p2p_client;
u16 error_loc;
u64 error_slba;
+ /* set when the request carries metadata in addition to data */
+ bool use_md;
};
extern struct workqueue_struct *buffered_io_wq;
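
Also not part of the patch: one plausible consumer of the new md_sg/md_sg_cnt fields is a block-device backend that attaches the metadata pages to the data bio as an integrity payload. The sketch below uses the existing bio_integrity_alloc()/bio_integrity_add_page() kernel APIs, but the function name and the bi_size handling are assumptions, not this series' implementation.

static int example_attach_md_to_bio(struct nvmet_req *req, struct bio *bio)
{
	struct bio_integrity_payload *bip;
	struct scatterlist *sg;
	int i, rc;

	bip = bio_integrity_alloc(bio, GFP_NOIO, req->md_sg_cnt);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	/* Assumes the bio spans the whole request's metadata. */
	bip->bip_iter.bi_size = req->md_len;
	/* A real implementation must also seed the PI reference tag here. */

	for_each_sg(req->md_sg, sg, req->md_sg_cnt, i) {
		rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
					    sg->offset);
		if (rc != sg->length)
			return -ENOMEM;
	}
	return 0;
}
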