
[v12,08/26] nvme-tcp: Add DDP data-path

Message ID 20230712161513.134860-9-aaptel@nvidia.com (mailing list archive)
State RFC
Series nvme-tcp receive offloads


Commit Message

Aurelien Aptel July 12, 2023, 4:14 p.m. UTC
From: Boris Pismenny <borisp@nvidia.com>

Introduce the NVMe-TCP DDP data-path offload.
Using this interface, the NIC hardware will scatter TCP payload directly
to the BIO pages according to the command_id in the PDU.
To maintain the correctness of the network stack, the driver is expected
to construct SKBs that point to the BIO pages.

The data-path interface contains two routines: setup/teardown.
The setup provides the mapping from command_id to the request buffers,
while the teardown removes this mapping.

For efficiency, we introduce an asynchronous NVMe completion that is
split between NVMe-TCP and the NIC driver as follows:
NVMe-TCP performs the NVMe-specific completion, while the NIC driver
performs the generic blk_mq completion.

Signed-off-by: Boris Pismenny <borisp@nvidia.com>
Signed-off-by: Ben Ben-Ishay <benishay@nvidia.com>
Signed-off-by: Or Gerlitz <ogerlitz@nvidia.com>
Signed-off-by: Yoray Zack <yorayz@nvidia.com>
Signed-off-by: Shai Malin <smalin@nvidia.com>
Signed-off-by: Aurelien Aptel <aaptel@nvidia.com>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
---
 drivers/nvme/host/tcp.c | 121 ++++++++++++++++++++++++++++++++++++++--
 1 file changed, 116 insertions(+), 5 deletions(-)

Comments

Sagi Grimberg Aug. 9, 2023, 7:35 a.m. UTC | #1
On 7/12/23 19:14, Aurelien Aptel wrote:
> From: Boris Pismenny <borisp@nvidia.com>
> 
> Introduce the NVMe-TCP DDP data-path offload.
> Using this interface, the NIC hardware will scatter TCP payload directly
> to the BIO pages according to the command_id in the PDU.
> To maintain the correctness of the network stack, the driver is expected
> to construct SKBs that point to the BIO pages.
> 
> The data-path interface contains two routines: setup/teardown.
> The setup provides the mapping from command_id to the request buffers,
> while the teardown removes this mapping.
> 
> For efficiency, we introduce an asynchronous nvme completion, which is
> split between NVMe-TCP and the NIC driver as follows:
> NVMe-TCP performs the specific completion, while NIC driver performs the
> generic mq_blk completion.
> 
> Signed-off-by: Boris Pismenny <borisp@nvidia.com>
> Signed-off-by: Ben Ben-Ishay <benishay@nvidia.com>
> Signed-off-by: Or Gerlitz <ogerlitz@nvidia.com>
> Signed-off-by: Yoray Zack <yorayz@nvidia.com>
> Signed-off-by: Shai Malin <smalin@nvidia.com>
> Signed-off-by: Aurelien Aptel <aaptel@nvidia.com>
> Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
> ---
>   drivers/nvme/host/tcp.c | 121 ++++++++++++++++++++++++++++++++++++++--
>   1 file changed, 116 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
> index 7d3b0ac65c03..6057cd424a19 100644
> --- a/drivers/nvme/host/tcp.c
> +++ b/drivers/nvme/host/tcp.c
> @@ -116,6 +116,13 @@ struct nvme_tcp_request {
>   	size_t			offset;
>   	size_t			data_sent;
>   	enum nvme_tcp_send_state state;
> +
> +#ifdef CONFIG_ULP_DDP
> +	bool			offloaded;
> +	struct ulp_ddp_io	ddp;
> +	__le16			ddp_status;
> +	union nvme_result	result;
> +#endif
>   };
>   
>   enum nvme_tcp_queue_flags {
> @@ -354,11 +361,75 @@ static inline bool is_netdev_ulp_offload_active(struct net_device *netdev,
>   	return false;
>   }
>   
> +static int nvme_tcp_req_map_ddp_sg(struct nvme_tcp_request *req, struct request *rq)

Why do you pass both req and rq? You can derive each from the other.

> +{
> +	int ret;
> +
> +	req->ddp.sg_table.sgl = req->ddp.first_sgl;
> +	ret = sg_alloc_table_chained(&req->ddp.sg_table,
> +				     blk_rq_nr_phys_segments(rq),
> +				     req->ddp.sg_table.sgl, SG_CHUNK_SIZE);
> +	if (ret)
> +		return -ENOMEM;
> +	req->ddp.nents = blk_rq_map_sg(rq->q, rq, req->ddp.sg_table.sgl);

General question: I'm assuming that the HCA knows how to deal with
a controller that sends C2HData in parts?


> +	return 0;
> +}
> +
>   static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags);
> +static void nvme_tcp_ddp_teardown_done(void *ddp_ctx);
>   static const struct ulp_ddp_ulp_ops nvme_tcp_ddp_ulp_ops = {
>   	.resync_request		= nvme_tcp_resync_request,
> +	.ddp_teardown_done	= nvme_tcp_ddp_teardown_done,
>   };
>   
> +static void nvme_tcp_teardown_ddp(struct nvme_tcp_queue *queue,
> +				  struct request *rq)
> +{
> +	struct net_device *netdev = queue->ctrl->offloading_netdev;
> +	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +
> +	netdev->netdev_ops->ulp_ddp_ops->teardown(netdev, queue->sock->sk,
> +						  &req->ddp, rq);
> +	sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
> +}
> +
> +static void nvme_tcp_ddp_teardown_done(void *ddp_ctx)
> +{
> +	struct request *rq = ddp_ctx;
> +	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +
> +	if (!nvme_try_complete_req(rq, req->ddp_status, req->result))
> +		nvme_complete_rq(rq);
> +}
> +
> +static int nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue, u16 command_id,
> +			      struct request *rq)

I think you can use nvme_cid(rq) instead of passing the command_id.

> +{
> +	struct net_device *netdev = queue->ctrl->offloading_netdev;
> +	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +	int ret;
> +
> +	if (rq_data_dir(rq) != READ ||
> +	    queue->ctrl->offload_io_threshold > blk_rq_payload_bytes(rq))
> +		return 0;
> +
> +	req->ddp.command_id = command_id;
> +	ret = nvme_tcp_req_map_ddp_sg(req, rq);

Don't see why map_ddp_sg is not open-coded here, it's the only call site,
and it pretty much does exactly what its name says.

> +	if (ret)
> +		return -ENOMEM;
> +
> +	ret = netdev->netdev_ops->ulp_ddp_ops->setup(netdev, queue->sock->sk,
> +						     &req->ddp);
> +	if (ret) {
> +		sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
> +		return ret;
> +	}
> +
> +	/* if successful, sg table is freed in nvme_tcp_teardown_ddp() */
> +	req->offloaded = true;
> +	return 0;
> +}
> +
>   static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
>   {
>   	struct net_device *netdev = queue->ctrl->offloading_netdev;
> @@ -491,6 +562,12 @@ static inline bool is_netdev_ulp_offload_active(struct net_device *netdev,
>   	return false;
>   }
>   
> +static int nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue, u16 command_id,
> +			      struct request *rq)
> +{
> +	return 0;
> +}
> +
>   static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
>   {
>   	return -EOPNOTSUPP;
> @@ -778,6 +855,26 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
>   	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
>   }
>   
> +static void nvme_tcp_complete_request(struct request *rq,
> +				      __le16 status,
> +				      union nvme_result result,
> +				      __u16 command_id)
> +{
> +#ifdef CONFIG_ULP_DDP
> +	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +
> +	if (req->offloaded) {
> +		req->ddp_status = status;
> +		req->result = result;
> +		nvme_tcp_teardown_ddp(req->queue, rq);
> +		return;
> +	}
> +#endif
> +
> +	if (!nvme_try_complete_req(rq, status, result))
> +		nvme_complete_rq(rq);
> +}
> +
>   static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
>   		struct nvme_completion *cqe)
>   {
> @@ -797,10 +894,9 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
>   	if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
>   		req->status = cqe->status;
>   
> -	if (!nvme_try_complete_req(rq, req->status, cqe->result))
> -		nvme_complete_rq(rq);
> +	nvme_tcp_complete_request(rq, req->status, cqe->result,
> +				  cqe->command_id);
>   	queue->nr_cqe++;
> -
>   	return 0;
>   }
>   
> @@ -998,10 +1094,13 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
>   
>   static inline void nvme_tcp_end_request(struct request *rq, u16 status)
>   {
> +	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +	struct nvme_tcp_queue *queue = req->queue;
> +	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
>   	union nvme_result res = {};
>   
> -	if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
> -		nvme_complete_rq(rq);
> +	nvme_tcp_complete_request(rq, cpu_to_le16(status << 1), res,
> +				  pdu->command_id);
>   }
>   
>   static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
> @@ -1308,6 +1407,15 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
>   	else
>   		msg.msg_flags |= MSG_EOR;
>   
> +	if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags)) {
> +		ret = nvme_tcp_setup_ddp(queue, pdu->cmd.common.command_id,
> +					 blk_mq_rq_from_pdu(req));
> +		WARN_ONCE(ret, "ddp setup failed (queue 0x%x, cid 0x%x, ret=%d)",
> +			  nvme_tcp_queue_id(queue),
> +			  pdu->cmd.common.command_id,
> +			  ret);
> +	}

Any reason why this is done here when sending the command pdu and not
in setup time?

> +
>   	if (queue->hdr_digest && !req->offset)
>   		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
>   
> @@ -2739,6 +2847,9 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
>   	if (ret)
>   		return ret;
>   
> +#ifdef CONFIG_ULP_DDP
> +	req->offloaded = false;
> +#endif
>   	req->state = NVME_TCP_SEND_CMD_PDU;
>   	req->status = cpu_to_le16(NVME_SC_SUCCESS);
>   	req->offset = 0;
Aurelien Aptel Aug. 14, 2023, 4:12 p.m. UTC | #2
Sagi Grimberg <sagi@grimberg.me> writes:
> On 7/12/23 19:14, Aurelien Aptel wrote:
>> +static int nvme_tcp_req_map_ddp_sg(struct nvme_tcp_request *req, struct request *rq)
>
> Why do you pass both req and rq? You can derive each from the other.

Thanks, we will remove the redundant parameter.
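
For reference, the two can be derived from each other with the existing
blk-mq helpers, so passing only rq is enough (illustrative only):

	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);	/* rq -> req */
	struct request *rq = blk_mq_rq_from_pdu(req);		/* req -> rq */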

>> +{
>> +     int ret;
>> +
>> +     req->ddp.sg_table.sgl = req->ddp.first_sgl;
>> +     ret = sg_alloc_table_chained(&req->ddp.sg_table,
>> +                                  blk_rq_nr_phys_segments(rq),
>> +                                  req->ddp.sg_table.sgl, SG_CHUNK_SIZE);
>> +     if (ret)
>> +             return -ENOMEM;
>> +     req->ddp.nents = blk_rq_map_sg(rq->q, rq, req->ddp.sg_table.sgl);
>
> General question, I'm assuming that the hca knows how to deal with
> a controller that sends c2hdata in parts?

Yes, the hardware supports the offloading of multiple c2hdata PDUs per IO.

>> +static int nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue, u16 command_id,
>> +                           struct request *rq)
>
> I think you can use nvme_cid(rq) instead of passing the command_id.

Thanks, we will use it.

>> +{
>> +     struct net_device *netdev = queue->ctrl->offloading_netdev;
>> +     struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
>> +     int ret;
>> +
>> +     if (rq_data_dir(rq) != READ ||
>> +         queue->ctrl->offload_io_threshold > blk_rq_payload_bytes(rq))
>> +             return 0;
>> +
>> +     req->ddp.command_id = command_id;
>> +     ret = nvme_tcp_req_map_ddp_sg(req, rq);
>
> Don't see why map_ddp_sg is not open-coded here, its the only call-site,
> and its pretty much does exactly what its called.

Sure, we will open code it.
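
Putting the three suggestions together (single rq parameter, nvme_cid(),
open-coded SG mapping), a possible shape for the next revision would be
the sketch below - illustrative only, not the final code:

	static int nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue,
				      struct request *rq)
	{
		struct net_device *netdev = queue->ctrl->offloading_netdev;
		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
		int ret;

		if (rq_data_dir(rq) != READ ||
		    queue->ctrl->offload_io_threshold > blk_rq_payload_bytes(rq))
			return 0;

		/* open-coded former nvme_tcp_req_map_ddp_sg() */
		req->ddp.command_id = nvme_cid(rq);
		req->ddp.sg_table.sgl = req->ddp.first_sgl;
		ret = sg_alloc_table_chained(&req->ddp.sg_table,
					     blk_rq_nr_phys_segments(rq),
					     req->ddp.sg_table.sgl, SG_CHUNK_SIZE);
		if (ret)
			return -ENOMEM;
		req->ddp.nents = blk_rq_map_sg(rq->q, rq, req->ddp.sg_table.sgl);

		ret = netdev->netdev_ops->ulp_ddp_ops->setup(netdev, queue->sock->sk,
							     &req->ddp);
		if (ret) {
			sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
			return ret;
		}

		/* if successful, sg table is freed in nvme_tcp_teardown_ddp() */
		req->offloaded = true;
		return 0;
	}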

>> @@ -1308,6 +1407,15 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
>>       else
>>               msg.msg_flags |= MSG_EOR;
>>
>> +     if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags)) {
>> +             ret = nvme_tcp_setup_ddp(queue, pdu->cmd.common.command_id,
>> +                                      blk_mq_rq_from_pdu(req));
>> +             WARN_ONCE(ret, "ddp setup failed (queue 0x%x, cid 0x%x, ret=%d)",
>> +                       nvme_tcp_queue_id(queue),
>> +                       pdu->cmd.common.command_id,
>> +                       ret);
>> +     }
>
> Any reason why this is done here when sending the command pdu and not
> in setup time?

We wish to interact with the HW from the same CPU per queue, hence we
are calling setup_ddp() after queue->io_cpu == raw_smp_processor_id()
was checked in nvme_tcp_queue_request().
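
For context, the check referred to here is the inline-send path of
nvme_tcp_queue_request(); abridged from the upstream driver, details may
differ from the exact base tree:

	/* abridged from nvme_tcp_queue_request() */
	if (queue->io_cpu == raw_smp_processor_id() &&
	    sync && empty && mutex_trylock(&queue->send_mutex)) {
		nvme_tcp_send_all(queue);	/* inline send, already on io_cpu */
		mutex_unlock(&queue->send_mutex);
	}
	...
	queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); /* deferred send */

In both cases nvme_tcp_try_send_cmd_pdu(), and therefore
nvme_tcp_setup_ddp(), ends up running on queue->io_cpu.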
Sagi Grimberg Aug. 14, 2023, 7:01 p.m. UTC | #3
>>> @@ -1308,6 +1407,15 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
>>>        else
>>>                msg.msg_flags |= MSG_EOR;
>>>
>>> +     if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags)) {
>>> +             ret = nvme_tcp_setup_ddp(queue, pdu->cmd.common.command_id,
>>> +                                      blk_mq_rq_from_pdu(req));
>>> +             WARN_ONCE(ret, "ddp setup failed (queue 0x%x, cid 0x%x, ret=%d)",
>>> +                       nvme_tcp_queue_id(queue),
>>> +                       pdu->cmd.common.command_id,
>>> +                       ret);
>>> +     }
>>
>> Any reason why this is done here when sending the command pdu and not
>> in setup time?
> 
> We wish to interact with the HW from the same CPU per queue, hence we
> are calling setup_ddp() after queue->io_cpu == raw_smp_processor_id()
> was checked in nvme_tcp_queue_request().

That is very fragile. You cannot depend on this micro-optimization being
in the code. Is this related to a hidden steering rule you are adding
to the hw?

Which reminds me: in the control patch you are passing io_cpu. This is
also a dependency that should be avoided; you should use the same
mechanism as aRFS to learn where the socket is being reaped.
Aurelien Aptel Aug. 17, 2023, 1:28 p.m. UTC | #4
Sagi Grimberg <sagi@grimberg.me> writes:
>>>> @@ -1308,6 +1407,15 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
>>>>        else
>>>>                msg.msg_flags |= MSG_EOR;
>>>>
>>>> +     if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags)) {
>>>> +             ret = nvme_tcp_setup_ddp(queue, pdu->cmd.common.command_id,
>>>> +                                      blk_mq_rq_from_pdu(req));
>>>> +             WARN_ONCE(ret, "ddp setup failed (queue 0x%x, cid 0x%x, ret=%d)",
>>>> +                       nvme_tcp_queue_id(queue),
>>>> +                       pdu->cmd.common.command_id,
>>>> +                       ret);
>>>> +     }
>>>
>>> Any reason why this is done here when sending the command pdu and not
>>> in setup time?
>>
>> We wish to interact with the HW from the same CPU per queue, hence we
>> are calling setup_ddp() after queue->io_cpu == raw_smp_processor_id()
>> was checked in nvme_tcp_queue_request().
>
> That is very fragile. You cannot depend on this micro-optimization being
> in the code. Is this related to a hidden steering rule you are adding
> to the hw?

We are using a steering rule to redirect packets into the offload
engine. This rule also helps align the nvme-tcp connection with a
specific core.

> Which reminds me, in the control patch, you are passing io_cpu, this is
> also a dependency that should be avoided, you should use the same
> mechanism as arfs to learn where the socket is being reaped.

We can use queue->sock->sk->sk_incoming_cpu instead of queue->io_cpu,
as is done in the nvme-tcp target.
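
For reference, the target side does it roughly like this (abridged from
drivers/nvme/target/tcp.c, details may differ):

	static inline int queue_cpu(struct nvmet_tcp_queue *queue)
	{
		return queue->sock->sk->sk_incoming_cpu;
	}
	...
	queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);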

Patch

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 7d3b0ac65c03..6057cd424a19 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -116,6 +116,13 @@  struct nvme_tcp_request {
 	size_t			offset;
 	size_t			data_sent;
 	enum nvme_tcp_send_state state;
+
+#ifdef CONFIG_ULP_DDP
+	bool			offloaded;
+	struct ulp_ddp_io	ddp;
+	__le16			ddp_status;
+	union nvme_result	result;
+#endif
 };
 
 enum nvme_tcp_queue_flags {
@@ -354,11 +361,75 @@  static inline bool is_netdev_ulp_offload_active(struct net_device *netdev,
 	return false;
 }
 
+static int nvme_tcp_req_map_ddp_sg(struct nvme_tcp_request *req, struct request *rq)
+{
+	int ret;
+
+	req->ddp.sg_table.sgl = req->ddp.first_sgl;
+	ret = sg_alloc_table_chained(&req->ddp.sg_table,
+				     blk_rq_nr_phys_segments(rq),
+				     req->ddp.sg_table.sgl, SG_CHUNK_SIZE);
+	if (ret)
+		return -ENOMEM;
+	req->ddp.nents = blk_rq_map_sg(rq->q, rq, req->ddp.sg_table.sgl);
+	return 0;
+}
+
 static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags);
+static void nvme_tcp_ddp_teardown_done(void *ddp_ctx);
 static const struct ulp_ddp_ulp_ops nvme_tcp_ddp_ulp_ops = {
 	.resync_request		= nvme_tcp_resync_request,
+	.ddp_teardown_done	= nvme_tcp_ddp_teardown_done,
 };
 
+static void nvme_tcp_teardown_ddp(struct nvme_tcp_queue *queue,
+				  struct request *rq)
+{
+	struct net_device *netdev = queue->ctrl->offloading_netdev;
+	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+
+	netdev->netdev_ops->ulp_ddp_ops->teardown(netdev, queue->sock->sk,
+						  &req->ddp, rq);
+	sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
+}
+
+static void nvme_tcp_ddp_teardown_done(void *ddp_ctx)
+{
+	struct request *rq = ddp_ctx;
+	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+
+	if (!nvme_try_complete_req(rq, req->ddp_status, req->result))
+		nvme_complete_rq(rq);
+}
+
+static int nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue, u16 command_id,
+			      struct request *rq)
+{
+	struct net_device *netdev = queue->ctrl->offloading_netdev;
+	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+	int ret;
+
+	if (rq_data_dir(rq) != READ ||
+	    queue->ctrl->offload_io_threshold > blk_rq_payload_bytes(rq))
+		return 0;
+
+	req->ddp.command_id = command_id;
+	ret = nvme_tcp_req_map_ddp_sg(req, rq);
+	if (ret)
+		return -ENOMEM;
+
+	ret = netdev->netdev_ops->ulp_ddp_ops->setup(netdev, queue->sock->sk,
+						     &req->ddp);
+	if (ret) {
+		sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
+		return ret;
+	}
+
+	/* if successful, sg table is freed in nvme_tcp_teardown_ddp() */
+	req->offloaded = true;
+	return 0;
+}
+
 static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
 {
 	struct net_device *netdev = queue->ctrl->offloading_netdev;
@@ -491,6 +562,12 @@  static inline bool is_netdev_ulp_offload_active(struct net_device *netdev,
 	return false;
 }
 
+static int nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue, u16 command_id,
+			      struct request *rq)
+{
+	return 0;
+}
+
 static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
 {
 	return -EOPNOTSUPP;
@@ -778,6 +855,26 @@  static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
 	queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
 }
 
+static void nvme_tcp_complete_request(struct request *rq,
+				      __le16 status,
+				      union nvme_result result,
+				      __u16 command_id)
+{
+#ifdef CONFIG_ULP_DDP
+	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+
+	if (req->offloaded) {
+		req->ddp_status = status;
+		req->result = result;
+		nvme_tcp_teardown_ddp(req->queue, rq);
+		return;
+	}
+#endif
+
+	if (!nvme_try_complete_req(rq, status, result))
+		nvme_complete_rq(rq);
+}
+
 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
 		struct nvme_completion *cqe)
 {
@@ -797,10 +894,9 @@  static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
 	if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
 		req->status = cqe->status;
 
-	if (!nvme_try_complete_req(rq, req->status, cqe->result))
-		nvme_complete_rq(rq);
+	nvme_tcp_complete_request(rq, req->status, cqe->result,
+				  cqe->command_id);
 	queue->nr_cqe++;
-
 	return 0;
 }
 
@@ -998,10 +1094,13 @@  static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
 
 static inline void nvme_tcp_end_request(struct request *rq, u16 status)
 {
+	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+	struct nvme_tcp_queue *queue = req->queue;
+	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
 	union nvme_result res = {};
 
-	if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
-		nvme_complete_rq(rq);
+	nvme_tcp_complete_request(rq, cpu_to_le16(status << 1), res,
+				  pdu->command_id);
 }
 
 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
@@ -1308,6 +1407,15 @@  static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
 	else
 		msg.msg_flags |= MSG_EOR;
 
+	if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags)) {
+		ret = nvme_tcp_setup_ddp(queue, pdu->cmd.common.command_id,
+					 blk_mq_rq_from_pdu(req));
+		WARN_ONCE(ret, "ddp setup failed (queue 0x%x, cid 0x%x, ret=%d)",
+			  nvme_tcp_queue_id(queue),
+			  pdu->cmd.common.command_id,
+			  ret);
+	}
+
 	if (queue->hdr_digest && !req->offset)
 		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
 
@@ -2739,6 +2847,9 @@  static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
 	if (ret)
 		return ret;
 
+#ifdef CONFIG_ULP_DDP
+	req->offloaded = false;
+#endif
 	req->state = NVME_TCP_SEND_CMD_PDU;
 	req->status = cpu_to_le16(NVME_SC_SUCCESS);
 	req->offset = 0;