[for-next,v2,1/9] RDMA/rxe: Add rxe_is_fenced() subroutine

Message ID 20220630190425.2251-2-rpearsonhpe@gmail.com (mailing list archive)
State Accepted
Commit 8a0256a3cf2de543f4c2dfd3c1edabbf7ea25df0
Series: RDMA/rxe: Various fixes

Commit Message

Bob Pearson June 30, 2022, 7:04 p.m. UTC
The code that decides whether to defer execution of a wqe in
rxe_requester.c is isolated into a subroutine, rxe_is_fenced(),
and removed from req_next_wqe(). The condition under which a wqe
is fenced is changed to comply with the IBA. Currently an
operation is fenced if the fence bit is set in the wqe flags and
the last wqe has not completed; for normal operations the IBA
actually only requires that the last read or atomic operation be
complete.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
v2 replaces "RDMA/rxe: Fix incorrect fencing"

 drivers/infiniband/sw/rxe/rxe_req.c | 37 ++++++++++++++++++++++++-----
 1 file changed, 31 insertions(+), 6 deletions(-)
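
For illustration, here is a minimal userspace sketch (not rxe code; struct fake_qp and its helpers are invented for this example) of why the new fence test can use the read/atomic credit counter: the counter starts at max_rd_atomic, drops when a read or atomic is issued, and is restored when the response completes, so it equals max_rd_atomic exactly when nothing is outstanding.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_qp {
	int max_rd_atomic;	/* negotiated initiator depth (qp->attr.max_rd_atomic) */
	int rd_atomic;		/* remaining read/atomic credits (qp->req.rd_atomic) */
};

static void issue_read_or_atomic(struct fake_qp *qp)
{
	assert(qp->rd_atomic > 0);	/* out of credits: the requester would stall */
	qp->rd_atomic--;
}

static void complete_read_or_atomic(struct fake_qp *qp)
{
	qp->rd_atomic++;
}

/* Mirrors the IB_SEND_FENCE test in the patch: wait only while
 * read/atomic operations are still in flight.
 */
static bool fence_must_wait(const struct fake_qp *qp)
{
	return qp->rd_atomic != qp->max_rd_atomic;
}

int main(void)
{
	struct fake_qp qp = { .max_rd_atomic = 4, .rd_atomic = 4 };

	issue_read_or_atomic(&qp);
	printf("read outstanding, fenced wqe must wait: %d\n", fence_must_wait(&qp));

	complete_read_or_atomic(&qp);
	printf("read complete, fenced wqe must wait:    %d\n", fence_must_wait(&qp));
	return 0;
}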

Comments

Zhijian Li (Fujitsu) July 18, 2022, 2:13 a.m. UTC | #1
On 01/07/2022 03:04, Bob Pearson wrote:
> The code that decides whether to defer execution of a wqe in
> rxe_requester.c is isolated into a subroutine, rxe_is_fenced(),
> and removed from req_next_wqe(). The condition under which a wqe
> is fenced is changed to comply with the IBA. Currently an
> operation is fenced if the fence bit is set in the wqe flags and
> the last wqe has not completed; for normal operations the IBA
> actually only requires that the last read or atomic operation be
> complete.
>
> Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Looks good to me.

Reviewed-by: Li Zhijian <lizhijian@fujitsu.com>


> ---
> v2 replaces "RDMA/rxe: Fix incorrect fencing"
>
>   drivers/infiniband/sw/rxe/rxe_req.c | 37 ++++++++++++++++++++++++-----
>   1 file changed, 31 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
> index 9d98237389cf..e8a1664a40eb 100644
> --- a/drivers/infiniband/sw/rxe/rxe_req.c
> +++ b/drivers/infiniband/sw/rxe/rxe_req.c
> @@ -161,16 +161,36 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
>   		     (wqe->state != wqe_state_processing)))
>   		return NULL;
>   
> -	if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
> -						     (index != cons))) {
> -		qp->req.wait_fence = 1;
> -		return NULL;
> -	}
> -
>   	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
>   	return wqe;
>   }
>   
> +/**
> + * rxe_wqe_is_fenced - check if next wqe is fenced
> + * @qp: the queue pair
> + * @wqe: the next wqe
> + *
> + * Returns: 1 if wqe needs to wait
> + *	    0 if wqe is ready to go
> + */
> +static int rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
> +{
> +	/* Local invalidate fence (LIF) see IBA 10.6.5.1
> +	 * Requires ALL previous operations on the send queue
> +	 * are complete. Make mandatory for the rxe driver.
> +	 */
> +	if (wqe->wr.opcode == IB_WR_LOCAL_INV)
> +		return qp->req.wqe_index != queue_get_consumer(qp->sq.queue,
> +						QUEUE_TYPE_FROM_CLIENT);
> +
> +	/* Fence see IBA 10.8.3.3
> +	 * Requires that all previous read and atomic operations
> +	 * are complete.
> +	 */
> +	return (wqe->wr.send_flags & IB_SEND_FENCE) &&
> +		atomic_read(&qp->req.rd_atomic) != qp->attr.max_rd_atomic;
> +}
> +
>   static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
>   {
>   	switch (opcode) {
> @@ -632,6 +652,11 @@ int rxe_requester(void *arg)
>   	if (unlikely(!wqe))
>   		goto exit;
>   
> +	if (rxe_wqe_is_fenced(qp, wqe)) {
> +		qp->req.wait_fence = 1;
> +		goto exit;
> +	}
> +
>   	if (wqe->mask & WR_LOCAL_OP_MASK) {
>   		ret = rxe_do_local_ops(qp, wqe);
>   		if (unlikely(ret))

Patch

diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 9d98237389cf..e8a1664a40eb 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -161,16 +161,36 @@  static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 		     (wqe->state != wqe_state_processing)))
 		return NULL;
 
-	if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
-						     (index != cons))) {
-		qp->req.wait_fence = 1;
-		return NULL;
-	}
-
 	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
 	return wqe;
 }
 
+/**
+ * rxe_wqe_is_fenced - check if next wqe is fenced
+ * @qp: the queue pair
+ * @wqe: the next wqe
+ *
+ * Returns: 1 if wqe needs to wait
+ *	    0 if wqe is ready to go
+ */
+static int rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+	/* Local invalidate fence (LIF) see IBA 10.6.5.1
+	 * Requires ALL previous operations on the send queue
+	 * are complete. Make mandatory for the rxe driver.
+	 */
+	if (wqe->wr.opcode == IB_WR_LOCAL_INV)
+		return qp->req.wqe_index != queue_get_consumer(qp->sq.queue,
+						QUEUE_TYPE_FROM_CLIENT);
+
+	/* Fence see IBA 10.8.3.3
+	 * Requires that all previous read and atomic operations
+	 * are complete.
+	 */
+	return (wqe->wr.send_flags & IB_SEND_FENCE) &&
+		atomic_read(&qp->req.rd_atomic) != qp->attr.max_rd_atomic;
+}
+
 static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
 {
 	switch (opcode) {
@@ -632,6 +652,11 @@  int rxe_requester(void *arg)
 	if (unlikely(!wqe))
 		goto exit;
 
+	if (rxe_wqe_is_fenced(qp, wqe)) {
+		qp->req.wait_fence = 1;
+		goto exit;
+	}
+
 	if (wqe->mask & WR_LOCAL_OP_MASK) {
 		ret = rxe_do_local_ops(qp, wqe);
 		if (unlikely(ret))
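
For completeness, a hedged libibverbs sketch of the consumer-visible behavior this change tightens: a work request posted with the fence flag only has to wait for earlier RDMA READ/atomic operations on the same send queue, not for every earlier wqe. QP and memory registration setup are omitted, and the helper name and its parameters are illustrative, not part of the patch.

#include <stdint.h>
#include <string.h>
#include <infiniband/verbs.h>

/* Post an RDMA READ followed by a fenced SEND in a single chain.
 * The SEND carries IBV_SEND_FENCE, so the provider must not start it
 * until the preceding READ has completed.
 */
static int post_read_then_fenced_send(struct ibv_qp *qp,
				      struct ibv_sge *read_sge,
				      struct ibv_sge *send_sge,
				      uint64_t remote_addr, uint32_t rkey)
{
	struct ibv_send_wr read_wr, send_wr, *bad_wr;

	memset(&read_wr, 0, sizeof(read_wr));
	memset(&send_wr, 0, sizeof(send_wr));

	read_wr.wr_id = 1;
	read_wr.sg_list = read_sge;
	read_wr.num_sge = 1;
	read_wr.opcode = IBV_WR_RDMA_READ;
	read_wr.wr.rdma.remote_addr = remote_addr;
	read_wr.wr.rdma.rkey = rkey;
	read_wr.next = &send_wr;	/* chain both WRs in one post */

	send_wr.wr_id = 2;
	send_wr.sg_list = send_sge;
	send_wr.num_sge = 1;
	send_wr.opcode = IBV_WR_SEND;
	/* Fence: only prior reads/atomics must finish before this SEND. */
	send_wr.send_flags = IBV_SEND_FENCE | IBV_SEND_SIGNALED;

	return ibv_post_send(qp, &read_wr, &bad_wr);
}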