[1/1] IB: mlx4: move the variable into the function

Message ID 20190319080023.8609-1-yanjun.zhu@oracle.com (mailing list archive)
State Rejected
Headers show
Series [1/1] IB: mlx4: move the variable into the function

Commit Message

Zhu Yanjun March 19, 2019, 8 a.m. UTC
The variable cur_qp is only used in the function mlx4_ib_poll_one and is
not referenced outside of it, so move it into that function.

Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>
---
 drivers/infiniband/hw/mlx4/cq.c | 29 ++++++++++++++---------------
 1 file changed, 14 insertions(+), 15 deletions(-)

Comments

Leon Romanovsky March 19, 2019, 9:18 a.m. UTC | #1
On Tue, Mar 19, 2019 at 04:00:23AM -0400, Zhu Yanjun wrote:
> The variable cur_qp is only used in the function mlx4_ib_poll_one and is
> not referenced outside of it, so move it into that function.
>
> Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>
> ---
>  drivers/infiniband/hw/mlx4/cq.c | 29 ++++++++++++++---------------
>  1 file changed, 14 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
> index 43512347b4f0..6cedca504873 100644
> --- a/drivers/infiniband/hw/mlx4/cq.c
> +++ b/drivers/infiniband/hw/mlx4/cq.c
> @@ -669,7 +669,6 @@ static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
>  }
>
>  static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
> -			    struct mlx4_ib_qp **cur_qp,
>  			    struct ib_wc *wc)
>  {
>  	struct mlx4_cqe *cqe;
> @@ -683,6 +682,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
>  	u32 g_mlpath_rqpn;
>  	u16 wqe_ctr;
>  	unsigned tail = 0;
> +	struct mlx4_ib_qp *cur_qp = NULL;
>
>  repoll:
>  	cqe = next_cqe_sw(cq);
> @@ -720,8 +720,9 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
>  		goto repoll;
>  	}
>
> -	if (!*cur_qp ||
> -	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
> +	if (!cur_qp ||
> +	    (be32_to_cpu(cqe->vlan_my_qpn) &
> +	     MLX4_CQE_QPN_MASK) != cur_qp->mqp.qpn) {
>  		/*
>  		 * We do not have to take the QP table lock here,
>  		 * because CQs will be locked while QPs are removed
> @@ -729,10 +729,10 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
>  		 */
>  		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
>  				       be32_to_cpu(cqe->vlan_my_qpn));
> -		*cur_qp = to_mibqp(mqp);
> +		cur_qp = to_mibqp(mqp);
>  	}
>
> -	wc->qp = &(*cur_qp)->ibqp;
> +	wc->qp = &cur_qp->ibqp;
>
>  	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
>  		u32 srq_num;
> @@ -744,15 +744,15 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
>  	}
>
>  	if (is_send) {
> -		wq = &(*cur_qp)->sq;
> -		if (!(*cur_qp)->sq_signal_bits) {
> +		wq = &cur_qp->sq;
> +		if (!cur_qp->sq_signal_bits) {
>  			wqe_ctr = be16_to_cpu(cqe->wqe_index);
>  			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
>  		}
>  		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
>  		++wq->tail;
> -	} else if ((*cur_qp)->ibqp.srq) {
> -		srq = to_msrq((*cur_qp)->ibqp.srq);
> +	} else if (cur_qp->ibqp.srq) {
> +		srq = to_msrq(cur_qp->ibqp.srq);
>  		wqe_ctr = be16_to_cpu(cqe->wqe_index);
>  		wc->wr_id = srq->wrid[wqe_ctr];
>  		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
> @@ -762,7 +762,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
>  		wc->wr_id = srq->wrid[wqe_ctr];
>  		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
>  	} else {
> -		wq	  = &(*cur_qp)->rq;
> +		wq	  = &cur_qp->rq;
>  		tail	  = wq->tail & (wq->wqe_cnt - 1);
>  		wc->wr_id = wq->wrid[tail];
>  		++wq->tail;
> @@ -847,13 +847,13 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
>  		}
>
>  		is_eth = (rdma_port_get_link_layer(wc->qp->device,
> -						  (*cur_qp)->port) ==
> +						  cur_qp->port) ==
>  			  IB_LINK_LAYER_ETHERNET);
>  		if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
> -			if ((*cur_qp)->mlx4_ib_qp_type &
> +			if (cur_qp->mlx4_ib_qp_type &
>  			    (MLX4_IB_QPT_PROXY_SMI_OWNER |
>  			     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
> -				use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
> +				use_tunnel_data(cur_qp, cq, wc, tail, cqe,
>  						is_eth);
>  				return 0;
>  			}
> @@ -891,7 +891,6 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
>  int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
>  {
>  	struct mlx4_ib_cq *cq = to_mcq(ibcq);
> -	struct mlx4_ib_qp *cur_qp = NULL;
>  	unsigned long flags;
>  	int npolled;
>  	struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
> @@ -903,7 +902,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
>  	}
>
>  	for (npolled = 0; npolled < num_entries; ++npolled) {
> -		if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
> +		if (mlx4_ib_poll_one(cq, wc + npolled))

The commit message and patch are incorrect. cur_qp is used here as a
"global" variable which is preserved across the iterations of this for
loop (see the sketch after this message).

Thanks

>  			break;
>  	}
>
> --
> 2.17.1
>
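The pattern Leon is pointing at works as sketched below: mlx4_ib_poll_cq() owns
cur_qp and passes its address down, so the pointer is reset to NULL once per
poll call but preserved across that call's loop iterations, and consecutive
CQEs belonging to the same QP skip the QP-table lookup. This is only a minimal
sketch simplified from the quoted code; poll_one_sketch() and poll_cq_sketch()
are hypothetical stand-ins, not the driver's functions.

/*
 * Minimal sketch of the caching pattern: the caller owns cur_qp, so it starts
 * as NULL for each mlx4_ib_poll_cq() call but survives across the iterations
 * of that call's polling loop.
 */
static int poll_one_sketch(struct mlx4_ib_cq *cq, struct mlx4_ib_qp **cur_qp,
			   struct ib_wc *wc)
{
	struct mlx4_cqe *cqe = next_cqe_sw(cq);

	if (!cqe)
		return -EAGAIN;

	/* Look the QP up only when this CQE targets a different QP. */
	if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) !=
	     (*cur_qp)->mqp.qpn)
		*cur_qp = to_mibqp(__mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
						    be32_to_cpu(cqe->vlan_my_qpn)));

	wc->qp = &(*cur_qp)->ibqp;
	/* ... fill in the rest of the work completion ... */
	return 0;
}

static int poll_cq_sketch(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;	/* per-call cache, starts clean */
	int npolled;

	for (npolled = 0; npolled < num_entries; ++npolled)
		if (poll_one_sketch(cq, &cur_qp, wc + npolled))
			break;

	return npolled;
}

Moving cur_qp into the poll-one function, as the patch does, resets the cache
on every iteration, so every CQE pays for a __mlx4_qp_lookup() instead of one
lookup per run of CQEs from the same QP.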
Zhu Yanjun March 19, 2019, 9:44 a.m. UTC | #2
@@ -683,6 +682,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
      u32 g_mlpath_rqpn;
      u16 wqe_ctr;
      unsigned tail = 0;
+    static struct mlx4_ib_qp *cur_qp = NULL;

Thanks a lot.

If a static is added, can it achieve the same effect while keeping the
source code compact?


  repoll:
      cqe = next_cqe_sw(cq);
@@ -720,8 +720,9 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,

On 2019/3/19 17:18, Leon Romanovsky wrote:
> The commit message and patch are incorrect. cur_qp is used here as a
> "global" variable which is preserved across the iterations of this for loop.


Thanks. Adding a static should probably fix this. If you agree, I will send a V2.


>
> Thanks
Leon Romanovsky March 19, 2019, 1:21 p.m. UTC | #3
On Tue, Mar 19, 2019 at 05:44:26PM +0800, Yanjun Zhu wrote:
> @@ -683,6 +682,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
>      u32 g_mlpath_rqpn;
>      u16 wqe_ctr;
>      unsigned tail = 0;
> +    static struct mlx4_ib_qp *cur_qp = NULL;
>
> Thanks a lot.
>
> If a static is added, can it achieve the same effect while keeping the
> source code compact?

It will break consecutive calls to mlx4_ib_poll_cq(), because cur_qp
will keep the "old" value from the previous call.

>
>
>  repoll:
>      cqe = next_cqe_sw(cq);
> @@ -720,8 +720,9 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
>
> On 2019/3/19 17:18, Leon Romanovsky wrote:
> > The commit message and patch are incorrect. cur_qp is used here as a
> > "global" variable which is preserved across the iterations of this for loop.
>
>
> Thanks. Adding a static should probably fix this. If you agree, I will send a V2.

No, it won't fix.

>
>
> >
> > Thanks
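Leon's objection can be sketched as follows. This is a simplified illustration
with a hypothetical name (poll_one_static_sketch), not the driver code: a
function-local static has module lifetime, so it is shared by every CQ and
every caller and keeps whatever value the previous mlx4_ib_poll_cq() call left
in it.

static int poll_one_static_sketch(struct mlx4_ib_cq *cq, struct ib_wc *wc)
{
	/* Survives this call, and every other call, on every CQ. */
	static struct mlx4_ib_qp *cur_qp;
	struct mlx4_cqe *cqe = next_cqe_sw(cq);

	if (!cqe)
		return -EAGAIN;

	if (!cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) !=
	     cur_qp->mqp.qpn)
		cur_qp = to_mibqp(__mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
						   be32_to_cpu(cqe->vlan_my_qpn)));
	/*
	 * If the QP cached by an earlier call has been destroyed in the
	 * meantime, the cur_qp->mqp.qpn read above may dereference freed
	 * memory: the "CQs will be locked while QPs are removed" guarantee
	 * in the original comment only covers a single poll call, not state
	 * carried between calls.  Two CQs polled concurrently would also
	 * race on the shared static.  The caller-owned variable avoids both
	 * problems because it is re-initialized to NULL at the start of
	 * every mlx4_ib_poll_cq().
	 */
	wc->qp = &cur_qp->ibqp;
	return 0;
}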
Zhu Yanjun March 20, 2019, 1:56 a.m. UTC | #4
On 2019/3/19 21:21, Leon Romanovsky wrote:
> On Tue, Mar 19, 2019 at 05:44:26PM +0800, Yanjun Zhu wrote:
>> @@ -683,6 +682,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
>>       u32 g_mlpath_rqpn;
>>       u16 wqe_ctr;
>>       unsigned tail = 0;
>> +    static struct mlx4_ib_qp *cur_qp = NULL;
>>
>> Thanks a lot.
>>
>> If a static is added, can it achieve the same effect while keeping the
>> source code compact?
> It will break consecutive calls to mlx4_ib_poll_cq(), because cur_qp
> will keep the "old" value from the previous call.

Yeah, I agree with you; I have realized this problem as well. Thanks.
Please ignore this patch.

Zhu Yanjun

>
>>
>>   repoll:
>>       cqe = next_cqe_sw(cq);
>> @@ -720,8 +720,9 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
>>
>> On 2019/3/19 17:18, Leon Romanovsky wrote:
>>> The commit message and patch are incorrect. cur_qp is used here as a
>>> "global" variable which is preserved across the iterations of this for loop.
>>
>> Thanks. Adding a static should probably fix this. If you agree, I will send a V2.
> No, it won't fix.
>
>>
>>> Thanks

Patch

diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 43512347b4f0..6cedca504873 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -669,7 +669,6 @@  static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
 }
 
 static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
-			    struct mlx4_ib_qp **cur_qp,
 			    struct ib_wc *wc)
 {
 	struct mlx4_cqe *cqe;
@@ -683,6 +682,7 @@  static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 	u32 g_mlpath_rqpn;
 	u16 wqe_ctr;
 	unsigned tail = 0;
+	struct mlx4_ib_qp *cur_qp = NULL;
 
 repoll:
 	cqe = next_cqe_sw(cq);
@@ -720,8 +720,9 @@  static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 		goto repoll;
 	}
 
-	if (!*cur_qp ||
-	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
+	if (!cur_qp ||
+	    (be32_to_cpu(cqe->vlan_my_qpn) &
+	     MLX4_CQE_QPN_MASK) != cur_qp->mqp.qpn) {
 		/*
 		 * We do not have to take the QP table lock here,
 		 * because CQs will be locked while QPs are removed
@@ -729,10 +729,10 @@  static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 		 */
 		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
 				       be32_to_cpu(cqe->vlan_my_qpn));
-		*cur_qp = to_mibqp(mqp);
+		cur_qp = to_mibqp(mqp);
 	}
 
-	wc->qp = &(*cur_qp)->ibqp;
+	wc->qp = &cur_qp->ibqp;
 
 	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
 		u32 srq_num;
@@ -744,15 +744,15 @@  static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 	}
 
 	if (is_send) {
-		wq = &(*cur_qp)->sq;
-		if (!(*cur_qp)->sq_signal_bits) {
+		wq = &cur_qp->sq;
+		if (!cur_qp->sq_signal_bits) {
 			wqe_ctr = be16_to_cpu(cqe->wqe_index);
 			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
 		}
 		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
 		++wq->tail;
-	} else if ((*cur_qp)->ibqp.srq) {
-		srq = to_msrq((*cur_qp)->ibqp.srq);
+	} else if (cur_qp->ibqp.srq) {
+		srq = to_msrq(cur_qp->ibqp.srq);
 		wqe_ctr = be16_to_cpu(cqe->wqe_index);
 		wc->wr_id = srq->wrid[wqe_ctr];
 		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
@@ -762,7 +762,7 @@  static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 		wc->wr_id = srq->wrid[wqe_ctr];
 		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
 	} else {
-		wq	  = &(*cur_qp)->rq;
+		wq	  = &cur_qp->rq;
 		tail	  = wq->tail & (wq->wqe_cnt - 1);
 		wc->wr_id = wq->wrid[tail];
 		++wq->tail;
@@ -847,13 +847,13 @@  static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 		}
 
 		is_eth = (rdma_port_get_link_layer(wc->qp->device,
-						  (*cur_qp)->port) ==
+						  cur_qp->port) ==
 			  IB_LINK_LAYER_ETHERNET);
 		if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
-			if ((*cur_qp)->mlx4_ib_qp_type &
+			if (cur_qp->mlx4_ib_qp_type &
 			    (MLX4_IB_QPT_PROXY_SMI_OWNER |
 			     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
-				use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
+				use_tunnel_data(cur_qp, cq, wc, tail, cqe,
 						is_eth);
 				return 0;
 			}
@@ -891,7 +891,6 @@  static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 {
 	struct mlx4_ib_cq *cq = to_mcq(ibcq);
-	struct mlx4_ib_qp *cur_qp = NULL;
 	unsigned long flags;
 	int npolled;
 	struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
@@ -903,7 +902,7 @@  int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	}
 
 	for (npolled = 0; npolled < num_entries; ++npolled) {
-		if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
+		if (mlx4_ib_poll_one(cq, wc + npolled))
 			break;
 	}