Message ID | 20221027185510.33808-15-rpearsonhpe@gmail.com (mailing list archive) |
---|---|
State | Superseded |
Series | RDMA/rxe: Enable scatter/gather support for skbs |
Hi Bob,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on c9eeabac5e8d27a3f40280908e089058bab39edb]

url:    https://github.com/intel-lab-lkp/linux/commits/Bob-Pearson/RDMA-rxe-Enable-scatter-gather-support-for-skbs/20221028-025839
base:   c9eeabac5e8d27a3f40280908e089058bab39edb
patch link:    https://lore.kernel.org/r/20221027185510.33808-15-rpearsonhpe%40gmail.com
patch subject: [PATCH for-next 14/17] RDMA/rxe: Extend response packets for frags
config: arm64-allyesconfig
compiler: clang version 16.0.0 (https://github.com/llvm/llvm-project 791a7ae1ba3efd6bca96338e10ffde557ba83920)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install arm64 cross compiling tool for clang build
        # apt-get install binutils-aarch64-linux-gnu
        # https://github.com/intel-lab-lkp/linux/commit/36b2d4a5d83e1c9256d3b5fb3ce4989cac1e5ce9
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Bob-Pearson/RDMA-rxe-Enable-scatter-gather-support-for-skbs/20221028-025839
        git checkout 36b2d4a5d83e1c9256d3b5fb3ce4989cac1e5ce9
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=arm64 SHELL=/bin/bash drivers/infiniband/sw/rxe/

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

>> drivers/infiniband/sw/rxe/rxe_resp.c:833:6: warning: variable 'state' is used uninitialized whenever 'if' condition is true [-Wsometimes-uninitialized]
           if (err)
               ^~~
   drivers/infiniband/sw/rxe/rxe_resp.c:868:9: note: uninitialized use occurs here
           return state;
                  ^~~~~
   drivers/infiniband/sw/rxe/rxe_resp.c:833:2: note: remove the 'if' if its condition is always false
           if (err)
           ^~~~~~~~
   drivers/infiniband/sw/rxe/rxe_resp.c:771:2: note: variable 'state' is declared here
           enum resp_states state;
           ^
   1 warning generated.


vim +833 drivers/infiniband/sw/rxe/rxe_resp.c

   761	
   762	/* RDMA read response. If res is not NULL, then we have a current RDMA request
   763	 * being processed or replayed.
   764	 */
   765	static enum resp_states read_reply(struct rxe_qp *qp,
   766					   struct rxe_pkt_info *req_pkt)
   767	{
   768		struct rxe_pkt_info ack_pkt;
   769		struct sk_buff *skb;
   770		int mtu = qp->mtu;
   771		enum resp_states state;
   772		int payload;
   773		int opcode;
   774		int err;
   775		struct resp_res *res = qp->resp.res;
   776		struct rxe_mr *mr;
   777		int skb_offset = 0;
   778		bool frag;
   779		enum rxe_mr_copy_op op;
   780	
   781		if (!res) {
   782			res = rxe_prepare_res(qp, req_pkt, RXE_READ_MASK);
   783			qp->resp.res = res;
   784		}
   785	
   786		if (res->state == rdatm_res_state_new) {
   787			if (!res->replay) {
   788				mr = qp->resp.mr;
   789				qp->resp.mr = NULL;
   790			} else {
   791				mr = rxe_recheck_mr(qp, res->read.rkey);
   792				if (!mr) {
   793					state = RESPST_ERR_RKEY_VIOLATION;
   794					goto err_out;
   795				}
   796			}
   797	
   798			if (res->read.resid <= mtu)
   799				opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
   800			else
   801				opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
   802		} else {
   803			mr = rxe_recheck_mr(qp, res->read.rkey);
   804			if (!mr) {
   805				state = RESPST_ERR_RKEY_VIOLATION;
   806				goto err_out;
   807			}
   808	
   809			if (res->read.resid > mtu)
   810				opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
   811			else
   812				opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
   813		}
   814	
   815		payload = min_t(int, res->read.resid, mtu);
   816	
   817		skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload,
   818					 res->cur_psn, AETH_ACK_UNLIMITED, &frag);
   819		if (!skb) {
   820			state = RESPST_ERR_RNR;
   821			goto err_put_mr;
   822		}
   823	
   824		op = frag ? RXE_FRAG_FROM_MR : RXE_COPY_FROM_MR;
   825		err = rxe_copy_mr_data(skb, mr, res->read.va, payload_addr(&ack_pkt),
   826				       skb_offset, payload, op);
   827		if (err) {
   828			state = RESPST_ERR_RKEY_VIOLATION;
   829			goto err_free_skb;
   830		}
   831	
   832		err = rxe_prepare_pad_icrc(&ack_pkt, skb, payload, frag);
 > 833		if (err)
   834			goto err_free_skb;
   835	
   836		err = rxe_xmit_packet(qp, &ack_pkt, skb);
   837		if (err) {
   838			/* rxe_xmit_packet will consume the packet */
   839			state = RESPST_ERR_RNR;
   840			goto err_put_mr;
   841		}
   842	
   843		res->read.va += payload;
   844		res->read.resid -= payload;
   845		res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;
   846	
   847		if (res->read.resid > 0) {
   848			state = RESPST_DONE;
   849		} else {
   850			qp->resp.res = NULL;
   851			if (!res->replay)
   852				qp->resp.opcode = -1;
   853			if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
   854				qp->resp.psn = res->cur_psn;
   855			state = RESPST_CLEANUP;
   856		}
   857	
   858		/* keep these after all error exits */
   859		res->state = rdatm_res_state_next;
   860		rxe_put(mr);
   861		return state;
   862	
   863	err_free_skb:
   864		kfree_skb(skb);
   865	err_put_mr:
   866		rxe_put(mr);
   867	err_out:
   868		return state;
   869	}
   870	
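
A minimal way to make the warning go away would be to assign 'state'
before the new goto at line 833. The hunk below is only a sketch; it
assumes RESPST_ERR_RNR is an acceptable resp_state for a failed
pad/ICRC preparation, which may not be what the author intends:

	err = rxe_prepare_pad_icrc(&ack_pkt, skb, payload, frag);
	if (err) {
		/* assumption: treat a pad/ICRC preparation failure like the
		 * other local errors above; a different resp_state may be
		 * more appropriate here
		 */
		state = RESPST_ERR_RNR;
		goto err_free_skb;
	}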
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 8868415b71b6..79dcd0f37140 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -660,10 +660,8 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
 
 static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
 					  struct rxe_pkt_info *ack,
-					  int opcode,
-					  int payload,
-					  u32 psn,
-					  u8 syndrome)
+					  int opcode, int payload, u32 psn,
+					  u8 syndrome, bool *fragp)
 {
 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
 	struct sk_buff *skb;
@@ -682,7 +680,7 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
 	ack->psn = psn;
 	ack->port_num = 1;
 
-	skb = rxe_init_packet(qp, &qp->pri_av, ack, NULL);
+	skb = rxe_init_packet(qp, &qp->pri_av, ack, fragp);
 	if (!skb)
 		return NULL;
 
@@ -698,12 +696,14 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
 		atmack_set_orig(ack, qp->resp.res->atomic.orig_val);
 
 	err = rxe_prepare(&qp->pri_av, ack, skb);
-	if (err) {
-		kfree_skb(skb);
-		return NULL;
-	}
+	if (err)
+		goto err_free_skb;
 
 	return skb;
+
+err_free_skb:
+	kfree_skb(skb);
+	return NULL;
 }
 
 /**
@@ -775,6 +775,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
 	struct resp_res *res = qp->resp.res;
 	struct rxe_mr *mr;
 	int skb_offset = 0;
+	bool frag;
+	enum rxe_mr_copy_op op;
 
 	if (!res) {
 		res = rxe_prepare_res(qp, req_pkt, RXE_READ_MASK);
@@ -787,8 +789,10 @@ static enum resp_states read_reply(struct rxe_qp *qp,
 			qp->resp.mr = NULL;
 		} else {
 			mr = rxe_recheck_mr(qp, res->read.rkey);
-			if (!mr)
-				return RESPST_ERR_RKEY_VIOLATION;
+			if (!mr) {
+				state = RESPST_ERR_RKEY_VIOLATION;
+				goto err_out;
+			}
 		}
 
 		if (res->read.resid <= mtu)
@@ -797,8 +801,10 @@
 			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
 	} else {
 		mr = rxe_recheck_mr(qp, res->read.rkey);
-		if (!mr)
-			return RESPST_ERR_RKEY_VIOLATION;
+		if (!mr) {
+			state = RESPST_ERR_RKEY_VIOLATION;
+			goto err_out;
+		}
 
 		if (res->read.resid > mtu)
 			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
@@ -806,35 +812,33 @@
 			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
 	}
 
-	res->state = rdatm_res_state_next;
-
 	payload = min_t(int, res->read.resid, mtu);
 
 	skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload,
-				 res->cur_psn, AETH_ACK_UNLIMITED);
-	if (!skb)
-		return RESPST_ERR_RNR;
+				 res->cur_psn, AETH_ACK_UNLIMITED, &frag);
+	if (!skb) {
+		state = RESPST_ERR_RNR;
+		goto err_put_mr;
+	}
 
+	op = frag ? RXE_FRAG_FROM_MR : RXE_COPY_FROM_MR;
 	err = rxe_copy_mr_data(skb, mr, res->read.va, payload_addr(&ack_pkt),
-			       skb_offset, payload, RXE_COPY_FROM_MR);
+			       skb_offset, payload, op);
 	if (err) {
-		kfree_skb(skb);
-		rxe_put(mr);
-		return RESPST_ERR_RKEY_VIOLATION;
+		state = RESPST_ERR_RKEY_VIOLATION;
+		goto err_free_skb;
 	}
 
-	if (mr)
-		rxe_put(mr);
-
-	if (bth_pad(&ack_pkt)) {
-		u8 *pad = payload_addr(&ack_pkt) + payload;
-
-		memset(pad, 0, bth_pad(&ack_pkt));
-	}
+	err = rxe_prepare_pad_icrc(&ack_pkt, skb, payload, frag);
+	if (err)
+		goto err_free_skb;
 
 	err = rxe_xmit_packet(qp, &ack_pkt, skb);
-	if (err)
-		return RESPST_ERR_RNR;
+	if (err) {
+		/* rxe_xmit_packet will consume the packet */
+		state = RESPST_ERR_RNR;
+		goto err_put_mr;
+	}
 
 	res->read.va += payload;
 	res->read.resid -= payload;
@@ -851,6 +855,16 @@ static enum resp_states read_reply(struct rxe_qp *qp,
 		state = RESPST_CLEANUP;
 	}
 
+	/* keep these after all error exits */
+	res->state = rdatm_res_state_next;
+	rxe_put(mr);
+	return state;
+
+err_free_skb:
+	kfree_skb(skb);
+err_put_mr:
+	rxe_put(mr);
+err_out:
 	return state;
 }
 
@@ -1041,14 +1055,19 @@ static int send_common_ack(struct rxe_qp *qp, u8 syndrome, u32 psn,
 				  int opcode, const char *msg)
 {
 	int err;
-	struct rxe_pkt_info ack_pkt;
+	struct rxe_pkt_info ack;
 	struct sk_buff *skb;
+	int payload = 0;
 
-	skb = prepare_ack_packet(qp, &ack_pkt, opcode, 0, psn, syndrome);
+	skb = prepare_ack_packet(qp, &ack, opcode, payload,
+				 psn, syndrome, NULL);
 	if (!skb)
 		return -ENOMEM;
 
-	err = rxe_xmit_packet(qp, &ack_pkt, skb);
+	/* doesn't fail if frag == false */
+	(void)rxe_prepare_pad_icrc(&ack, skb, payload, false);
+
+	err = rxe_xmit_packet(qp, &ack, skb);
 	if (err)
 		pr_err_ratelimited("Failed sending %s\n", msg);
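
As an aside on the read_reply() hunk: the open-coded pad zeroing removed
above is the non-fragmented part of what rxe_prepare_pad_icrc()
(presumably added earlier in this series) now performs. For reference,
that removed step on its own was:

	/* zero the BTH pad bytes that trail the payload; the helper also
	 * appends the ICRC and handles fragmented skbs, omitted here
	 */
	if (bth_pad(&ack_pkt)) {
		u8 *pad = payload_addr(&ack_pkt) + payload;

		memset(pad, 0, bth_pad(&ack_pkt));
	}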
Extend prepare_ack_packet(), read_reply() and send_common_ack() in
rxe_resp.c to support fragmented skbs. Adjust calls to these routines
for the changed API. This is in preparation for using fragmented skbs.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_resp.c | 89 +++++++++++++++++-----------
 1 file changed, 54 insertions(+), 35 deletions(-)
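
For reference, the changed prepare_ack_packet() API is used in two ways
in this patch (sketched from the diff above; opcode, payload, psn and
syndrome are whatever the caller already has):

	bool frag;

	/* read_reply(): ask whether the skb was built with fragments and
	 * choose the MR copy operation accordingly
	 */
	skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload,
				 res->cur_psn, AETH_ACK_UNLIMITED, &frag);
	op = frag ? RXE_FRAG_FROM_MR : RXE_COPY_FROM_MR;

	/* send_common_ack(): no payload, so no fragment information is
	 * needed and NULL is passed (presumably yielding a linear skb)
	 */
	skb = prepare_ack_packet(qp, &ack, opcode, 0, psn, syndrome, NULL);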