Message ID | 20221021200118.2163-11-rpearsonhpe@gmail.com (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | Jason Gunthorpe |
Headers | show |
Series | Implement work queues for rdma_rxe | expand |
Hi Bob, Thank you for the patch! Perhaps something to improve: [auto build test WARNING on 9abf2313adc1ca1b6180c508c25f22f9395cc780] url: https://github.com/intel-lab-lkp/linux/commits/Bob-Pearson/Implement-work-queues-for-rdma_rxe/20221022-040425 base: 9abf2313adc1ca1b6180c508c25f22f9395cc780 patch link: https://lore.kernel.org/r/20221021200118.2163-11-rpearsonhpe%40gmail.com patch subject: [PATCH for-next v2 10/18] RDMA/rxe: Handle qp error in rxe_resp.c config: parisc-allyesconfig compiler: hppa-linux-gcc (GCC) 12.1.0 reproduce (this is a W=1 build): wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross chmod +x ~/bin/make.cross # https://github.com/intel-lab-lkp/linux/commit/89164294e45c7434c93b957bc31354f8057ed6b5 git remote add linux-review https://github.com/intel-lab-lkp/linux git fetch --no-tags linux-review Bob-Pearson/Implement-work-queues-for-rdma_rxe/20221022-040425 git checkout 89164294e45c7434c93b957bc31354f8057ed6b5 # save the config file mkdir build_dir && cp config build_dir/.config COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=parisc SHELL=/bin/bash drivers/infiniband/sw/rxe/ If you fix the issue, kindly add the following tag where applicable | Reported-by: kernel test robot <lkp@intel.com> All warnings (new ones prefixed by >>): >> drivers/infiniband/sw/rxe/rxe_resp.c:1253:5: warning: no previous prototype for 'complete_flush' [-Wmissing-prototypes] 1253 | int complete_flush(struct rxe_qp *qp, struct rxe_recv_wqe *wqe) | ^~~~~~~~~~~~~~ vim +/complete_flush +1253 drivers/infiniband/sw/rxe/rxe_resp.c 1252 > 1253 int complete_flush(struct rxe_qp *qp, struct rxe_recv_wqe *wqe) 1254 { 1255 struct rxe_cqe cqe; 1256 struct ib_wc *wc = &cqe.ibwc; 1257 struct ib_uverbs_wc *uwc = &cqe.uibwc; 1258 1259 memset(&cqe, 0, sizeof(cqe)); 1260 1261 if (qp->rcq->is_user) { 1262 uwc->status = IB_WC_WR_FLUSH_ERR; 1263 uwc->qp_num = qp->ibqp.qp_num; 1264 uwc->wr_id = wqe->wr_id; 1265 }
else { 1266 wc->status = IB_WC_WR_FLUSH_ERR; 1267 wc->qp = &qp->ibqp; 1268 wc->wr_id = wqe->wr_id; 1269 } 1270 1271 if (rxe_cq_post(qp->rcq, &cqe, 0)) 1272 return -ENOMEM; 1273 1274 return 0; 1275 } 1276
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index dd11dea70bbf..0bcdd1154641 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -1025,7 +1025,6 @@ static enum resp_states do_complete(struct rxe_qp *qp, return RESPST_CLEANUP; } - static int send_common_ack(struct rxe_qp *qp, u8 syndrome, u32 psn, int opcode, const char *msg) { @@ -1240,22 +1239,56 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp) } } -static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify) +static void rxe_drain_req_pkts(struct rxe_qp *qp) { struct sk_buff *skb; - struct rxe_queue *q = qp->rq.queue; while ((skb = skb_dequeue(&qp->req_pkts))) { rxe_put(qp); kfree_skb(skb); ib_device_put(qp->ibqp.device); } +} + +int complete_flush(struct rxe_qp *qp, struct rxe_recv_wqe *wqe) +{ + struct rxe_cqe cqe; + struct ib_wc *wc = &cqe.ibwc; + struct ib_uverbs_wc *uwc = &cqe.uibwc; + + memset(&cqe, 0, sizeof(cqe)); - if (notify) - return; + if (qp->rcq->is_user) { + uwc->status = IB_WC_WR_FLUSH_ERR; + uwc->qp_num = qp->ibqp.qp_num; + uwc->wr_id = wqe->wr_id; + } else { + wc->status = IB_WC_WR_FLUSH_ERR; + wc->qp = &qp->ibqp; + wc->wr_id = wqe->wr_id; + } - while (!qp->srq && q && queue_head(q, q->type)) + if (rxe_cq_post(qp->rcq, &cqe, 0)) + return -ENOMEM; + + return 0; +} + +/* drain the receive queue. Complete each wqe with a flush error + * if notify is true or until a cq overflow occurs. 
+ */ +static void rxe_drain_recv_queue(struct rxe_qp *qp, bool notify) +{ + struct rxe_recv_wqe *wqe; + struct rxe_queue *q = qp->rq.queue; + + while ((wqe = queue_head(q, q->type))) { + if (notify && complete_flush(qp, wqe)) + notify = 0; queue_advance_consumer(q, q->type); + } + + qp->resp.wqe = NULL; } int rxe_responder(void *arg) @@ -1264,6 +1297,7 @@ int rxe_responder(void *arg) struct rxe_dev *rxe = to_rdev(qp->ibqp.device); enum resp_states state; struct rxe_pkt_info *pkt = NULL; + bool notify; int ret; if (!rxe_get(qp)) @@ -1271,20 +1305,16 @@ int rxe_responder(void *arg) qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED; - if (!qp->valid) - goto exit; - - switch (qp->resp.state) { - case QP_STATE_RESET: - rxe_drain_req_pkts(qp, false); - qp->resp.wqe = NULL; + if (!qp->valid || qp->resp.state == QP_STATE_ERROR || + qp->resp.state == QP_STATE_RESET) { + notify = qp->valid && (qp->resp.state == QP_STATE_ERROR); + rxe_drain_req_pkts(qp); + rxe_drain_recv_queue(qp, notify); goto exit; - - default: - state = RESPST_GET_REQ; - break; } + state = RESPST_GET_REQ; + while (1) { pr_debug("qp#%d state = %s\n", qp_num(qp), resp_state_name[state]);