@@ -211,6 +211,7 @@ struct hns_roce_qp {
struct hns_roce_wq sq;
struct hns_roce_wq rq;
uint32_t *rdb;
+ uint32_t *sdb;
struct hns_roce_sge_ex sge;
unsigned int next_sge;
int port_num;
@@ -237,6 +237,9 @@ static void hns_roce_v2_clear_qp(struct hns_roce_context *ctx, uint32_t qpn)
ctx->qp_table[tind].table[qpn & ctx->qp_table_mask] = NULL;
}
+static int hns_roce_u_v2_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+ int attr_mask);
+
static int hns_roce_v2_poll_one(struct hns_roce_cq *cq,
struct hns_roce_qp **cur_qp, struct ibv_wc *wc)
{
@@ -248,6 +251,9 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *cq,
struct hns_roce_v2_cqe *cqe = NULL;
struct hns_roce_rinl_sge *sge_list;
uint32_t opcode;
+ struct ibv_qp_attr attr;
+ int attr_mask;
+ int ret;
/* According to CI, find the relative cqe */
cqe = next_cqe_sw_v2(cq);
@@ -314,6 +320,17 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *cq,
if (roce_get_field(cqe->byte_4, CQE_BYTE_4_STATUS_M,
CQE_BYTE_4_STATUS_S) != HNS_ROCE_V2_CQE_SUCCESS) {
hns_roce_v2_handle_error_cqe(cqe, wc);
+
+ /* flush cqe */
+ if ((wc->status != IBV_WC_SUCCESS) &&
+ (wc->status != IBV_WC_WR_FLUSH_ERR)) {
+ attr_mask = IBV_QP_STATE;
+ attr.qp_state = IBV_QPS_ERR;
+ ret = hns_roce_u_v2_modify_qp(&(*cur_qp)->ibv_qp,
+ &attr, attr_mask);
+ if (ret)
+ return ret;
+ }
return V2_CQ_OK;
}
@@ -533,6 +550,8 @@ static int hns_roce_u_v2_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr,
struct hns_roce_context *ctx = to_hr_ctx(ibvqp->context);
struct hns_roce_rc_sq_wqe *rc_sq_wqe;
struct hns_roce_v2_wqe_data_seg *dseg;
+ struct ibv_qp_attr attr;
+ int attr_mask;
pthread_spin_lock(&qp->sq.lock);
@@ -760,7 +779,22 @@ out:
hns_roce_update_sq_db(ctx, qp->ibv_qp.qp_num, qp->sl,
qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
+ if (qp->flags & HNS_ROCE_SUPPORT_SQ_RECORD_DB)
+ *(qp->sdb) = qp->sq.head & 0xffff;
+
qp->next_sge = ind_sge;
+
+ if (ibvqp->state == IBV_QPS_ERR) {
+ attr_mask = IBV_QP_STATE;
+ attr.qp_state = IBV_QPS_ERR;
+
+ ret = hns_roce_u_v2_modify_qp(ibvqp, &attr, attr_mask);
+ if (ret) {
+ pthread_spin_unlock(&qp->sq.lock);
+ *bad_wr = wr;
+ return ret;
+ }
+ }
}
pthread_spin_unlock(&qp->sq.lock);
@@ -778,6 +812,8 @@ static int hns_roce_u_v2_post_recv(struct ibv_qp *ibvqp, struct ibv_recv_wr *wr,
struct hns_roce_context *ctx = to_hr_ctx(ibvqp->context);
struct hns_roce_v2_wqe_data_seg *dseg;
struct hns_roce_rinl_sge *sge_list;
+ struct ibv_qp_attr attr;
+ int attr_mask;
void *wqe;
int i;
@@ -848,6 +884,18 @@ out:
else
hns_roce_update_rq_db(ctx, qp->ibv_qp.qp_num,
qp->rq.head & ((qp->rq.wqe_cnt << 1) - 1));
+
+ if (ibvqp->state == IBV_QPS_ERR) {
+ attr_mask = IBV_QP_STATE;
+ attr.qp_state = IBV_QPS_ERR;
+
+ ret = hns_roce_u_v2_modify_qp(ibvqp, &attr, attr_mask);
+ if (ret) {
+ pthread_spin_unlock(&qp->rq.lock);
+ *bad_wr = wr;
+ return ret;
+ }
+ }
}
pthread_spin_unlock(&qp->rq.lock);
@@ -991,6 +1039,9 @@ static int hns_roce_u_v2_destroy_qp(struct ibv_qp *ibqp)
if (qp->rq.max_gs)
hns_roce_free_db(to_hr_ctx(ibqp->context), qp->rdb,
HNS_ROCE_QP_TYPE_DB);
+ if (qp->sq.max_gs)
+ hns_roce_free_db(to_hr_ctx(ibqp->context), qp->sdb,
+ HNS_ROCE_QP_TYPE_DB);
hns_roce_free_buf(&qp->buf);
if (qp->rq_rinl_buf.wqe_list) {
@@ -42,6 +42,7 @@
enum {
HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0,
+ HNS_ROCE_SUPPORT_SQ_RECORD_DB = 1 << 1,
};
enum {
@@ -573,7 +573,7 @@ struct ibv_qp *hns_roce_u_create_qp(struct ibv_pd *pd,
if (hns_roce_alloc_qp_buf(pd, &attr->cap, attr->qp_type, qp)) {
fprintf(stderr, "hns_roce_alloc_qp_buf failed!\n");
- goto err;
+ goto err_buf;
}
hns_roce_init_qp_indices(qp);
@@ -585,10 +585,21 @@ struct ibv_qp *hns_roce_u_create_qp(struct ibv_pd *pd,
}
if ((to_hr_dev(pd->context->device)->hw_version != HNS_ROCE_HW_VER1) &&
+ attr->cap.max_send_sge) {
+ qp->sdb = hns_roce_alloc_db(context, HNS_ROCE_QP_TYPE_DB);
+ if (!qp->sdb)
+ goto err_free;
+
+ *(qp->sdb) = 0;
+ cmd.sdb_addr = (uintptr_t)qp->sdb;
+ } else
+ cmd.sdb_addr = 0;
+
+ if ((to_hr_dev(pd->context->device)->hw_version != HNS_ROCE_HW_VER1) &&
attr->cap.max_recv_sge) {
qp->rdb = hns_roce_alloc_db(context, HNS_ROCE_QP_TYPE_DB);
if (!qp->rdb)
- goto err_free;
+ goto err_sq_db;
*(qp->rdb) = 0;
cmd.db_addr = (uintptr_t) qp->rdb;
@@ -643,13 +654,18 @@ err_rq_db:
attr->cap.max_recv_sge)
hns_roce_free_db(context, qp->rdb, HNS_ROCE_QP_TYPE_DB);
+err_sq_db:
+ if ((to_hr_dev(pd->context->device)->hw_version != HNS_ROCE_HW_VER1) &&
+ attr->cap.max_send_sge)
+ hns_roce_free_db(context, qp->sdb, HNS_ROCE_QP_TYPE_DB);
+
err_free:
free(qp->sq.wrid);
if (qp->rq.wqe_cnt)
free(qp->rq.wrid);
hns_roce_free_buf(&qp->buf);
-err:
+err_buf:
free(qp);
return NULL;
The cqe should be flushed to error completion status if a related error is detected while polling the cqe, posting send or posting recv. The record doorbell is used to notify the kernel of the head pointers of the sq and rq. Signed-off-by: Yixian Liu <liuyixian@huawei.com> --- v1->v2: 1. Remove unnecessary prints. --- --- providers/hns/hns_roce_u.h | 1 + providers/hns/hns_roce_u_hw_v2.c | 51 ++++++++++++++++++++++++++++++++++++++++ providers/hns/hns_roce_u_hw_v2.h | 1 + providers/hns/hns_roce_u_verbs.c | 22 ++++++++++++++--- 4 files changed, 72 insertions(+), 3 deletions(-)