From patchwork Tue Apr 9 11:47:31 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lijun Ou X-Patchwork-Id: 10891139 X-Patchwork-Delegate: leon@leon.nu Return-Path: Received: from mail.wl.linuxfoundation.org (pdx-wl-mail.web.codeaurora.org [172.30.200.125]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 021FF1669 for ; Tue, 9 Apr 2019 11:48:26 +0000 (UTC) Received: from mail.wl.linuxfoundation.org (localhost [127.0.0.1]) by mail.wl.linuxfoundation.org (Postfix) with ESMTP id DFC4E28877 for ; Tue, 9 Apr 2019 11:48:25 +0000 (UTC) Received: by mail.wl.linuxfoundation.org (Postfix, from userid 486) id D38FC28891; Tue, 9 Apr 2019 11:48:25 +0000 (UTC) X-Spam-Checker-Version: SpamAssassin 3.3.1 (2010-03-16) on pdx-wl-mail.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-7.9 required=2.0 tests=BAYES_00,MAILING_LIST_MULTI, RCVD_IN_DNSWL_HI autolearn=ham version=3.3.1 Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.wl.linuxfoundation.org (Postfix) with ESMTP id AFEB32888D for ; Tue, 9 Apr 2019 11:48:24 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726633AbfDILsY (ORCPT ); Tue, 9 Apr 2019 07:48:24 -0400 Received: from szxga06-in.huawei.com ([45.249.212.32]:43738 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1726035AbfDILsX (ORCPT ); Tue, 9 Apr 2019 07:48:23 -0400 Received: from DGGEMS401-HUB.china.huawei.com (unknown [172.30.72.58]) by Forcepoint Email with ESMTP id 9D49550D88703A3BBCFA; Tue, 9 Apr 2019 19:47:26 +0800 (CST) Received: from linux-ioko.site (10.71.200.31) by DGGEMS401-HUB.china.huawei.com (10.3.19.201) with Microsoft SMTP Server id 14.3.408.0; Tue, 9 Apr 2019 19:47:20 +0800 From: Lijun Ou To: , CC: , , Subject: [PATCH for-next] RDMA/hns: Support querying qp&mr&pd with rdmatool Date: Tue, 9 Apr 2019 19:47:31 +0800 Message-ID: 
<1554810451-14715-1-git-send-email-oulijun@huawei.com>
X-Mailer: git-send-email 1.9.1
MIME-Version: 1.0
X-Originating-IP: [10.71.200.31]
X-CFilter-Loop: Reflected
Sender: linux-rdma-owner@vger.kernel.org
Precedence: bulk
List-ID: 
X-Mailing-List: linux-rdma@vger.kernel.org
X-Virus-Scanned: ClamAV using ClamSMTP

Rdmatool supports querying qp/mr/pd/cm_id context info, for example:

./rdma res show pd dev hns_0
local_dma_lkey 0x3793f080 users 2 unsafe_global_rkey 0x0 pid 3713 comm ib_send_bw drv_pdn 2
...

./rdma res show qp link hns_0
lqpn 9 irqpn 8 type RC state RTS rq-psn 12596614 sq-psn 13512359 path-mig-state ARMED pid 3713 comm ib_send_bw mac_idx 0 drv_vid 4095 drv_mtu 3 drv_sgid_idx 2 drv_srqn 0 drv_srq_en 0 drv_chk_flg 0 drv_retry_cnt 7 drv_err_type 0 drv_flush_idx 0 drv_rq_pi 44192 drv_rq_ci 38 drv_rq_shift 9 drv_rq_cqeidx 38 drv_rq_rx_err 0 drv_rq_tx_err 0 drv_rq_rty_tx_err 0 drv_rq_db_doing 0 drv_rx_cqn 4 drv_sq_pi 0 drv_sq_ci 0 drv_sq_shift 7 drv_sq_maxidx 0 drv_sq_rx_err 0 drv_sq_tx_err 0 drv_sq_db_doing 0 drv_tx_cqn 2
...
./rdma res show mr dev hns_0 rkey 0x300 lkey 0x300 mrlen 392 pid 3713 comm ib_send_bw drv_status 0 drv_lkey 0 drv_size 0 drv_ra 0 drv_ri 0 drv_li 0 drv_atomic_en 0 drv_rr_en 0 drv_rw_en 0 drv_lw_en 0 drv_pbl_buf_pgsz 0 drv_len 0 Signed-off-by: chenglang Signed-off-by: Lijun Ou Reviewed-by: Yixian Liu --- --- drivers/infiniband/hw/hns/hns_roce_device.h | 4 + drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 + drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 5 + drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c | 56 ++++ drivers/infiniband/hw/hns/hns_roce_restrack.c | 393 +++++++++++++++++++++++++ 5 files changed, 460 insertions(+) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 563cf39..46165ed 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -870,6 +870,10 @@ struct hns_roce_work { struct hns_roce_dfx_hw { int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn, int *buffer); + int (*query_qpc_info)(struct hns_roce_dev *hr_dev, u32 qpn, + int *buffer); + int (*query_mpt_info)(struct hns_roce_dev *hr_dev, u32 key, + int *buffer); }; struct hns_roce_hw { diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 0e97182..7938864 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -6070,6 +6070,8 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = { .query_cqc_info = hns_roce_v2_query_cqc_info, + .query_qpc_info = hns_roce_v2_query_qpc_info, + .query_mpt_info = hns_roce_v2_query_mpt_info, }; static const struct ib_device_ops hns_roce_v2_dev_ops = { diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index edfdbe2..1f3df31 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -643,6 
+643,7 @@ struct hns_roce_v2_qp_context { #define V2_QPC_BYTE_76_RQIE_S 28 #define V2_QPC_BYTE_76_RQ_VLAN_EN_S 30 +#define V2_QPC_BYTE_76_RQ_RTY_TX_ERR_S 31 #define V2_QPC_BYTE_80_RX_CQN_S 0 #define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0) @@ -1801,6 +1802,10 @@ struct hns_roce_sccc_clr_done { int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn, int *buffer); +int hns_roce_v2_query_qpc_info(struct hns_roce_dev *hr_dev, u32 qpn, + int *buffer); +int hns_roce_v2_query_mpt_info(struct hns_roce_dev *hr_dev, u32 key, + int *buffer); static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2], void __iomem *dest) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c index 5a97b5a..2c4adad 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c @@ -33,3 +33,59 @@ int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn, return ret; } + +int hns_roce_v2_query_qpc_info(struct hns_roce_dev *hr_dev, u32 qpn, + int *buffer) +{ + struct hns_roce_v2_qp_context *context; + struct hns_roce_cmd_mailbox *mailbox; + int ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, qpn, 0, + HNS_ROCE_CMD_QUERY_QPC, + HNS_ROCE_CMD_TIMEOUT_MSECS); + if (ret) { + dev_err(hr_dev->dev, "query qpc cmd process error\n"); + goto err_mailbox; + } + + context = mailbox->buf; + memcpy(buffer, context, sizeof(*context)); + +err_mailbox: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return ret; +} + +int hns_roce_v2_query_mpt_info(struct hns_roce_dev *hr_dev, u32 key, + int *buffer) +{ + struct hns_roce_v2_mpt_entry *context; + struct hns_roce_cmd_mailbox *mailbox; + int ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + context = mailbox->buf; + ret = hns_roce_cmd_mbox(hr_dev, 0, 
mailbox->dma, key, 0,
+				HNS_ROCE_CMD_QUERY_MPT,
+				HNS_ROCE_CMD_TIMEOUT_MSECS);
+	if (ret) {
+		dev_err(hr_dev->dev, "query mpt cmd process error\n");
+		goto err_mailbox;
+	}
+
+	memcpy(buffer, context, sizeof(*context));
+
+err_mailbox:
+	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+	return ret;
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index 0a31d0a..63cc51d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -116,11 +116,400 @@ static int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
 	return -EMSGSIZE;
 }
 
+/* Dump the receive-side fields of a QPC into the netlink driver table. */
+static int hns_roce_qp_fill_rp(struct sk_buff *msg,
+			       struct hns_roce_v2_qp_context *context)
+{
+	if (rdma_nl_put_driver_u32(msg, "rq_pi",
+				   roce_get_field(context->byte_84_rq_ci_pi,
+						  V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+						  V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S)))
+		goto err;
+
+	if (rdma_nl_put_driver_u32(msg, "rq_ci",
+				   roce_get_field(context->byte_84_rq_ci_pi,
+						  V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
+						  V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S)))
+		goto err;
+
+	if (rdma_nl_put_driver_u32(msg, "rq_shift",
+				   roce_get_field(
+						context->byte_20_smac_sgid_idx,
+						V2_QPC_BYTE_20_RQ_SHIFT_M,
+						V2_QPC_BYTE_20_RQ_SHIFT_S)))
+		goto err;
+
+	if (rdma_nl_put_driver_u32(msg, "rq_cqeidx",
+				   roce_get_field(
+						context->byte_256_sqflush_rqcqe,
+						V2_QPC_BYTE_256_RQ_CQE_IDX_M,
+						V2_QPC_BYTE_256_RQ_CQE_IDX_S)))
+		goto err;
+
+	if (rdma_nl_put_driver_u32(msg, "rq_rx_err",
+				   roce_get_bit(context->byte_56_dqpn_err,
+						V2_QPC_BYTE_56_RQ_RX_ERR_S)))
+		goto err;
+
+	if (rdma_nl_put_driver_u32(msg, "rq_tx_err",
+				   roce_get_bit(context->byte_56_dqpn_err,
+						V2_QPC_BYTE_56_RQ_TX_ERR_S)))
+		goto err;
+
+	if (rdma_nl_put_driver_u32(msg, "rq_rty_tx_err",
+				   roce_get_bit(context->byte_76_srqn_op_en,
+						V2_QPC_BYTE_76_RQ_RTY_TX_ERR_S)))
+		goto err;
+
+	if (rdma_nl_put_driver_u32(msg, "rq_db_doing",
+				   roce_get_bit(context->byte_60_qpst_tempid,
+						V2_QPC_BYTE_60_RQ_DB_DOING_S)))
+		goto err;
+
+	if (rdma_nl_put_driver_u32(msg, "rx_cqn",
+				   roce_get_field(context->byte_80_rnr_rx_cqn,
+						  V2_QPC_BYTE_80_RX_CQN_M,
+						  V2_QPC_BYTE_80_RX_CQN_S)))
+		goto err;
+
+	return 0;
+
+err:
+	return -EMSGSIZE;
+}
+
+/* Dump the send-side fields of a QPC into the netlink driver table. */
+static int hns_roce_qp_fill_sp(struct sk_buff *msg,
+			       struct hns_roce_v2_qp_context *context)
+{
+	if (rdma_nl_put_driver_u32(msg, "sq_pi",
+				   roce_get_field(context->byte_160_sq_ci_pi,
+						  V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+						  V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S)))
+		goto err;
+
+	if (rdma_nl_put_driver_u32(msg, "sq_ci",
+				   roce_get_field(context->byte_160_sq_ci_pi,
+						  V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
+						  V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S)))
+		goto err;
+
+	if (rdma_nl_put_driver_u32(msg, "sq_shift",
+				   roce_get_field(
+						context->byte_20_smac_sgid_idx,
+						V2_QPC_BYTE_20_SQ_SHIFT_M,
+						V2_QPC_BYTE_20_SQ_SHIFT_S)))
+		goto err;
+
+	if (rdma_nl_put_driver_u32(msg, "sq_maxidx",
+				   roce_get_field(context->byte_200_sq_max,
+						  V2_QPC_BYTE_200_SQ_MAX_IDX_M,
+						  V2_QPC_BYTE_200_SQ_MAX_IDX_S)))
+		goto err;
+
+	if (rdma_nl_put_driver_u32(msg, "sq_rx_err",
+				   roce_get_bit(context->byte_56_dqpn_err,
+						V2_QPC_BYTE_56_SQ_RX_ERR_S)))
+		goto err;
+
+	if (rdma_nl_put_driver_u32(msg, "sq_tx_err",
+				   roce_get_bit(context->byte_56_dqpn_err,
+						V2_QPC_BYTE_56_SQ_TX_ERR_S)))
+		goto err;
+
+	if (rdma_nl_put_driver_u32(msg, "sq_db_doing",
+				   roce_get_bit(context->byte_60_qpst_tempid,
+						V2_QPC_BYTE_60_SQ_DB_DOING_S)))
+		goto err;
+
+	if (rdma_nl_put_driver_u32(msg, "tx_cqn",
+				   roce_get_field(context->byte_252_err_txcqn,
+						  V2_QPC_BYTE_252_TX_CQN_M,
+						  V2_QPC_BYTE_252_TX_CQN_S)))
+		goto err;
+
+	return 0;
+
+err:
+	return -EMSGSIZE;
+}
+
+static int hns_roce_fill_qp(struct sk_buff *msg,
+			    struct hns_roce_v2_qp_context *context)
+{
+	if (rdma_nl_put_driver_u32(msg, "smac_idx",
+				   roce_get_field(
+					context->byte_20_smac_sgid_idx,
+					V2_QPC_BYTE_20_SMAC_IDX_M,
+
V2_QPC_BYTE_20_SMAC_IDX_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "vid", + roce_get_field(context->byte_24_mtu_tc, + V2_QPC_BYTE_24_VLAN_ID_M, + V2_QPC_BYTE_24_VLAN_ID_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "mtu", + roce_get_field(context->byte_24_mtu_tc, + V2_QPC_BYTE_24_MTU_M, + V2_QPC_BYTE_24_MTU_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "sgid_idx", + roce_get_field( + context->byte_20_smac_sgid_idx, + V2_QPC_BYTE_20_SGID_IDX_M, + V2_QPC_BYTE_20_SGID_IDX_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "srqn", + roce_get_field(context->byte_76_srqn_op_en, + V2_QPC_BYTE_76_SRQN_M, + V2_QPC_BYTE_76_SRQN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "srq_en", + roce_get_bit(context->byte_76_srqn_op_en, + V2_QPC_BYTE_76_SRQ_EN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "chk_flg", + roce_get_field(context->byte_212_lsn, + V2_QPC_BYTE_212_CHECK_FLG_M, + V2_QPC_BYTE_212_CHECK_FLG_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "retry_cnt", + roce_get_field(context->byte_212_lsn, + V2_QPC_BYTE_212_RETRY_CNT_M, + V2_QPC_BYTE_212_RETRY_CNT_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "err_type", + roce_get_field(context->byte_252_err_txcqn, + V2_QPC_BYTE_252_ERR_TYPE_M, + V2_QPC_BYTE_252_ERR_TYPE_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "flush_idx", + roce_get_field( + context->byte_256_sqflush_rqcqe, + V2_QPC_BYTE_256_SQ_FLUSH_IDX_M, + V2_QPC_BYTE_256_SQ_FLUSH_IDX_S))) + goto err; + + if (hns_roce_qp_fill_rp(msg, context)) + goto err; + + if (hns_roce_qp_fill_sp(msg, context)) + goto err; + + return 0; + +err: + return -EMSGSIZE; +} + +static int hns_roce_fill_res_qp_entry(struct sk_buff *msg, + struct rdma_restrack_entry *res) +{ + struct ib_qp *ib_qp = container_of(res, struct ib_qp, res); + struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device); + struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp); + struct hns_roce_v2_qp_context *context; + struct nlattr *table_attr; + int ret; + + if 
(!hr_dev->dfx->query_qpc_info) + return -EINVAL; + + context = kzalloc(sizeof(struct hns_roce_v2_qp_context), GFP_KERNEL); + if (!context) + return -ENOMEM; + + ret = hr_dev->dfx->query_qpc_info(hr_dev, hr_qp->qpn, (int *)context); + if (ret) + goto err; + + table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); + if (!table_attr) + goto err; + + if (hns_roce_fill_qp(msg, context)) + goto err_cancel_table; + + nla_nest_end(msg, table_attr); + kfree(context); + + return 0; + +err_cancel_table: + nla_nest_cancel(msg, table_attr); +err: + kfree(context); + return -EMSGSIZE; +} + + +static int hns_roce_fill_mr(struct sk_buff *msg, + struct hns_roce_v2_mpt_entry *context) +{ + u64 val_h32; + + if (rdma_nl_put_driver_u32(msg, "status", + roce_get_field(context->byte_4_pd_hop_st, + V2_MPT_BYTE_4_MPT_ST_M, + V2_MPT_BYTE_4_MPT_ST_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "lkey", context->lkey)) + goto err; + + if (rdma_nl_put_driver_u32(msg, "size", context->pbl_size)) + goto err; + + if (rdma_nl_put_driver_u32(msg, "ra", + roce_get_bit(context->byte_8_mw_cnt_en, + V2_MPT_BYTE_8_RA_EN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "ri", + roce_get_bit(context->byte_8_mw_cnt_en, + V2_MPT_BYTE_8_R_INV_EN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "li", + roce_get_bit(context->byte_8_mw_cnt_en, + V2_MPT_BYTE_8_L_INV_EN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "atomic_en", + roce_get_bit(context->byte_8_mw_cnt_en, + V2_MPT_BYTE_8_ATOMIC_EN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "rr_en", + roce_get_bit(context->byte_8_mw_cnt_en, + V2_MPT_BYTE_8_RR_EN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "rw_en", + roce_get_bit(context->byte_8_mw_cnt_en, + V2_MPT_BYTE_8_RW_EN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "lw_en", + roce_get_bit(context->byte_8_mw_cnt_en, + V2_MPT_BYTE_8_LW_EN_S))) + goto err; + + if (rdma_nl_put_driver_u32(msg, "pbl_buf_pgsz", + roce_get_field(context->byte_64_buf_pa1, + 
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M, + V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S))) + goto err; + + val_h32 = context->len_h; + if (rdma_nl_put_driver_u64(msg, "len", + val_h32 << 32 | context->len_l)) + goto err; + + return 0; + +err: + return -EMSGSIZE; +} + +static int hns_roce_fill_res_mr_entry(struct sk_buff *msg, + struct rdma_restrack_entry *res) +{ + struct ib_mr *ib_mr = container_of(res, struct ib_mr, res); + struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device); + struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr); + struct hns_roce_v2_mpt_entry *context; + struct nlattr *table_attr; + int key = hr_mr->key; + int ret; + + if (!hr_dev->dfx->query_mpt_info) + return -EINVAL; + + context = kzalloc(sizeof(struct hns_roce_v2_mpt_entry), GFP_KERNEL); + if (!context) + return -ENOMEM; + + ret = hr_dev->dfx->query_mpt_info(hr_dev, key, (int *)context); + if (ret) + goto err; + + table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); + if (!table_attr) + goto err; + + if (hns_roce_fill_mr(msg, context)) + goto err_cancel_table; + + nla_nest_end(msg, table_attr); + kfree(context); + + return 0; + +err_cancel_table: + nla_nest_cancel(msg, table_attr); +err: + kfree(context); + return -EMSGSIZE; +} + +static int hns_roce_fill_pd(struct sk_buff *msg, + struct hns_roce_pd *hr_pd) +{ + if (rdma_nl_put_driver_u32(msg, "pdn", hr_pd->pdn)) + goto err; + + return 0; + +err: + return -EMSGSIZE; +} + +static int hns_roce_fill_res_pd_entry(struct sk_buff *msg, + struct rdma_restrack_entry *res) +{ + struct ib_pd *ib_pd = container_of(res, struct ib_pd, res); + struct hns_roce_pd *hr_pd = to_hr_pd(ib_pd); + struct nlattr *table_attr; + + table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); + if (!table_attr) + goto err; + + if (hns_roce_fill_pd(msg, hr_pd)) + goto err_cancel_table; + + nla_nest_end(msg, table_attr); + + return 0; + +err_cancel_table: + nla_nest_cancel(msg, table_attr); +err: + return -EMSGSIZE; +} + int hns_roce_fill_res_entry(struct sk_buff *msg, struct rdma_restrack_entry 
*res) { + if (res->type == RDMA_RESTRACK_PD) + return hns_roce_fill_res_pd_entry(msg, res); + if (res->type == RDMA_RESTRACK_CQ) return hns_roce_fill_res_cq_entry(msg, res); + if (res->type == RDMA_RESTRACK_QP) + return hns_roce_fill_res_qp_entry(msg, res); + + if (res->type == RDMA_RESTRACK_MR) + return hns_roce_fill_res_mr_entry(msg, res); + return 0; }