From patchwork Sat Sep 22 08:21:05 2018
X-Patchwork-Submitter: Lijun Ou
X-Patchwork-Id: 10611323
X-Patchwork-Delegate: jgg@ziepe.ca
From: Lijun Ou
Subject: [RESEND PATCH V2 for-next 1/4] RDMA/hns: Refactor the codes for setting transport opcode
Date: Sat, 22 Sep 2018 16:21:05 +0800
Message-ID: <1537604468-52162-2-git-send-email-oulijun@huawei.com>
In-Reply-To: <1537604468-52162-1-git-send-email-oulijun@huawei.com>
References: <1537604468-52162-1-git-send-email-oulijun@huawei.com>
X-Mailing-List: linux-rdma@vger.kernel.org

Currently the transport opcode that comes from the user's work request is
programmed with a series of nearly identical roce_set_field() calls, one per
opcode. This patch simplifies the code: each case of the switch statement now
only selects the hardware opcode, and the WQE opcode field is written once
after the switch.
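As a condensed illustration of the resulting pattern (most cases and the
surrounding post-send context omitted; see the diff below for the full change):

	u32 hr_op;

	switch (wr->opcode) {
	case IB_WR_RDMA_READ:
		hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
		break;
	case IB_WR_SEND:
		hr_op = HNS_ROCE_V2_WQE_OP_SEND;
		break;
	/* ... remaining IB_WR_* opcodes ... */
	default:
		hr_op = HNS_ROCE_V2_WQE_OP_MASK;
		break;
	}

	/* A single field write replaces the per-case roce_set_field() calls. */
	roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
		       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op);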
Signed-off-by: Lijun Ou
---
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 66 ++++++++----------------------
 1 file changed, 18 insertions(+), 48 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 7ccf377..903ed66 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -191,6 +191,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
 	int attr_mask;
 	u32 tmp_len;
 	int ret = 0;
+	u32 hr_op;
 	u8 *smac;
 	int nreq;
 	int i;
@@ -408,91 +409,60 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
 			switch (wr->opcode) {
 			case IB_WR_RDMA_READ:
-				roce_set_field(rc_sq_wqe->byte_4,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-					       HNS_ROCE_V2_WQE_OP_RDMA_READ);
+				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
 				rc_sq_wqe->rkey =
 					cpu_to_le32(rdma_wr(wr)->rkey);
 				rc_sq_wqe->va =
 					cpu_to_le64(rdma_wr(wr)->remote_addr);
 				break;
 			case IB_WR_RDMA_WRITE:
-				roce_set_field(rc_sq_wqe->byte_4,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-					       HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
+				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE;
 				rc_sq_wqe->rkey =
 					cpu_to_le32(rdma_wr(wr)->rkey);
 				rc_sq_wqe->va =
 					cpu_to_le64(rdma_wr(wr)->remote_addr);
 				break;
 			case IB_WR_RDMA_WRITE_WITH_IMM:
-				roce_set_field(rc_sq_wqe->byte_4,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-					       HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM);
+				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM;
 				rc_sq_wqe->rkey =
 					cpu_to_le32(rdma_wr(wr)->rkey);
 				rc_sq_wqe->va =
 					cpu_to_le64(rdma_wr(wr)->remote_addr);
 				break;
 			case IB_WR_SEND:
-				roce_set_field(rc_sq_wqe->byte_4,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-					       HNS_ROCE_V2_WQE_OP_SEND);
+				hr_op = HNS_ROCE_V2_WQE_OP_SEND;
 				break;
 			case IB_WR_SEND_WITH_INV:
-				roce_set_field(rc_sq_wqe->byte_4,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-					       HNS_ROCE_V2_WQE_OP_SEND_WITH_INV);
+				hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV;
 				break;
 			case IB_WR_SEND_WITH_IMM:
-				roce_set_field(rc_sq_wqe->byte_4,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-					       HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
+				hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM;
 				break;
 			case IB_WR_LOCAL_INV:
-				roce_set_field(rc_sq_wqe->byte_4,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-					       HNS_ROCE_V2_WQE_OP_LOCAL_INV);
+				hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV;
 				break;
 			case IB_WR_ATOMIC_CMP_AND_SWP:
-				roce_set_field(rc_sq_wqe->byte_4,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-					       HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
+				hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP;
 				break;
 			case IB_WR_ATOMIC_FETCH_AND_ADD:
-				roce_set_field(rc_sq_wqe->byte_4,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-					       HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
+				hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD;
 				break;
 			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
-				roce_set_field(rc_sq_wqe->byte_4,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-					       HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP);
+				hr_op =
+				      HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP;
 				break;
 			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
-				roce_set_field(rc_sq_wqe->byte_4,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-					       HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD);
+				hr_op =
+				     HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD;
 				break;
 			default:
-				roce_set_field(rc_sq_wqe->byte_4,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-					       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-					       HNS_ROCE_V2_WQE_OP_MASK);
+				hr_op =
+					HNS_ROCE_V2_WQE_OP_MASK;
 				break;
 			}
 
+			roce_set_field(rc_sq_wqe->byte_4,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+				       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op);
 			wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
 
 			ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe,

From patchwork Sat Sep 22 08:21:06 2018
X-Patchwork-Submitter: Lijun Ou
X-Patchwork-Id: 10611319
X-Patchwork-Delegate: jgg@ziepe.ca
From: Lijun Ou
Subject: [RESEND PATCH V2 for-next 2/4] RDMA/hns: Add atomic support
Date: Sat, 22 Sep 2018 16:21:06 +0800
Message-ID: <1537604468-52162-3-git-send-email-oulijun@huawei.com>
In-Reply-To: <1537604468-52162-1-git-send-email-oulijun@huawei.com>
References: <1537604468-52162-1-git-send-email-oulijun@huawei.com>
X-Mailing-List: linux-rdma@vger.kernel.org

This patch adds atomic operations for hip08, namely the fetch-and-add and
compare-and-swap operations. To enable atomics, the driver needs to:

1. Enable the atomic capability for the RoCE device.
2. Post WQEs of the atomic types.
3. Configure the atomic access flag in the MPT entry.
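In terms of WQE layout, the change below places a 16-byte atomic segment after
the data segment that describes the local buffer, while the rkey and remote
address of the atomic target go into the base RC send WQE. The operand
encoding, condensed from the diff:

	struct hns_roce_wqe_atomic_seg {
		__le64 fetchadd_swap_data;
		__le64 cmp_data;
	};

	static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
				   const struct ib_atomic_wr *wr)
	{
		if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
			/* CAS: carry both the swap value and the compare value. */
			aseg->fetchadd_swap_data = cpu_to_le64(wr->swap);
			aseg->cmp_data = cpu_to_le64(wr->compare_add);
		} else {
			/* Fetch-and-add: only the add operand is used. */
			aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add);
			aseg->cmp_data = 0;
		}
	}

The single SGE posted with the work request supplies the local buffer that
receives the original value returned by the remote atomic operation.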
Signed-off-by: Lijun Ou
---
 drivers/infiniband/hw/hns/hns_roce_device.h |  1 +
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c  | 39 +++++++++++++++++++++++++++--
 drivers/infiniband/hw/hns/hns_roce_hw_v2.h  |  5 ++++
 drivers/infiniband/hw/hns/hns_roce_main.c   |  3 ++-
 4 files changed, 45 insertions(+), 3 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index cfb88a4..cdd0bb0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -193,6 +193,7 @@ enum {
 	HNS_ROCE_CAP_FLAG_RQ_INLINE		= BIT(2),
 	HNS_ROCE_CAP_FLAG_RECORD_DB		= BIT(3),
 	HNS_ROCE_CAP_FLAG_SQ_RECORD_DB		= BIT(4),
+	HNS_ROCE_CAP_FLAG_ATOMIC		= BIT(10),
 };
 
 enum hns_roce_mtt_type {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 903ed66..97ff5da 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -54,6 +54,18 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
 	dseg->len  = cpu_to_le32(sg->length);
 }
 
+static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
+			   const struct ib_atomic_wr *wr)
+{
+	if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+		aseg->fetchadd_swap_data = cpu_to_le64(wr->swap);
+		aseg->cmp_data = cpu_to_le64(wr->compare_add);
+	} else {
+		aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add);
+		aseg->cmp_data = 0;
+	}
+}
+
 static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
 			   unsigned int *sge_ind)
 {
@@ -179,6 +191,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
 	struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
 	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
 	struct hns_roce_qp *qp = to_hr_qp(ibqp);
+	struct hns_roce_v2_wqe_data_seg *dseg;
 	struct device *dev = hr_dev->dev;
 	struct hns_roce_v2_db sq_db;
 	struct ib_qp_attr attr;
@@ -407,6 +420,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
 			roce_set_bit(rc_sq_wqe->byte_4,
 				     V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
 
+			wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
 			switch (wr->opcode) {
 			case IB_WR_RDMA_READ:
 				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
@@ -443,9 +457,21 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
 				break;
 			case IB_WR_ATOMIC_CMP_AND_SWP:
 				hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP;
+				rc_sq_wqe->rkey =
+					cpu_to_le32(atomic_wr(wr)->rkey);
+				rc_sq_wqe->va =
+					cpu_to_le32(atomic_wr(wr)->remote_addr);
+				wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
+				set_atomic_seg(wqe, atomic_wr(wr));
 				break;
 			case IB_WR_ATOMIC_FETCH_AND_ADD:
 				hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD;
+				rc_sq_wqe->rkey =
+					cpu_to_le32(atomic_wr(wr)->rkey);
+				rc_sq_wqe->va =
+					cpu_to_le32(atomic_wr(wr)->remote_addr);
+				wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
+				set_atomic_seg(wqe, atomic_wr(wr));
 				break;
 			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
 				hr_op =
@@ -463,7 +489,12 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
 			roce_set_field(rc_sq_wqe->byte_4,
 				       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
 				       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op);
-			wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
+			if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+			    wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
+				dseg =
+				    wqe - sizeof(struct hns_roce_v2_wqe_data_seg);
+			else
+				dseg = wqe;
 
 			ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe,
 						&sge_ind, bad_wr);
@@ -1232,6 +1263,9 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
 	caps->local_ca_ack_delay = 0;
 	caps->max_mtu = IB_MTU_4096;
 
+	if (hr_dev->pci_dev->revision == 0x21)
+		caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC;
+
 	ret = hns_roce_v2_set_bt(hr_dev);
 	if (ret)
 		dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
@@ -1663,7 +1697,8 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 0);
 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
 		     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
-	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S, 0);
+	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
+		     mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
 		     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 14aa308..6f92722 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -1564,4 +1564,9 @@ struct hns_roce_eq_context {
 #define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0
 #define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0)
 
+struct hns_roce_wqe_atomic_seg {
+	__le64  fetchadd_swap_data;
+	__le64  cmp_data;
+};
+
 #endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c5cae9a..1306d79 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -215,7 +215,8 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
 	props->max_pd = hr_dev->caps.num_pds;
 	props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
 	props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
-	props->atomic_cap = IB_ATOMIC_NONE;
+	props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ?
+			    IB_ATOMIC_HCA : IB_ATOMIC_NONE;
 	props->max_pkeys = 1;
 	props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;

From patchwork Sat Sep 22 08:21:07 2018
X-Patchwork-Submitter: Lijun Ou
X-Patchwork-Id: 10611325
X-Patchwork-Delegate: jgg@ziepe.ca
From: Lijun Ou
Subject: [RESEND PATCH V2 for-next 3/4] RDMA/hns: Add CM of vlan device support
Date: Sat, 22 Sep 2018 16:21:07 +0800
Message-ID: <1537604468-52162-4-git-send-email-oulijun@huawei.com>
In-Reply-To: <1537604468-52162-1-git-send-email-oulijun@huawei.com>
References: <1537604468-52162-1-git-send-email-oulijun@huawei.com>
X-Mailing-List: linux-rdma@vger.kernel.org

This patch sets the vlan_id field in the work completion so that rdma_listen()
works over a VLAN device. The field is required by ib_init_ah_attr_from_wc(),
which is called by the CM REQ handler.
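Condensed from the diff below, hns_roce_v2_poll_one() now reports the VLAN ID
carried in the CQE whenever the hardware marks it valid:

	/* Expose the VLAN ID in the work completion so that
	 * ib_init_ah_attr_from_wc() can build a VLAN-tagged AH for the CM
	 * REQ; otherwise keep the "no VLAN" value of 0xffff.
	 */
	if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S))
		wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
						  V2_CQE_BYTE_28_VID_M,
						  V2_CQE_BYTE_28_VID_S);
	else
		wc->vlan_id = 0xffff;

	wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);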
Signed-off-by: Lijun Ou
---
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 9 ++++++++-
 drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 5 +++++
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 97ff5da..a781f6b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -2292,7 +2292,14 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
 		wc->smac[5] = roce_get_field(cqe->byte_28,
 					     V2_CQE_BYTE_28_SMAC_5_M,
 					     V2_CQE_BYTE_28_SMAC_5_S);
-		wc->vlan_id = 0xffff;
+		if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
+			wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
+							  V2_CQE_BYTE_28_VID_M,
+							  V2_CQE_BYTE_28_VID_S);
+		} else {
+			wc->vlan_id = 0xffff;
+		}
+
 		wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
 		wc->network_hdr_type = roce_get_field(cqe->byte_28,
 						      V2_CQE_BYTE_28_PORT_TYPE_M,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 6f92722..d04be1c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -818,6 +818,11 @@ struct hns_roce_v2_cqe {
 #define V2_CQE_BYTE_28_PORT_TYPE_S 16
 #define V2_CQE_BYTE_28_PORT_TYPE_M GENMASK(17, 16)
 
+#define V2_CQE_BYTE_28_VID_S 18
+#define V2_CQE_BYTE_28_VID_M GENMASK(29, 18)
+
+#define V2_CQE_BYTE_28_VID_VLD_S 30
+
 #define V2_CQE_BYTE_32_RMT_QPN_S 0
 #define V2_CQE_BYTE_32_RMT_QPN_M GENMASK(23, 0)

From patchwork Sat Sep 22 08:21:08 2018
X-Patchwork-Submitter: Lijun Ou
X-Patchwork-Id: 10611327
X-Patchwork-Delegate: jgg@ziepe.ca
From: Lijun Ou
Subject: [RESEND PATCH V2 for-next 4/4] RDMA/hns: Add enable judgement for UD vlan
Date: Sat, 22 Sep 2018 16:21:08 +0800
Message-ID: <1537604468-52162-5-git-send-email-oulijun@huawei.com>
In-Reply-To: <1537604468-52162-1-git-send-email-oulijun@huawei.com>
References: <1537604468-52162-1-git-send-email-oulijun@huawei.com>
X-Mailing-List: linux-rdma@vger.kernel.org

Following a hardware change, whether a VLAN header is inserted into a UD packet
is controlled by the ud_vlan_en field of the UD WQE. The driver fills this
field based on whether the net device associated with the address handle is a
VLAN device.

Signed-off-by: Lijun Ou
---
 drivers/infiniband/hw/hns/hns_roce_ah.c     | 6 +++++-
 drivers/infiniband/hw/hns/hns_roce_device.h | 1 +
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c  | 3 +++
 drivers/infiniband/hw/hns/hns_roce_hw_v2.h  | 2 ++
 4 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 0d96c5b..9990dc9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -49,6 +49,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
 	struct hns_roce_ah *ah;
 	u16 vlan_tag = 0xffff;
 	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+	bool vlan_en = false;
 
 	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
 	if (!ah)
@@ -58,8 +59,10 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
 	memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
 
 	gid_attr = ah_attr->grh.sgid_attr;
-	if (is_vlan_dev(gid_attr->ndev))
+	if (is_vlan_dev(gid_attr->ndev)) {
 		vlan_tag = vlan_dev_vlan_id(gid_attr->ndev);
+		vlan_en = true;
+	}
 
 	if (vlan_tag < 0x1000)
 		vlan_tag |= (rdma_ah_get_sl(ah_attr) &
@@ -71,6 +74,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
 				     HNS_ROCE_PORT_NUM_SHIFT));
 	ah->av.gid_index = grh->sgid_index;
 	ah->av.vlan = cpu_to_le16(vlan_tag);
+	ah->av.vlan_en = vlan_en;
 
 	dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index,
 		ah->av.vlan);
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index cdd0bb0..c0a65b8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -458,6 +458,7 @@ struct hns_roce_av {
 	u8          dgid[HNS_ROCE_GID_SIZE];
 	u8          mac[6];
 	__le16      vlan;
+	bool        vlan_en;
 };
 
 struct hns_roce_ah {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index a781f6b..3b52d56 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -370,6 +370,9 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
 				       V2_UD_SEND_WQE_BYTE_40_PORTN_S,
 				       qp->port);
 
+			roce_set_bit(ud_sq_wqe->byte_40,
+				     V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
+				     ah->av.vlan_en ? 1 : 0);
 			roce_set_field(ud_sq_wqe->byte_48,
 				       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
 				       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index d04be1c..7ee6ed2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -993,6 +993,8 @@ struct hns_roce_v2_ud_send_wqe {
 #define V2_UD_SEND_WQE_BYTE_40_PORTN_S 24
 #define V2_UD_SEND_WQE_BYTE_40_PORTN_M GENMASK(26, 24)
 
+#define V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S 30
+
 #define V2_UD_SEND_WQE_BYTE_40_LBI_S 31
 
 #define V2_UD_SEND_WQE_DMAC_0_S 0
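For reference, a condensed view of the two halves of the change above: AH
creation records whether the source GID's netdev is a VLAN device, and the UD
send path programs the corresponding WQE bit.

	/* hns_roce_create_ah(): remember whether the source GID's netdev is a
	 * VLAN device, so the UD WQE can ask hardware to insert the tag.
	 */
	if (is_vlan_dev(gid_attr->ndev)) {
		vlan_tag = vlan_dev_vlan_id(gid_attr->ndev);
		vlan_en = true;
	}
	ah->av.vlan = cpu_to_le16(vlan_tag);
	ah->av.vlan_en = vlan_en;

	/* hns_roce_v2_post_send(): propagate the flag into the UD send WQE. */
	roce_set_bit(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
		     ah->av.vlan_en ? 1 : 0);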