[v2,rdma-core,3/3] libhns: Add bind mw support for hip08

Message ID 1538996263-18072-4-git-send-email-liuyixian@huawei.com
State Superseded
Series libhns: Add mw support for hip08 in user space

Commit Message

Yixian Liu Oct. 8, 2018, 10:57 a.m. UTC
This patch adds memory window bind support to the user space driver.

Signed-off-by: Yixian Liu <liuyixian@huawei.com>
---
 providers/hns/hns_roce_u.c       |  1 +
 providers/hns/hns_roce_u.h       |  2 ++
 providers/hns/hns_roce_u_hw_v2.c | 34 ++++++++++++++++++++++++++++++++--
 providers/hns/hns_roce_u_hw_v2.h | 12 ++++++++++++
 providers/hns/hns_roce_u_verbs.c | 36 ++++++++++++++++++++++++++++++++++++
 5 files changed, 83 insertions(+), 2 deletions(-)
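
For context, a minimal usage sketch of the bind path this series adds, seen
from the application side; pd, qp, mr, buf and len are assumed to already be
set up, and the function name is illustrative:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <infiniband/verbs.h>

/* Bind a type-1 memory window over an already-registered buffer. */
static int bind_type1_mw(struct ibv_pd *pd, struct ibv_qp *qp,
			 struct ibv_mr *mr, void *buf, size_t len)
{
	struct ibv_mw_bind mw_bind = {};
	struct ibv_mw *mw;
	int ret;

	/* The MW must be allocated on the same PD as the QP and the MR. */
	mw = ibv_alloc_mw(pd, IBV_MW_TYPE_1);
	if (!mw)
		return errno;

	mw_bind.wr_id = 1;
	mw_bind.send_flags = IBV_SEND_SIGNALED;
	mw_bind.bind_info.mr = mr;
	mw_bind.bind_info.addr = (uintptr_t)buf;
	mw_bind.bind_info.length = len;
	mw_bind.bind_info.mw_access_flags = IBV_ACCESS_REMOTE_READ |
					    IBV_ACCESS_REMOTE_WRITE;

	/* For type-1 MWs libibverbs dispatches this call to the provider's
	 * bind_mw hook, i.e. hns_roce_u_bind_mw() in this series. */
	ret = ibv_bind_mw(qp, mw, &mw_bind);
	if (ret) {
		ibv_dealloc_mw(mw);
		return ret;
	}

	/* mw->rkey now holds the updated rkey to advertise to the peer. */
	return 0;
}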

Comments

Jason Gunthorpe Oct. 10, 2018, 8:36 p.m. UTC | #1
On Mon, Oct 08, 2018 at 06:57:43PM +0800, Yixian Liu wrote:
> +int hns_roce_u_bind_mw(struct ibv_qp *qp, struct ibv_mw *mw,
> +		       struct ibv_mw_bind *mw_bind)
> +{
> +	struct ibv_mw_bind_info *bind_info = &mw_bind->bind_info;
> +	struct ibv_send_wr *bad_wr = NULL;
> +	struct ibv_send_wr wr = {};
> +	int ret;
> +
> +	if ((mw->pd != qp->pd) || (mw->pd != bind_info->mr->pd))
> +		return EINVAL;
> +
> +	if (mw->type == IBV_MW_TYPE_2)
> +		return EINVAL;

This should be written as mw->type != IBV_MW_TYPE_1

Same reason access flags should be a list of supported flags, not a
list of unsupported ones.

Jason
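
For illustration, the check written in the suggested whitelist style; the
access mask shown is only a sketch and not part of the patch:

	/* Accept only what is explicitly supported: type-1 windows ... */
	if (mw->type != IBV_MW_TYPE_1)
		return EINVAL;

	/* ... and only access flags the hardware handles (illustrative
	 * whitelist, to be confirmed against the hip08 documentation). */
	if (bind_info->mw_access_flags & ~(IBV_ACCESS_REMOTE_READ |
					   IBV_ACCESS_REMOTE_WRITE |
					   IBV_ACCESS_REMOTE_ATOMIC))
		return EINVAL;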
Yixian Liu Oct. 14, 2018, 1:31 a.m. UTC | #2
On 2018/10/11 4:36, Jason Gunthorpe wrote:
> On Mon, Oct 08, 2018 at 06:57:43PM +0800, Yixian Liu wrote:
>> +int hns_roce_u_bind_mw(struct ibv_qp *qp, struct ibv_mw *mw,
>> +		       struct ibv_mw_bind *mw_bind)
>> +{
>> +	struct ibv_mw_bind_info *bind_info = &mw_bind->bind_info;
>> +	struct ibv_send_wr *bad_wr = NULL;
>> +	struct ibv_send_wr wr = {};
>> +	int ret;
>> +
>> +	if ((mw->pd != qp->pd) || (mw->pd != bind_info->mr->pd))
>> +		return EINVAL;
>> +
>> +	if (mw->type == IBV_MW_TYPE_2)
>> +		return EINVAL;
> 
> This should be written as mw->type != IBV_MW_TYPE_1
> 
> Same reason access flags should be a list of supported flags, not a
> list of unsupported ones.
> 
> Jason

It's a good suggestion! It will be much clearer to code readers what
the code does. Will fix it in the next version.

Thanks
Eason

Patch

diff --git a/providers/hns/hns_roce_u.c b/providers/hns/hns_roce_u.c
index 2d12365..3597e9a 100644
--- a/providers/hns/hns_roce_u.c
+++ b/providers/hns/hns_roce_u.c
@@ -64,6 +64,7 @@  static const struct verbs_match_ent hca_table[] = {
 static const struct verbs_context_ops hns_common_ops = {
 	.alloc_mw = hns_roce_u_alloc_mw,
 	.alloc_pd = hns_roce_u_alloc_pd,
+	.bind_mw = hns_roce_u_bind_mw,
 	.cq_event = hns_roce_u_cq_event,
 	.create_cq = hns_roce_u_create_cq,
 	.create_qp = hns_roce_u_create_qp,
diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h
index ac75533..93c917d 100644
--- a/providers/hns/hns_roce_u.h
+++ b/providers/hns/hns_roce_u.h
@@ -277,6 +277,8 @@  int hns_roce_u_dereg_mr(struct verbs_mr *mr);
 
 struct ibv_mw *hns_roce_u_alloc_mw(struct ibv_pd *pd, enum ibv_mw_type type);
 int hns_roce_u_dealloc_mw(struct ibv_mw *mw);
+int hns_roce_u_bind_mw(struct ibv_qp *qp, struct ibv_mw *mw,
+		       struct ibv_mw_bind *mw_bind);
 
 struct ibv_cq *hns_roce_u_create_cq(struct ibv_context *context, int cqe,
 				    struct ibv_comp_channel *channel,
diff --git a/providers/hns/hns_roce_u_hw_v2.c b/providers/hns/hns_roce_u_hw_v2.c
index de8a96c..58049f0 100644
--- a/providers/hns/hns_roce_u_hw_v2.c
+++ b/providers/hns/hns_roce_u_hw_v2.c
@@ -57,6 +57,29 @@  static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
 	}
 }
 
+static void set_mw_seg(struct hns_roce_rc_sq_wqe *rc_sq_wqe,
+		       struct ibv_send_wr *wr)
+{
+	roce_set_bit(rc_sq_wqe->byte_4, RC_SQ_WQE_BYTE_4_MW_TYPE_S,
+		     wr->bind_mw.mw->type - 1);
+	roce_set_bit(rc_sq_wqe->byte_4,	RC_SQ_WQE_BYTE_4_ATOMIC_S,
+		     wr->bind_mw.bind_info.mw_access_flags &
+		     IBV_ACCESS_REMOTE_ATOMIC ? 1 : 0);
+	roce_set_bit(rc_sq_wqe->byte_4,	RC_SQ_WQE_BYTE_4_RDMA_READ_S,
+		     wr->bind_mw.bind_info.mw_access_flags &
+		     IBV_ACCESS_REMOTE_READ ? 1 : 0);
+	roce_set_bit(rc_sq_wqe->byte_4,	RC_SQ_WQE_BYTE_4_RDMA_WRITE_S,
+		     wr->bind_mw.bind_info.mw_access_flags &
+		     IBV_ACCESS_REMOTE_WRITE ? 1 : 0);
+
+	rc_sq_wqe->new_rkey = htole32(wr->bind_mw.rkey);
+	rc_sq_wqe->byte_16 = htole32(wr->bind_mw.bind_info.length & 0xffffffff);
+	rc_sq_wqe->byte_20 = htole32(wr->bind_mw.bind_info.length >> 32);
+	rc_sq_wqe->rkey = htole32(wr->bind_mw.bind_info.mr->rkey);
+
+	rc_sq_wqe->va = htole64(wr->bind_mw.bind_info.addr);
+}
+
 static void hns_roce_v2_handle_error_cqe(struct hns_roce_v2_cqe *cqe,
 					 struct ibv_wc *wc)
 {
@@ -548,8 +571,8 @@  static int hns_roce_u_v2_arm_cq(struct ibv_cq *ibvcq, int solicited)
 	return 0;
 }
 
-static int hns_roce_u_v2_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr,
-				   struct ibv_send_wr **bad_wr)
+int hns_roce_u_v2_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr,
+			    struct ibv_send_wr **bad_wr)
 {
 	unsigned int sq_shift;
 	unsigned int ind_sge;
@@ -710,6 +733,13 @@  static int hns_roce_u_v2_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr,
 				wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
 				set_atomic_seg(wqe, wr);
 				break;
+			case IBV_WR_BIND_MW:
+				roce_set_field(rc_sq_wqe->byte_4,
+					RC_SQ_WQE_BYTE_4_OPCODE_M,
+					RC_SQ_WQE_BYTE_4_OPCODE_S,
+					HNS_ROCE_WQE_OP_BIND_MW_TYPE);
+				set_mw_seg(rc_sq_wqe, wr);
+				break;
 			default:
 				roce_set_field(rc_sq_wqe->byte_4,
 					       RC_SQ_WQE_BYTE_4_OPCODE_M,
diff --git a/providers/hns/hns_roce_u_hw_v2.h b/providers/hns/hns_roce_u_hw_v2.h
index 99c7b99..ff63bb2 100644
--- a/providers/hns/hns_roce_u_hw_v2.h
+++ b/providers/hns/hns_roce_u_hw_v2.h
@@ -228,6 +228,7 @@  struct hns_roce_rc_sq_wqe {
 	union {
 		__le32	inv_key;
 		__le32	immtdata;
+		__le32	new_rkey;
 	};
 	__le32	byte_16;
 	__le32	byte_20;
@@ -251,6 +252,14 @@  struct hns_roce_rc_sq_wqe {
 
 #define RC_SQ_WQE_BYTE_4_INLINE_S 12
 
+#define RC_SQ_WQE_BYTE_4_MW_TYPE_S 14
+
+#define RC_SQ_WQE_BYTE_4_ATOMIC_S 20
+
+#define RC_SQ_WQE_BYTE_4_RDMA_READ_S 21
+
+#define RC_SQ_WQE_BYTE_4_RDMA_WRITE_S 22
+
 #define RC_SQ_WQE_BYTE_16_XRC_SRQN_S 0
 #define RC_SQ_WQE_BYTE_16_XRC_SRQN_M \
 	(((1UL << 24) - 1) << RC_SQ_WQE_BYTE_16_XRC_SRQN_S)
@@ -280,4 +289,7 @@  struct hns_roce_wqe_atomic_seg {
 	__le64		cmp_data;
 };
 
+int hns_roce_u_v2_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr,
+			    struct ibv_send_wr **bad_wr);
+
 #endif /* _HNS_ROCE_U_HW_V2_H */
diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c
index 53c8104..0d4b346 100644
--- a/providers/hns/hns_roce_u_verbs.c
+++ b/providers/hns/hns_roce_u_verbs.c
@@ -175,6 +175,42 @@  int hns_roce_u_dereg_mr(struct verbs_mr *vmr)
 	return ret;
 }
 
+int hns_roce_u_bind_mw(struct ibv_qp *qp, struct ibv_mw *mw,
+		       struct ibv_mw_bind *mw_bind)
+{
+	struct ibv_mw_bind_info *bind_info = &mw_bind->bind_info;
+	struct ibv_send_wr *bad_wr = NULL;
+	struct ibv_send_wr wr = {};
+	int ret;
+
+	if ((mw->pd != qp->pd) || (mw->pd != bind_info->mr->pd))
+		return EINVAL;
+
+	if (mw->type == IBV_MW_TYPE_2)
+		return EINVAL;
+
+	if (!bind_info->mr && bind_info->length)
+		return EINVAL;
+
+	wr.opcode = IBV_WR_BIND_MW;
+	wr.next = NULL;
+
+	wr.wr_id = mw_bind->wr_id;
+	wr.send_flags = mw_bind->send_flags;
+
+	wr.bind_mw.mw = mw;
+	wr.bind_mw.rkey = ibv_inc_rkey(mw->rkey);
+	wr.bind_mw.bind_info = mw_bind->bind_info;
+
+	ret = hns_roce_u_v2_post_send(qp, &wr, &bad_wr);
+	if (ret)
+		return ret;
+
+	mw->rkey = wr.bind_mw.rkey;
+
+	return 0;
+}
+
 struct ibv_mw *hns_roce_u_alloc_mw(struct ibv_pd *pd, enum ibv_mw_type type)
 {
 	struct ibv_mw *mw;