Message ID | 1536223798-21227-4-git-send-email-liuyixian@huawei.com (mailing list archive) |
---|---|
State | Changes Requested |
Delegated to: | Jason Gunthorpe |
Headers | show |
Series | Add mw support for hip08 in user space | expand |
On Thu, Sep 06, 2018 at 04:49:58PM +0800, Yixian Liu wrote: > @@ -251,6 +252,14 @@ struct hns_roce_rc_sq_wqe { > > #define RC_SQ_WQE_BYTE_4_INLINE_S 12 > > +#define RC_SQ_WQE_BYTE_4_MW_TYPE_S 14 > + > +#define RC_SQ_WQE_BYTE_4_ATOMIC_S 20 > + > +#define RC_SQ_WQE_BYTE_4_RDMA_READ_S 21 > + > +#define RC_SQ_WQE_BYTE_4_RDMA_WRITE_S 22 > + No extra horizontal space please > #define RC_SQ_WQE_BYTE_16_XRC_SRQN_S 0 > #define RC_SQ_WQE_BYTE_16_XRC_SRQN_M \ > (((1UL << 24) - 1) << RC_SQ_WQE_BYTE_16_XRC_SRQN_S) > @@ -275,4 +284,7 @@ struct hns_roce_v2_wqe_raddr_seg { > __le64 raddr; > }; > > +int hns_roce_u_v2_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr, > + struct ibv_send_wr **bad_wr); > + > #endif /* _HNS_ROCE_U_HW_V2_H */ > diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c > index 5a5e767..943eea5 100644 > +++ b/providers/hns/hns_roce_u_verbs.c > @@ -175,6 +175,32 @@ int hns_roce_u_dereg_mr(struct verbs_mr *vmr) > return ret; > } > > +int hns_roce_u_bind_mw(struct ibv_qp *qp, struct ibv_mw *mw, > + struct ibv_mw_bind *mw_bind) > +{ > + struct ibv_send_wr *bad_wr = NULL; > + struct ibv_send_wr wr = { }; No space in {} > + int ret; > + > + wr.opcode = IBV_WR_BIND_MW; > + wr.next = NULL; > + > + wr.wr_id = mw_bind->wr_id; > + wr.send_flags = mw_bind->send_flags; > + > + wr.bind_mw.mw = mw; > + wr.bind_mw.rkey = ibv_inc_rkey(mw->rkey); > + wr.bind_mw.bind_info = mw_bind->bind_info; > + > + ret = hns_roce_u_v2_post_send(qp, &wr, &bad_wr); > + if (ret) > + return ret; > + > + mw->rkey = wr.bind_mw.rkey; > + > + return 0; > +} This is exactly the same as mlx4_bind_mw.. We shouldn't have duplicates.. It is also missing all the correctness checking that is in mlx5_bind_mw Jason
On 2018/9/7 2:43, Jason Gunthorpe wrote: > On Thu, Sep 06, 2018 at 04:49:58PM +0800, Yixian Liu wrote: >> @@ -251,6 +252,14 @@ struct hns_roce_rc_sq_wqe { >> >> #define RC_SQ_WQE_BYTE_4_INLINE_S 12 >> >> +#define RC_SQ_WQE_BYTE_4_MW_TYPE_S 14 >> + >> +#define RC_SQ_WQE_BYTE_4_ATOMIC_S 20 >> + >> +#define RC_SQ_WQE_BYTE_4_RDMA_READ_S 21 >> + >> +#define RC_SQ_WQE_BYTE_4_RDMA_WRITE_S 22 >> + > > No extra horizontal space please Ok, will fix it next version. > >> #define RC_SQ_WQE_BYTE_16_XRC_SRQN_S 0 >> #define RC_SQ_WQE_BYTE_16_XRC_SRQN_M \ >> (((1UL << 24) - 1) << RC_SQ_WQE_BYTE_16_XRC_SRQN_S) >> @@ -275,4 +284,7 @@ struct hns_roce_v2_wqe_raddr_seg { >> __le64 raddr; >> }; >> >> +int hns_roce_u_v2_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr, >> + struct ibv_send_wr **bad_wr); >> + >> #endif /* _HNS_ROCE_U_HW_V2_H */ >> diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c >> index 5a5e767..943eea5 100644 >> +++ b/providers/hns/hns_roce_u_verbs.c >> @@ -175,6 +175,32 @@ int hns_roce_u_dereg_mr(struct verbs_mr *vmr) >> return ret; >> } >> >> +int hns_roce_u_bind_mw(struct ibv_qp *qp, struct ibv_mw *mw, >> + struct ibv_mw_bind *mw_bind) >> +{ >> + struct ibv_send_wr *bad_wr = NULL; >> + struct ibv_send_wr wr = { }; > > No space in {} Ok > >> + int ret; >> + >> + wr.opcode = IBV_WR_BIND_MW; >> + wr.next = NULL; >> + >> + wr.wr_id = mw_bind->wr_id; >> + wr.send_flags = mw_bind->send_flags; >> + >> + wr.bind_mw.mw = mw; >> + wr.bind_mw.rkey = ibv_inc_rkey(mw->rkey); >> + wr.bind_mw.bind_info = mw_bind->bind_info; >> + >> + ret = hns_roce_u_v2_post_send(qp, &wr, &bad_wr); >> + if (ret) >> + return ret; >> + >> + mw->rkey = wr.bind_mw.rkey; >> + >> + return 0; >> +} > > This is exactly the same as mlx4_bind_mw.. We shouldn't have > duplicates.. > > It is also missing all the correctness checking that is in > mlx5_bind_mw > > Jason > Yes, as bind mw is a simple interface it looks very similar for different vendors. 
I will add some checking in the next version according to the IB protocol.
diff --git a/providers/hns/hns_roce_u.c b/providers/hns/hns_roce_u.c index 7a59644..2a69a02 100644 --- a/providers/hns/hns_roce_u.c +++ b/providers/hns/hns_roce_u.c @@ -77,6 +77,7 @@ static const struct verbs_context_ops hns_common_ops = { .rereg_mr = hns_roce_u_rereg_mr, .alloc_mw = hns_roce_u_alloc_mw, .dealloc_mw = hns_roce_u_dealloc_mw, + .bind_mw = hns_roce_u_bind_mw, }; static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev, diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h index ac75533..93c917d 100644 --- a/providers/hns/hns_roce_u.h +++ b/providers/hns/hns_roce_u.h @@ -277,6 +277,8 @@ int hns_roce_u_dereg_mr(struct verbs_mr *mr); struct ibv_mw *hns_roce_u_alloc_mw(struct ibv_pd *pd, enum ibv_mw_type type); int hns_roce_u_dealloc_mw(struct ibv_mw *mw); +int hns_roce_u_bind_mw(struct ibv_qp *qp, struct ibv_mw *mw, + struct ibv_mw_bind *mw_bind); struct ibv_cq *hns_roce_u_create_cq(struct ibv_context *context, int cqe, struct ibv_comp_channel *channel, diff --git a/providers/hns/hns_roce_u_hw_v2.c b/providers/hns/hns_roce_u_hw_v2.c index bc2ef47..1a0ac5d 100644 --- a/providers/hns/hns_roce_u_hw_v2.c +++ b/providers/hns/hns_roce_u_hw_v2.c @@ -536,8 +536,8 @@ static int hns_roce_u_v2_arm_cq(struct ibv_cq *ibvcq, int solicited) return 0; } -static int hns_roce_u_v2_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr, - struct ibv_send_wr **bad_wr) +int hns_roce_u_v2_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr, + struct ibv_send_wr **bad_wr) { unsigned int sq_shift; unsigned int ind_sge; @@ -688,6 +688,40 @@ static int hns_roce_u_v2_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr, RC_SQ_WQE_BYTE_4_OPCODE_S, HNS_ROCE_WQE_OP_ATOMIC_FETCH_AND_ADD); break; + case IBV_WR_BIND_MW: + roce_set_field(rc_sq_wqe->byte_4, + RC_SQ_WQE_BYTE_4_OPCODE_M, + RC_SQ_WQE_BYTE_4_OPCODE_S, + HNS_ROCE_WQE_OP_BIND_MW_TYPE); + roce_set_bit(rc_sq_wqe->byte_4, + RC_SQ_WQE_BYTE_4_MW_TYPE_S, + wr->bind_mw.mw->type - 1); + 
roce_set_bit(rc_sq_wqe->byte_4, + RC_SQ_WQE_BYTE_4_ATOMIC_S, + (wr->bind_mw.bind_info.mw_access_flags & + IBV_ACCESS_REMOTE_ATOMIC) ? 1 : 0); + roce_set_bit(rc_sq_wqe->byte_4, + RC_SQ_WQE_BYTE_4_RDMA_READ_S, + (wr->bind_mw.bind_info.mw_access_flags & + IBV_ACCESS_REMOTE_READ) ? 1 : 0); + roce_set_bit(rc_sq_wqe->byte_4, + RC_SQ_WQE_BYTE_4_RDMA_WRITE_S, + (wr->bind_mw.bind_info.mw_access_flags & + IBV_ACCESS_REMOTE_WRITE) ? 1 : 0); + + rc_sq_wqe->new_rkey = htole32(wr->bind_mw.rkey); + rc_sq_wqe->byte_16 = + htole32(wr->bind_mw.bind_info.length & + 0xffffffff); + rc_sq_wqe->byte_20 = + htole32(wr->bind_mw.bind_info.length >> + 32); + rc_sq_wqe->rkey = + htole32(wr->bind_mw.bind_info.mr->rkey); + + rc_sq_wqe->va = + htole64(wr->bind_mw.bind_info.addr); + break; default: roce_set_field(rc_sq_wqe->byte_4, RC_SQ_WQE_BYTE_4_OPCODE_M, diff --git a/providers/hns/hns_roce_u_hw_v2.h b/providers/hns/hns_roce_u_hw_v2.h index 0cb264f..130b41f 100644 --- a/providers/hns/hns_roce_u_hw_v2.h +++ b/providers/hns/hns_roce_u_hw_v2.h @@ -228,6 +228,7 @@ struct hns_roce_rc_sq_wqe { union { __le32 inv_key; __le32 immtdata; + __le32 new_rkey; }; __le32 byte_16; __le32 byte_20; @@ -251,6 +252,14 @@ struct hns_roce_rc_sq_wqe { #define RC_SQ_WQE_BYTE_4_INLINE_S 12 +#define RC_SQ_WQE_BYTE_4_MW_TYPE_S 14 + +#define RC_SQ_WQE_BYTE_4_ATOMIC_S 20 + +#define RC_SQ_WQE_BYTE_4_RDMA_READ_S 21 + +#define RC_SQ_WQE_BYTE_4_RDMA_WRITE_S 22 + #define RC_SQ_WQE_BYTE_16_XRC_SRQN_S 0 #define RC_SQ_WQE_BYTE_16_XRC_SRQN_M \ (((1UL << 24) - 1) << RC_SQ_WQE_BYTE_16_XRC_SRQN_S) @@ -275,4 +284,7 @@ struct hns_roce_v2_wqe_raddr_seg { __le64 raddr; }; +int hns_roce_u_v2_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr, + struct ibv_send_wr **bad_wr); + #endif /* _HNS_ROCE_U_HW_V2_H */ diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c index 5a5e767..943eea5 100644 --- a/providers/hns/hns_roce_u_verbs.c +++ b/providers/hns/hns_roce_u_verbs.c @@ -175,6 +175,32 @@ int 
hns_roce_u_dereg_mr(struct verbs_mr *vmr) return ret; } +int hns_roce_u_bind_mw(struct ibv_qp *qp, struct ibv_mw *mw, + struct ibv_mw_bind *mw_bind) +{ + struct ibv_send_wr *bad_wr = NULL; + struct ibv_send_wr wr = { }; + int ret; + + wr.opcode = IBV_WR_BIND_MW; + wr.next = NULL; + + wr.wr_id = mw_bind->wr_id; + wr.send_flags = mw_bind->send_flags; + + wr.bind_mw.mw = mw; + wr.bind_mw.rkey = ibv_inc_rkey(mw->rkey); + wr.bind_mw.bind_info = mw_bind->bind_info; + + ret = hns_roce_u_v2_post_send(qp, &wr, &bad_wr); + if (ret) + return ret; + + mw->rkey = wr.bind_mw.rkey; + + return 0; +} + struct ibv_mw *hns_roce_u_alloc_mw(struct ibv_pd *pd, enum ibv_mw_type type) { struct ibv_mw *mw;
This patch adds memory window bind support in the user space driver. Signed-off-by: Yixian Liu <liuyixian@huawei.com> --- providers/hns/hns_roce_u.c | 1 + providers/hns/hns_roce_u.h | 2 ++ providers/hns/hns_roce_u_hw_v2.c | 38 ++++++++++++++++++++++++++++++++++++-- providers/hns/hns_roce_u_hw_v2.h | 12 ++++++++++++ providers/hns/hns_roce_u_verbs.c | 26 ++++++++++++++++++++++++++ 5 files changed, 77 insertions(+), 2 deletions(-)