old mode 100755
new mode 100644
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <rdma/ib_user_verbs.h>
+#include <infiniband/verbs.h>
#ifndef RDMA_ATOMIC_UAPI
#define RDMA_ATOMIC_UAPI(_type, _name) _type _name
#endif
@@ -28,4 +29,31 @@ struct rvt_cq_wc {
struct ib_uverbs_wc uqueue[0];
};
+/*
+ * Receive work request queue entry.
+ * The size of the sg_list is determined when the QP (or SRQ) is created
+ * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
+ */
+struct rvt_rwqe {
+ __u64 wr_id;
+ __u8 num_sge;
+ __u8 padding[7];
+ struct ibv_sge sg_list[0];
+};
+
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and receive work queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ * Note that the wq array elements are variable size so you can't
+ * just index into the array to get the N'th element;
+ * use get_rwqe_ptr() instead.
+ */
+struct rvt_rwq {
+ /* new work requests posted to the head */
+ RDMA_ATOMIC_UAPI(__u32, head);
+ /* receives pull requests from here. */
+ RDMA_ATOMIC_UAPI(__u32, tail);
+ struct rvt_rwqe wq[0];
+};
#endif /* RVT_ABI_USER_H */
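
Note on RDMA_ATOMIC_UAPI(): the fallback above expands to a plain field, so the shared head/tail indices are only atomic if the includer defines the macro first (the removed hfi1 fields below used _Atomic(uint32_t) directly). A minimal provider-side sketch, assuming the macro is mapped onto <stdatomic.h> and that the header is included as "rvt-abi.h"; rwq_load_tail() is a hypothetical helper, not part of this patch:

#include <stdatomic.h>
#include <stdint.h>

/* Map the UAPI marker onto a C11 atomic before pulling in the shared layout. */
#define RDMA_ATOMIC_UAPI(_type, _name) _Atomic(_type) _name
#include "rvt-abi.h"

/* Example: read the consumer index the kernel publishes in the mmap'ed ring. */
static inline uint32_t rwq_load_tail(struct rvt_rwq *rwq)
{
	return atomic_load_explicit(&rwq->tail, memory_order_acquire);
}
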
@@ -83,34 +83,8 @@ struct hfi1_cq {
struct rvt_cq_wc *queue;
pthread_spinlock_t lock;
};
-/*
- * Receive work request queue entry.
- * The size of the sg_list is determined when the QP is created and stored
- * in qp->r_max_sge.
- */
-struct hfi1_rwqe {
- uint64_t wr_id;
- uint8_t num_sge;
- uint8_t padding[7];
- struct ibv_sge sg_list[0];
-};
-
-/*
- * This struture is used to contain the head pointer, tail pointer,
- * and receive work queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- * Note that the wq array elements are variable size so you can't
- * just index into the array to get the N'th element;
- * use get_rwqe_ptr() instead.
- */
-struct hfi1_rwq {
- _Atomic(uint32_t) head; /* new requests posted to the head. */
- _Atomic(uint32_t) tail; /* receives pull requests from here. */
- struct hfi1_rwqe wq[0];
-};
-
struct hfi1_rq {
- struct hfi1_rwq *rwq;
+ struct rvt_rwq *rwq;
pthread_spinlock_t lock;
uint32_t size;
uint32_t max_sge;
@@ -158,12 +132,12 @@ static inline struct hfi1_srq *to_isrq(struct ibv_srq *ibsrq)
- * Since struct hfi1_rwqe is not a fixed size, we can't simply index into
+ * Since struct rvt_rwqe is not a fixed size, we can't simply index into
* struct hfi1_rq.wq. This function does the array index computation.
*/
-static inline struct hfi1_rwqe *get_rwqe_ptr(struct hfi1_rq *rq,
+static inline struct rvt_rwqe *get_rwqe_ptr(struct hfi1_rq *rq,
unsigned n)
{
- return (struct hfi1_rwqe *)
+ return (struct rvt_rwqe *)
((char *) rq->rwq->wq +
- (sizeof(struct hfi1_rwqe) +
+ (sizeof(struct rvt_rwqe) +
rq->max_sge * sizeof(struct ibv_sge)) * n);
 }
 
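
The stride in get_rwqe_ptr() is the same quantity that sizes every mmap()/munmap() of the ring in the hunks below. A sketch that names the computation, assuming the rq->size and rq->max_sge fields above; rvt_rwq_bytes() is hypothetical and not part of this patch:

#include <stddef.h>
#include <stdint.h>
#include <infiniband/verbs.h>
#include "rvt-abi.h"	/* include path assumed */

/*
 * Bytes needed for a ring of "size" slots with "max_sge" SGEs per slot.
 * Each rvt_rwqe is immediately followed by its sg_list, so the per-slot
 * stride must include the SGE array, not just sizeof(struct rvt_rwqe).
 */
static inline size_t rvt_rwq_bytes(uint32_t size, uint32_t max_sge)
{
	size_t stride = sizeof(struct rvt_rwqe) +
			max_sge * sizeof(struct ibv_sge);

	return sizeof(struct rvt_rwq) + (size_t)size * stride;
}
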
old mode 100644
new mode 100755
@@ -342,8 +342,8 @@ struct ibv_qp *hfi1_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *attr)
} else {
qp->rq.size = attr->cap.max_recv_wr + 1;
qp->rq.max_sge = attr->cap.max_recv_sge;
- size = sizeof(struct hfi1_rwq) +
- (sizeof(struct hfi1_rwqe) +
+ size = sizeof(struct rvt_rwq) +
+ (sizeof(struct rvt_rwqe) +
(sizeof(struct ibv_sge) * qp->rq.max_sge)) *
qp->rq.size;
qp->rq.rwq = mmap(NULL, size,
@@ -412,8 +412,8 @@ int hfi1_destroy_qp(struct ibv_qp *ibqp)
if (qp->rq.rwq) {
 		size_t size;
 
- size = sizeof(struct hfi1_rwq) +
- (sizeof(struct hfi1_rwqe) +
+ size = sizeof(struct rvt_rwq) +
+ (sizeof(struct rvt_rwqe) +
(sizeof(struct ibv_sge) * qp->rq.max_sge)) *
qp->rq.size;
(void) munmap(qp->rq.rwq, size);
@@ -470,8 +470,8 @@ static int post_recv(struct hfi1_rq *rq, struct ibv_recv_wr *wr,
struct ibv_recv_wr **bad_wr)
{
struct ibv_recv_wr *i;
- struct hfi1_rwq *rwq;
- struct hfi1_rwqe *wqe;
+ struct rvt_rwq *rwq;
+ struct rvt_rwqe *wqe;
uint32_t head;
 	int n, ret;
 
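
post_recv() itself only changes its local types; for reference, a simplified sketch of the per-request producer step it performs (caller holds rq->lock; the function name and memory-ordering choices are assumptions; head/tail are taken to be C11 atomics as in the earlier sketch; the control flow mirrors the existing code):

#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical single-WR version of the posting loop; caller holds rq->lock. */
static int post_one_recv(struct hfi1_rq *rq, const struct ibv_recv_wr *wr)
{
	struct rvt_rwq *rwq = rq->rwq;
	struct rvt_rwqe *wqe;
	uint32_t head;
	int n;

	if ((uint32_t)wr->num_sge > rq->max_sge)
		return EINVAL;			/* too many SGEs for one slot */

	head = atomic_load_explicit(&rwq->head, memory_order_relaxed);
	wqe = get_rwqe_ptr(rq, head);		/* slot about to be filled */
	if (++head >= rq->size)			/* wrap the producer index */
		head = 0;
	if (head == atomic_load_explicit(&rwq->tail, memory_order_acquire))
		return ENOMEM;			/* ring full; one slot stays empty */

	wqe->wr_id = wr->wr_id;
	wqe->num_sge = wr->num_sge;
	for (n = 0; n < wr->num_sge; n++)
		wqe->sg_list[n] = wr->sg_list[n];

	/* Publish the filled slot to the kernel-side consumer. */
	atomic_store_explicit(&rwq->head, head, memory_order_release);
	return 0;
}
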
@@ -541,8 +541,8 @@ struct ibv_srq *hfi1_create_srq(struct ibv_pd *pd,
srq->rq.size = attr->attr.max_wr + 1;
srq->rq.max_sge = attr->attr.max_sge;
- size = sizeof(struct hfi1_rwq) +
- (sizeof(struct hfi1_rwqe) +
+ size = sizeof(struct rvt_rwq) +
+ (sizeof(struct rvt_rwqe) +
(sizeof(struct ibv_sge) * srq->rq.max_sge)) * srq->rq.size;
srq->rq.rwq = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
pd->context->cmd_fd, resp.offset);
@@ -591,8 +591,8 @@ int hfi1_modify_srq(struct ibv_srq *ibsrq,
if (attr_mask & IBV_SRQ_MAX_WR) {
pthread_spin_lock(&srq->rq.lock);
/* Save the old size so we can unmmap the queue. */
- size = sizeof(struct hfi1_rwq) +
- (sizeof(struct hfi1_rwqe) +
+ size = sizeof(struct rvt_rwq) +
+ (sizeof(struct rvt_rwqe) +
(sizeof(struct ibv_sge) * srq->rq.max_sge)) *
srq->rq.size;
}
@@ -607,8 +607,8 @@ int hfi1_modify_srq(struct ibv_srq *ibsrq,
if (attr_mask & IBV_SRQ_MAX_WR) {
(void) munmap(srq->rq.rwq, size);
srq->rq.size = attr->max_wr + 1;
- size = sizeof(struct hfi1_rwq) +
- (sizeof(struct hfi1_rwqe) +
+ size = sizeof(struct rvt_rwq) +
+ (sizeof(struct rvt_rwqe) +
(sizeof(struct ibv_sge) * srq->rq.max_sge)) *
srq->rq.size;
srq->rq.rwq = mmap(NULL, size,
@@ -649,8 +649,8 @@ int hfi1_destroy_srq(struct ibv_srq *ibsrq)
if (ret)
 		return ret;
 
- size = sizeof(struct hfi1_rwq) +
- (sizeof(struct hfi1_rwqe) +
+ size = sizeof(struct rvt_rwq) +
+ (sizeof(struct rvt_rwqe) +
(sizeof(struct ibv_sge) * srq->rq.max_sge)) * srq->rq.size;
(void) munmap(srq->rq.rwq, size);
free(srq);